// go/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import "strings" // Notes: // - Less-than-64-bit integer types live in the low portion of registers. // The upper portion is junk. // - Boolean types are zero or 1; stored in a byte, with upper bytes of the register containing junk. // - *const instructions may use a constant larger than the instruction can encode. // In this case the assembler expands to multiple instructions and uses tmp // register (R31). var regNamesPPC64 = []string{ "R0", // REGZERO, not used, but simplifies counting in regalloc "SP", // REGSP "SB", // REGSB "R3", "R4", "R5", "R6", "R7", "R8", "R9", "R10", "R11", // REGCTXT for closures "R12", "R13", // REGTLS "R14", "R15", "R16", "R17", "R18", "R19", "R20", "R21", "R22", "R23", "R24", "R25", "R26", "R27", "R28", "R29", "g", // REGG. Using name "g" and setting Config.hasGReg makes it "just happen". "R31", // REGTMP "F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9", "F10", "F11", "F12", "F13", "F14", "F15", "F16", "F17", "F18", "F19", "F20", "F21", "F22", "F23", "F24", "F25", "F26", "F27", "F28", "F29", "F30", // "F31", the allocator is limited to 64 entries. We sacrifice this FPR to support XER. "XER", // If you add registers, update asyncPreempt in runtime. // "CR0", // "CR1", // "CR2", // "CR3", // "CR4", // "CR5", // "CR6", // "CR7", // "CR", // "LR", // "CTR", } func init() { // Make map from reg names to reg integers. if len(regNamesPPC64) > 64 { panic("too many registers") } num := map[string]int{} for i, name := range regNamesPPC64 { num[name] = i } buildReg := func(s string) regMask { m := regMask(0) for _, r := range strings.Split(s, " ") { if n, ok := num[r]; ok { m |= regMask(1) << uint(n) continue } panic("register " + r + " not found") } return m } var ( gp = buildReg("R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29") fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30") sp = buildReg("SP") sb = buildReg("SB") gr = buildReg("g") xer = buildReg("XER") // cr = buildReg("CR") // ctr = buildReg("CTR") // lr = buildReg("LR") tmp = buildReg("R31") ctxt = buildReg("R11") callptr = buildReg("R12") // tls = buildReg("R13") gp01 = regInfo{inputs: nil, outputs: []regMask{gp}} gp11 = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}} xergp = regInfo{inputs: []regMask{xer}, outputs: []regMask{gp}, clobbers: xer} gp11cxer = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}, clobbers: xer} gp11xer = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp, xer}} gp1xer1xer = regInfo{inputs: []regMask{gp | sp | sb, xer}, outputs: []regMask{gp, xer}, clobbers: xer} gp21 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}} gp21a0 = regInfo{inputs: []regMask{gp, gp | sp | sb}, outputs: []regMask{gp}} gp21cxer = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}, clobbers: xer} gp21xer = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp, xer}, clobbers: xer} gp2xer1xer = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, xer}, outputs: []regMask{gp, xer}, clobbers: xer} gp31 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}} gp1cr = regInfo{inputs: []regMask{gp | sp | sb}} gp2cr = regInfo{inputs: []regMask{gp | sp 
| sb, gp | sp | sb}} crgp = regInfo{inputs: nil, outputs: []regMask{gp}} crgp11 = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}} crgp21 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}} gpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}} gploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}} prefreg = regInfo{inputs: []regMask{gp | sp | sb}} gpstore = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}} gpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}} gpstorezero = regInfo{inputs: []regMask{gp | sp | sb}} // ppc64.REGZERO is reserved zero value gpxchg = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}} gpcas = regInfo{inputs: []regMask{gp | sp | sb, gp, gp}, outputs: []regMask{gp}} fp01 = regInfo{inputs: nil, outputs: []regMask{fp}} fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}} fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}} gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}} fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}} fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}} fp2cr = regInfo{inputs: []regMask{fp, fp}} fpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{fp}} fploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{fp}} fpstore = regInfo{inputs: []regMask{gp | sp | sb, fp}} fpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, fp}} callerSave = regMask(gp | fp | gr | xer) r3 = buildReg("R3") r4 = buildReg("R4") r5 = buildReg("R5") r6 = buildReg("R6") ) ops := []opData{ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1 {name: "ADDCC", argLength: 2, reg: gp21, asm: "ADDCC", commutative: true, typ: "(Int,Flags)"}, // arg0 + arg1 {name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "Int64"}, // arg0 + auxInt {name: "ADDCCconst", argLength: 1, reg: gp11cxer, asm: "ADDCCC", aux: "Int64", typ: "(Int,Flags)"}, // arg0 + auxInt sets CC, clobbers XER {name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true}, // arg0+arg1 {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1 {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0-arg1 {name: "SUBCC", argLength: 2, reg: gp21, asm: "SUBCC", typ: "(Int,Flags)"}, // arg0-arg1 sets CC {name: "SUBFCconst", argLength: 1, reg: gp11cxer, asm: "SUBC", aux: "Int64"}, // auxInt - arg0 (carry is ignored) {name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"}, // arg0-arg1 {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0-arg1 // Note, the FPU works with float64 in register. 
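// Editorial aside (not in the upstream file): per the note above, float32
// values are held widened to float64 in floating point registers, so a Go
// float32 addition lowers to a single FADDS, which adds and rounds the
// double-precision result to single precision in one step:
//
//	var a, b float32
//	c := a + b // FADDS Fa,Fb,Fc (illustrative register names) -- no separate FRSP needed
//
// FADD is the float64 form, where the result stays in double precision.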
{name: "XSMINJDP", argLength: 2, reg: fp21, asm: "XSMINJDP"}, // fmin(arg0,arg1) {name: "XSMAXJDP", argLength: 2, reg: fp21, asm: "XSMAXJDP"}, // fmax(arg0,arg1) {name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true}, // arg0*arg1 (signed 64-bit) {name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true}, // arg0*arg1 (signed 32-bit) {name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit) {name: "MULLWconst", argLength: 1, reg: gp11, asm: "MULLW", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit) {name: "MADDLD", argLength: 3, reg: gp31, asm: "MADDLD", typ: "Int64"}, // (arg0*arg1)+arg2 (signed 64-bit) {name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", commutative: true}, // (arg0 * arg1) >> 64, signed {name: "MULHW", argLength: 2, reg: gp21, asm: "MULHW", commutative: true}, // (arg0 * arg1) >> 32, signed {name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", commutative: true}, // (arg0 * arg1) >> 64, unsigned {name: "MULHWU", argLength: 2, reg: gp21, asm: "MULHWU", commutative: true}, // (arg0 * arg1) >> 32, unsigned {name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true}, // arg0*arg1 {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0*arg1 {name: "FMADD", argLength: 3, reg: fp31, asm: "FMADD"}, // arg0*arg1 + arg2 {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS"}, // arg0*arg1 + arg2 {name: "FMSUB", argLength: 3, reg: fp31, asm: "FMSUB"}, // arg0*arg1 - arg2 {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS"}, // arg0*arg1 - arg2 {name: "SRAD", argLength: 2, reg: gp21cxer, asm: "SRAD"}, // signed arg0 >> (arg1&127), 64 bit width (note: 127, not 63!) {name: "SRAW", argLength: 2, reg: gp21cxer, asm: "SRAW"}, // signed arg0 >> (arg1&63), 32 bit width {name: "SRD", argLength: 2, reg: gp21, asm: "SRD"}, // unsigned arg0 >> (arg1&127), 64 bit width {name: "SRW", argLength: 2, reg: gp21, asm: "SRW"}, // unsigned arg0 >> (arg1&63), 32 bit width {name: "SLD", argLength: 2, reg: gp21, asm: "SLD"}, // arg0 << (arg1&127), 64 bit width {name: "SLW", argLength: 2, reg: gp21, asm: "SLW"}, // arg0 << (arg1&63), 32 bit width {name: "ROTL", argLength: 2, reg: gp21, asm: "ROTL"}, // arg0 rotate left by arg1 mod 64 {name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32 // The following are ops to implement the extended mnemonics for shifts as described in section C.8 of the ISA. // The constant shift values are packed into the aux int32. 
{name: "CLRLSLWI", argLength: 1, reg: gp11, asm: "CLRLSLWI", aux: "Int32"}, // {name: "CLRLSLDI", argLength: 1, reg: gp11, asm: "CLRLSLDI", aux: "Int32"}, // // Operations which consume or generate the CA (xer) {name: "ADDC", argLength: 2, reg: gp21xer, asm: "ADDC", commutative: true, typ: "(UInt64, UInt64)"}, // arg0 + arg1 -> out, CA {name: "SUBC", argLength: 2, reg: gp21xer, asm: "SUBC", typ: "(UInt64, UInt64)"}, // arg0 - arg1 -> out, CA {name: "ADDCconst", argLength: 1, reg: gp11xer, asm: "ADDC", typ: "(UInt64, UInt64)", aux: "Int64"}, // arg0 + imm16 -> out, CA {name: "SUBCconst", argLength: 1, reg: gp11xer, asm: "SUBC", typ: "(UInt64, UInt64)", aux: "Int64"}, // imm16 - arg0 -> out, CA {name: "ADDE", argLength: 3, reg: gp2xer1xer, asm: "ADDE", typ: "(UInt64, UInt64)", commutative: true}, // arg0 + arg1 + CA (arg2) -> out, CA {name: "ADDZE", argLength: 2, reg: gp1xer1xer, asm: "ADDZE", typ: "(UInt64, UInt64)"}, // arg0 + CA (arg1) -> out, CA {name: "SUBE", argLength: 3, reg: gp2xer1xer, asm: "SUBE", typ: "(UInt64, UInt64)"}, // arg0 - arg1 - CA (arg2) -> out, CA {name: "ADDZEzero", argLength: 1, reg: xergp, asm: "ADDZE", typ: "UInt64"}, // CA (arg0) + $0 -> out {name: "SUBZEzero", argLength: 1, reg: xergp, asm: "SUBZE", typ: "UInt64"}, // $0 - CA (arg0) -> out {name: "SRADconst", argLength: 1, reg: gp11cxer, asm: "SRAD", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width {name: "SRAWconst", argLength: 1, reg: gp11cxer, asm: "SRAW", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width {name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int64"}, // unsigned arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width {name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int64"}, // unsigned arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int64"}, // arg0 << auxInt, 0 <= auxInt < 64, 64 bit width {name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int64"}, // arg0 << auxInt, 0 <= auxInt < 32, 32 bit width {name: "ROTLconst", argLength: 1, reg: gp11, asm: "ROTL", aux: "Int64"}, // arg0 rotate left by auxInt bits {name: "ROTLWconst", argLength: 1, reg: gp11, asm: "ROTLW", aux: "Int64"}, // uint32(arg0) rotate left by auxInt bits {name: "EXTSWSLconst", argLength: 1, reg: gp11, asm: "EXTSWSLI", aux: "Int64"}, {name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux {name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux {name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true}, // "rlwimi" similar aux encoding as above {name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int64"}, // Auxint is encoded similarly to RLWINM, but only MB and SH are valid. ME is always 63. {name: "RLDICLCC", argLength: 1, reg: gp11, asm: "RLDICLCC", aux: "Int64", typ: "(Int, Flags)"}, // Auxint is encoded similarly to RLWINM, but only MB and SH are valid. ME is always 63. Sets CC. {name: "RLDICR", argLength: 1, reg: gp11, asm: "RLDICR", aux: "Int64"}, // Likewise, but only ME and SH are valid. MB is always 0. 
{name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD"}, // count leading zeros {name: "CNTLZDCC", argLength: 1, reg: gp11, asm: "CNTLZDCC", typ: "(Int, Flags)"}, // count leading zeros, sets CC {name: "CNTLZW", argLength: 1, reg: gp11, asm: "CNTLZW"}, // count leading zeros (32 bit) {name: "CNTTZD", argLength: 1, reg: gp11, asm: "CNTTZD"}, // count trailing zeros {name: "CNTTZW", argLength: 1, reg: gp11, asm: "CNTTZW"}, // count trailing zeros (32 bit) {name: "POPCNTD", argLength: 1, reg: gp11, asm: "POPCNTD"}, // number of set bits in arg0 {name: "POPCNTW", argLength: 1, reg: gp11, asm: "POPCNTW"}, // number of set bits in each word of arg0 placed in corresponding word {name: "POPCNTB", argLength: 1, reg: gp11, asm: "POPCNTB"}, // number of set bits in each byte of arg0 placed in corresponding byte {name: "FDIV", argLength: 2, reg: fp21, asm: "FDIV"}, // arg0/arg1 {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS"}, // arg0/arg1 {name: "DIVD", argLength: 2, reg: gp21, asm: "DIVD", typ: "Int64"}, // arg0/arg1 (signed 64-bit) {name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", typ: "Int32"}, // arg0/arg1 (signed 32-bit) {name: "DIVDU", argLength: 2, reg: gp21, asm: "DIVDU", typ: "Int64"}, // arg0/arg1 (unsigned 64-bit) {name: "DIVWU", argLength: 2, reg: gp21, asm: "DIVWU", typ: "Int32"}, // arg0/arg1 (unsigned 32-bit) {name: "MODUD", argLength: 2, reg: gp21, asm: "MODUD", typ: "UInt64"}, // arg0 % arg1 (unsigned 64-bit) {name: "MODSD", argLength: 2, reg: gp21, asm: "MODSD", typ: "Int64"}, // arg0 % arg1 (signed 64-bit) {name: "MODUW", argLength: 2, reg: gp21, asm: "MODUW", typ: "UInt32"}, // arg0 % arg1 (unsigned 32-bit) {name: "MODSW", argLength: 2, reg: gp21, asm: "MODSW", typ: "Int32"}, // arg0 % arg1 (signed 32-bit) // MOD is implemented as rem := arg0 - (arg0/arg1) * arg1 // Conversions are all float-to-float register operations. "Integer" refers to encoding in the FP register. {name: "FCTIDZ", argLength: 1, reg: fp11, asm: "FCTIDZ", typ: "Float64"}, // convert float to 64-bit int round towards zero {name: "FCTIWZ", argLength: 1, reg: fp11, asm: "FCTIWZ", typ: "Float64"}, // convert float to 32-bit int round towards zero {name: "FCFID", argLength: 1, reg: fp11, asm: "FCFID", typ: "Float64"}, // convert 64-bit integer to float {name: "FCFIDS", argLength: 1, reg: fp11, asm: "FCFIDS", typ: "Float32"}, // convert 32-bit integer to float {name: "FRSP", argLength: 1, reg: fp11, asm: "FRSP", typ: "Float64"}, // round float to 32-bit value // Movement between float and integer registers with no change in bits; accomplished with stores+loads on PPC. // Because the 32-bit load-literal-bits instructions have impoverished addressability, always widen the // data instead and use FMOVDload and FMOVDstore instead (this will also dodge endianess issues). // There are optimizations that should apply -- (Xi2f64 (MOVWload (not-ADD-ptr+offset) ) ) could use // the word-load instructions. 
(Xi2f64 (MOVDload ptr )) can be (FMOVDload ptr) {name: "MFVSRD", argLength: 1, reg: fpgp, asm: "MFVSRD", typ: "Int64"}, // move 64 bits of F register into G register {name: "MTVSRD", argLength: 1, reg: gpfp, asm: "MTVSRD", typ: "Float64"}, // move 64 bits of G register into F register {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0&arg1 {name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"}, // arg0&^arg1 {name: "ANDNCC", argLength: 2, reg: gp21, asm: "ANDNCC", typ: "(Int64,Flags)"}, // arg0&^arg1 sets CC {name: "ANDCC", argLength: 2, reg: gp21, asm: "ANDCC", commutative: true, typ: "(Int64,Flags)"}, // arg0&arg1 sets CC {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0|arg1 {name: "ORN", argLength: 2, reg: gp21, asm: "ORN"}, // arg0|^arg1 {name: "ORCC", argLength: 2, reg: gp21, asm: "ORCC", commutative: true, typ: "(Int,Flags)"}, // arg0|arg1 sets CC {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0|arg1) {name: "NORCC", argLength: 2, reg: gp21, asm: "NORCC", commutative: true, typ: "(Int,Flags)"}, // ^(arg0|arg1) sets CC {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true}, // arg0^arg1 {name: "XORCC", argLength: 2, reg: gp21, asm: "XORCC", commutative: true, typ: "(Int,Flags)"}, // arg0^arg1 sets CC {name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true}, // arg0^^arg1 {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0 (integer) {name: "NEGCC", argLength: 1, reg: gp11, asm: "NEGCC", typ: "(Int,Flags)"}, // -arg0 (integer) sets CC {name: "BRD", argLength: 1, reg: gp11, asm: "BRD"}, // reversebytes64(arg0) {name: "BRW", argLength: 1, reg: gp11, asm: "BRW"}, // reversebytes32(arg0) {name: "BRH", argLength: 1, reg: gp11, asm: "BRH"}, // reversebytes16(arg0) {name: "FNEG", argLength: 1, reg: fp11, asm: "FNEG"}, // -arg0 (floating point) {name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0) (floating point) {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0) (floating point, single precision) {name: "FFLOOR", argLength: 1, reg: fp11, asm: "FRIM"}, // floor(arg0), float64 {name: "FCEIL", argLength: 1, reg: fp11, asm: "FRIP"}, // ceil(arg0), float64 {name: "FTRUNC", argLength: 1, reg: fp11, asm: "FRIZ"}, // trunc(arg0), float64 {name: "FROUND", argLength: 1, reg: fp11, asm: "FRIN"}, // round(arg0), float64 {name: "FABS", argLength: 1, reg: fp11, asm: "FABS"}, // abs(arg0), float64 {name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"}, // -abs(arg0), float64 {name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"}, // copysign arg0 -> arg1, float64 {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux {name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", typ: "(Int,Flags)"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always. {name: "ANDconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, clobberFlags: true, asm: "ANDCC", aux: "Int64", typ: "Int"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always. 
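// Editorial aside (not in the upstream file): MFVSRD and MTVSRD are the
// direct FPR<->GPR bit moves used, on CPUs that have them, for
// bit-preserving conversions such as
//
//	bits := math.Float64bits(x)     // MFVSRD: FPR bits of x -> GPR
//	y := math.Float64frombits(bits) // MTVSRD: GPR bits -> FPR
//
// avoiding the store/load round trip described in the comment above.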
{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"}, // sign extend int8 to int64 {name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64 {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH", typ: "Int64"}, // sign extend int16 to int64 {name: "MOVHZreg", argLength: 1, reg: gp11, asm: "MOVHZ", typ: "Int64"}, // zero extend uint16 to uint64 {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW", typ: "Int64"}, // sign extend int32 to int64 {name: "MOVWZreg", argLength: 1, reg: gp11, asm: "MOVWZ", typ: "Int64"}, // zero extend uint32 to uint64 // Load bytes in the endian order of the arch from arg0+aux+auxint into a 64 bit register. {name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte zero extend {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes sign extend {name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes zero extend {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes sign extend {name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes zero extend {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes // Load bytes in reverse endian order of the arch from arg0 into a 64 bit register, all zero extend. // The generated instructions are indexed loads with no offset field in the instruction so the aux fields are not used. // In these cases the index register field is set to 0 and the full address is in the base register. {name: "MOVDBRload", argLength: 2, reg: gpload, asm: "MOVDBR", typ: "UInt64", faultOnNilArg0: true}, // load 8 bytes reverse order {name: "MOVWBRload", argLength: 2, reg: gpload, asm: "MOVWBR", typ: "UInt32", faultOnNilArg0: true}, // load 4 bytes zero extend reverse order {name: "MOVHBRload", argLength: 2, reg: gpload, asm: "MOVHBR", typ: "UInt16", faultOnNilArg0: true}, // load 2 bytes zero extend reverse order // In these cases an index register is used in addition to a base register // Loads from memory location arg[0] + arg[1]. 
{name: "MOVBZloadidx", argLength: 3, reg: gploadidx, asm: "MOVBZ", typ: "UInt8"}, // zero extend uint8 to uint64 {name: "MOVHloadidx", argLength: 3, reg: gploadidx, asm: "MOVH", typ: "Int16"}, // sign extend int16 to int64 {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, asm: "MOVHZ", typ: "UInt16"}, // zero extend uint16 to uint64 {name: "MOVWloadidx", argLength: 3, reg: gploadidx, asm: "MOVW", typ: "Int32"}, // sign extend int32 to int64 {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, asm: "MOVWZ", typ: "UInt32"}, // zero extend uint32 to uint64 {name: "MOVDloadidx", argLength: 3, reg: gploadidx, asm: "MOVD", typ: "Int64"}, {name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVHBR", typ: "Int16"}, // sign extend int16 to int64 {name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVWBR", typ: "Int32"}, // sign extend int32 to int64 {name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVDBR", typ: "Int64"}, {name: "FMOVDloadidx", argLength: 3, reg: fploadidx, asm: "FMOVD", typ: "Float64"}, {name: "FMOVSloadidx", argLength: 3, reg: fploadidx, asm: "FMOVS", typ: "Float32"}, // Prefetch instruction // Do prefetch of address generated with arg0 and arg1 with option aux. arg0=addr,arg1=memory, aux=option. {name: "DCBT", argLength: 2, aux: "Int64", reg: prefreg, asm: "DCBT", hasSideEffects: true}, // Store bytes in the reverse endian order of the arch into arg0. // These are indexed stores with no offset field in the instruction so the auxint fields are not used. {name: "MOVDBRstore", argLength: 3, reg: gpstore, asm: "MOVDBR", typ: "Mem", faultOnNilArg0: true}, // store 8 bytes reverse order {name: "MOVWBRstore", argLength: 3, reg: gpstore, asm: "MOVWBR", typ: "Mem", faultOnNilArg0: true}, // store 4 bytes reverse order {name: "MOVHBRstore", argLength: 3, reg: gpstore, asm: "MOVHBR", typ: "Mem", faultOnNilArg0: true}, // store 2 bytes reverse order // Floating point loads from arg0+aux+auxint {name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load double float {name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load single float // Store bytes in the endian order of the arch into arg0+aux+auxint {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes // Store floating point value into arg0+aux+auxint {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store double flot {name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store single float // Stores using index and base registers // Stores to arg[0] + arg[1] {name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVB", typ: "Mem"}, // store bye {name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVH", typ: "Mem"}, // store half word {name: 
"MOVWstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVW", typ: "Mem"}, // store word {name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVD", typ: "Mem"}, // store double word {name: "FMOVDstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVD", typ: "Mem"}, // store double float {name: "FMOVSstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVS", typ: "Mem"}, // store single float {name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVHBR", typ: "Mem"}, // store half word reversed byte using index reg {name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVWBR", typ: "Mem"}, // store word reversed byte using index reg {name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVDBR", typ: "Mem"}, // store double word reversed byte using index reg // The following ops store 0 into arg0+aux+auxint arg1=mem {name: "MOVBstorezero", argLength: 2, reg: gpstorezero, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 1 byte {name: "MOVHstorezero", argLength: 2, reg: gpstorezero, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 2 bytes {name: "MOVWstorezero", argLength: 2, reg: gpstorezero, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 4 bytes {name: "MOVDstorezero", argLength: 2, reg: gpstorezero, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 8 bytes {name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{sp | sb | gp}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB/GP {name: "MOVDconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVD", typ: "Int64", rematerializeable: true}, // {name: "FMOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVD", rematerializeable: true}, // {name: "FMOVSconst", argLength: 0, reg: fp01, aux: "Float32", asm: "FMOVS", rematerializeable: true}, // {name: "FCMPU", argLength: 2, reg: fp2cr, asm: "FCMPU", typ: "Flags"}, {name: "CMP", argLength: 2, reg: gp2cr, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPU", argLength: 2, reg: gp2cr, asm: "CMPU", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPW", argLength: 2, reg: gp2cr, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPWU", argLength: 2, reg: gp2cr, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPconst", argLength: 1, reg: gp1cr, asm: "CMP", aux: "Int64", typ: "Flags"}, {name: "CMPUconst", argLength: 1, reg: gp1cr, asm: "CMPU", aux: "Int64", typ: "Flags"}, {name: "CMPWconst", argLength: 1, reg: gp1cr, asm: "CMPW", aux: "Int32", typ: "Flags"}, {name: "CMPWUconst", argLength: 1, reg: gp1cr, asm: "CMPWU", aux: "Int32", typ: "Flags"}, // ISEL arg2 ? arg0 : arg1 // ISELZ arg1 ? arg0 : $0 // auxInt values 0=LT 1=GT 2=EQ 3=SO (summary overflow/unordered) 4=GE 5=LE 6=NE 7=NSO (not summary overflow/not unordered) // Note, auxInt^4 inverts the comparison condition. For example, LT^4 becomes GE, and "ISEL [a] x y z" is equivalent to ISEL [a^4] y x z". {name: "ISEL", argLength: 3, reg: crgp21, asm: "ISEL", aux: "Int32", typ: "Int32"}, {name: "ISELZ", argLength: 2, reg: crgp11, asm: "ISEL", aux: "Int32"}, // SETBC auxInt values 0=LT 1=GT 2=EQ (CRbit=1)? 1 : 0 {name: "SETBC", argLength: 1, reg: crgp, asm: "SETBC", aux: "Int32", typ: "Int32"}, // SETBCR auxInt values 0=LT 1=GT 2=EQ (CRbit=1)? 
0 : 1 {name: "SETBCR", argLength: 1, reg: crgp, asm: "SETBCR", aux: "Int32", typ: "Int32"}, // pseudo-ops {name: "Equal", argLength: 1, reg: crgp}, // bool, true flags encode x==y false otherwise. {name: "NotEqual", argLength: 1, reg: crgp}, // bool, true flags encode x!=y false otherwise. {name: "LessThan", argLength: 1, reg: crgp}, // bool, true flags encode x<y false otherwise. {name: "FLessThan", argLength: 1, reg: crgp}, // bool, true flags encode x<y false otherwise. {name: "LessEqual", argLength: 1, reg: crgp}, // bool, true flags encode x<=y false otherwise. {name: "FLessEqual", argLength: 1, reg: crgp}, // bool, true flags encode x<=y false otherwise; PPC <= === !> which is wrong for NaN {name: "GreaterThan", argLength: 1, reg: crgp}, // bool, true flags encode x>y false otherwise. {name: "FGreaterThan", argLength: 1, reg: crgp}, // bool, true flags encode x>y false otherwise. {name: "GreaterEqual", argLength: 1, reg: crgp}, // bool, true flags encode x>=y false otherwise. {name: "FGreaterEqual", argLength: 1, reg: crgp}, // bool, true flags encode x>=y false otherwise.; PPC >= === !< which is wrong for NaN // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, // and sorts it to the very beginning of the block to prevent other // use of the closure pointer. {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{ctxt}}, zeroWidth: true}, // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem. {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true}, // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. // I.e., if f calls g "calls" getcallerpc, // the result should be the PC within f that g will return to. // See runtime/stubs.go for a more detailed discussion. {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, //arg0=ptr,arg1=mem, returns void. Faults if ptr is nil. {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true}, // Round ops to block fused-multiply-add extraction. {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true}, {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true}, {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{callptr, ctxt, 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{callptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. 
arg0=codeptr, arg1=mem, auxint=argsize, returns mem // large or unaligned zeroing // arg0 = address of memory to zero (in R3, changed as side effect) // returns mem // // a loop is generated when there is more than one iteration // needed to clear 4 doublewords // // XXLXOR VS32,VS32,VS32 // MOVD $len/32,R31 // MOVD R31,CTR // MOVD $16,R31 // loop: // STXVD2X VS32,(R0)(R3) // STXVD2X VS32,(R31)(R3) // ADD R3,32 // BC loop // remaining doubleword clears generated as needed // MOVD R0,(R3) // MOVD R0,8(R3) // MOVD R0,16(R3) // MOVD R0,24(R3) // one or more of these to clear remainder < 8 bytes // MOVW R0,n1(R3) // MOVH R0,n2(R3) // MOVB R0,n3(R3) { name: "LoweredZero", aux: "Int64", argLength: 2, reg: regInfo{ inputs: []regMask{buildReg("R20")}, clobbers: buildReg("R20"), }, clobberFlags: true, typ: "Mem", faultOnNilArg0: true, unsafePoint: true, }, { name: "LoweredZeroShort", aux: "Int64", argLength: 2, reg: regInfo{ inputs: []regMask{gp}}, typ: "Mem", faultOnNilArg0: true, unsafePoint: true, }, { name: "LoweredQuadZeroShort", aux: "Int64", argLength: 2, reg: regInfo{ inputs: []regMask{gp}, }, typ: "Mem", faultOnNilArg0: true, unsafePoint: true, }, { name: "LoweredQuadZero", aux: "Int64", argLength: 2, reg: regInfo{ inputs: []regMask{buildReg("R20")}, clobbers: buildReg("R20"), }, clobberFlags: true, typ: "Mem", faultOnNilArg0: true, unsafePoint: true, }, // R31 is temp register // Loop code: // MOVD len/32,R31 set up loop ctr // MOVD R31,CTR // MOVD $16,R31 index register // loop: // LXVD2X (R0)(R4),VS32 // LXVD2X (R31)(R4),VS33 // ADD R4,$32 increment src // STXVD2X VS32,(R0)(R3) // STXVD2X VS33,(R31)(R3) // ADD R3,$32 increment dst // BC 16,0,loop branch ctr // For this purpose, VS32 and VS33 are treated as // scratch registers. Since regalloc does not // track vector registers, even if it could be marked // as clobbered it would have no effect. // TODO: If vector registers are managed by regalloc // mark these as clobbered. // // Bytes not moved by this loop are moved // with a combination of the following instructions, // starting with the largest sizes and generating as // many as needed, using the appropriate offset value. // MOVD n(R4),R14 // MOVD R14,n(R3) // MOVW n1(R4),R14 // MOVW R14,n1(R3) // MOVH n2(R4),R14 // MOVH R14,n2(R3) // MOVB n3(R4),R14 // MOVB R14,n3(R3) { name: "LoweredMove", aux: "Int64", argLength: 3, reg: regInfo{ inputs: []regMask{buildReg("R20"), buildReg("R21")}, clobbers: buildReg("R20 R21"), }, clobberFlags: true, typ: "Mem", faultOnNilArg0: true, faultOnNilArg1: true, unsafePoint: true, }, { name: "LoweredMoveShort", aux: "Int64", argLength: 3, reg: regInfo{ inputs: []regMask{gp, gp}, }, typ: "Mem", faultOnNilArg0: true, faultOnNilArg1: true, unsafePoint: true, }, // The following is similar to the LoweredMove, but uses // LXV instead of LXVD2X, which does not require an index // register and will do 4 in a loop instead of only. 
{ name: "LoweredQuadMove", aux: "Int64", argLength: 3, reg: regInfo{ inputs: []regMask{buildReg("R20"), buildReg("R21")}, clobbers: buildReg("R20 R21"), }, clobberFlags: true, typ: "Mem", faultOnNilArg0: true, faultOnNilArg1: true, unsafePoint: true, }, { name: "LoweredQuadMoveShort", aux: "Int64", argLength: 3, reg: regInfo{ inputs: []regMask{gp, gp}, }, typ: "Mem", faultOnNilArg0: true, faultOnNilArg1: true, unsafePoint: true, }, {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, typ: "UInt8", aux: "Int64", clobberFlags: true, faultOnNilArg0: true}, {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, typ: "UInt32", aux: "Int64", clobberFlags: true, faultOnNilArg0: true}, {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true}, {name: "LoweredAtomicLoadPtr", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true}, // atomic add32, 64 // LWSYNC // LDAR (Rarg0), Rout // ADD Rarg1, Rout // STDCCC Rout, (Rarg0) // BNE -3(PC) // return new sum {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, // atomic exchange32, 64 // LWSYNC // LDAR (Rarg0), Rout // STDCCC Rarg1, (Rarg0) // BNE -2(PC) // ISYNC // return old val {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, // atomic compare and swap. // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero. // if *arg0 == arg1 { // *arg0 = arg2 // return (true, memory) // } else { // return (false, memory) // } // SYNC // LDAR (Rarg0), Rtmp // CMP Rarg1, Rtmp // BNE 3(PC) // STDCCC Rarg2, (Rarg0) // BNE -4(PC) // CBNZ Rtmp, -4(PC) // CSET EQ, Rout {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, // atomic 8/32 and/or. // *arg0 &= (|=) arg1. arg2=mem. returns memory. auxint must be zero. // LBAR/LWAT (Rarg0), Rtmp // AND/OR Rarg1, Rtmp // STBCCC/STWCCC Rtmp, (Rarg0), Rtmp // BNE Rtmp, -3(PC) {name: "LoweredAtomicAnd8", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicAnd32", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicOr8", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicOr32", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true}, // LoweredWB invokes runtime.gcWriteBarrier. 
arg0=mem, auxint=# of buffer entries needed // It preserves R0 through R17 (except special registers R1, R2, R11, R12, R13), g, and R20 and R21, // but may clobber anything else, including R31 (REGTMP). // Returns a pointer to a write barrier buffer in R29. {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ buildReg("R0 R3 R4 R5 R6 R7 R8 R9 R10 R14 R15 R16 R17 R20 R21 g")) | buildReg("R31"), outputs: []regMask{buildReg("R29")}}, clobberFlags: true, aux: "Int64"}, {name: "LoweredPubBarrier", argLength: 1, asm: "LWSYNC", hasSideEffects: true}, // Do data barrier. arg0=memory // There are three of these functions so that they can have three different register inputs. // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the // default registers to match so we don't need to copy registers around unnecessarily. {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r6}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r5}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). // (InvertFlags (CMP a b)) == (CMP b a) // So if we want (LessThan (CMP a b)) but we can't do that because a is a constant, // then we do (LessThan (InvertFlags (CMP b a))) instead. // Rewrites will convert this to (GreaterThan (CMP b a)). // InvertFlags is a pseudo-op which can't appear in assembly output. {name: "InvertFlags", argLength: 1}, // reverse direction of arg0 // Constant flag values. For any comparison, there are 3 possible // outcomes: either the three from the signed total order (<,==,>) // or the three from the unsigned total order, depending on which // comparison operation was used (CMP or CMPU -- PPC is different from // the other architectures, which have a single comparison producing // both signed and unsigned comparison results.) // These ops are for temporary use by rewrite rules. They // cannot appear in the generated assembly. {name: "FlagEQ"}, // equal {name: "FlagLT"}, // signed < or unsigned < {name: "FlagGT"}, // signed > or unsigned > } blocks := []blockData{ {name: "EQ", controls: 1}, {name: "NE", controls: 1}, {name: "LT", controls: 1}, {name: "LE", controls: 1}, {name: "GT", controls: 1}, {name: "GE", controls: 1}, {name: "FLT", controls: 1}, {name: "FLE", controls: 1}, {name: "FGT", controls: 1}, {name: "FGE", controls: 1}, } archs = append(archs, arch{ name: "PPC64", pkg: "cmd/internal/obj/ppc64", genfile: "../../ppc64/ssa.go", ops: ops, blocks: blocks, regnames: regNamesPPC64, ParamIntRegNames: "R3 R4 R5 R6 R7 R8 R9 R10 R14 R15 R16 R17", ParamFloatRegNames: "F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12", gpregmask: gp, fpregmask: fp, specialregmask: xer, framepointerreg: -1, linkreg: -1, // not used }) }
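A minimal, runnable sketch (editorial illustration, not part of the file above) of the regMask-building technique used in init(): register names map to bit indices, buildReg folds a space-separated list of names into a 64-bit mask, and masks combine with |. The shortened register list here is hypothetical.

package main

import (
	"fmt"
	"strings"
)

type regMask uint64

func main() {
	// Illustrative subset of regNamesPPC64; slice index = bit position.
	names := []string{"R0", "SP", "SB", "R3", "R4", "R5"}
	num := map[string]int{}
	for i, name := range names {
		num[name] = i
	}
	// buildReg mirrors the helper defined in init() above.
	buildReg := func(s string) regMask {
		m := regMask(0)
		for _, r := range strings.Split(s, " ") {
			n, ok := num[r]
			if !ok {
				panic("register " + r + " not found")
			}
			m |= regMask(1) << uint(n)
		}
		return m
	}
	gp := buildReg("R3 R4 R5")
	sp := buildReg("SP")
	fmt.Printf("gp=%#x sp=%#x gp|sp=%#x\n", gp, sp, gp|sp) // gp=0x38 sp=0x2 gp|sp=0x3a
}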
// go/src/cmd/compile/internal/ssa/_gen/generic.rules
// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Simplifications that apply to all backend architectures. As an example, this // Go source code // // y := 0 * x // // can be translated into y := 0 without losing any information, which saves a // pointless multiplication instruction. Other .rules files in this directory // (for example AMD64.rules) contain rules specific to the architecture in the // filename. The rules here apply to every architecture. // // The code for parsing this file lives in rulegen.go; this file generates // ssa/rewritegeneric.go. // values are specified using the following format: // (op <type> [auxint] {aux} arg0 arg1 ...) // the type, aux, and auxint fields are optional // on the matching side // - the type, aux, and auxint fields must match if they are specified. // - the first occurrence of a variable defines that variable. Subsequent // uses must match (be == to) the first use. // - v is defined to be the value matched. // - an additional conditional can be provided after the match pattern with "&&". // on the generated side // - the type of the top-level expression is the same as the one on the left-hand side. // - the type of any subexpressions must be specified explicitly (or // be specified in the op's type field). // - auxint will be 0 if not specified. // - aux will be nil if not specified. // blocks are specified using the following format: // (kind controlvalue succ0 succ1 ...) // controlvalue must be "nil" or a value expression // succ* fields must be variables // For now, the generated successors must be a permutation of the matched successors. // constant folding (Trunc16to8 (Const16 [c])) => (Const8 [int8(c)]) (Trunc32to8 (Const32 [c])) => (Const8 [int8(c)]) (Trunc32to16 (Const32 [c])) => (Const16 [int16(c)]) (Trunc64to8 (Const64 [c])) => (Const8 [int8(c)]) (Trunc64to16 (Const64 [c])) => (Const16 [int16(c)]) (Trunc64to32 (Const64 [c])) => (Const32 [int32(c)]) (Cvt64Fto32F (Const64F [c])) => (Const32F [float32(c)]) (Cvt32Fto64F (Const32F [c])) => (Const64F [float64(c)]) (Cvt32to32F (Const32 [c])) => (Const32F [float32(c)]) (Cvt32to64F (Const32 [c])) => (Const64F [float64(c)]) (Cvt64to32F (Const64 [c])) => (Const32F [float32(c)]) (Cvt64to64F (Const64 [c])) => (Const64F [float64(c)]) (Cvt32Fto32 (Const32F [c])) => (Const32 [int32(c)]) (Cvt32Fto64 (Const32F [c])) => (Const64 [int64(c)]) (Cvt64Fto32 (Const64F [c])) => (Const32 [int32(c)]) (Cvt64Fto64 (Const64F [c])) => (Const64 [int64(c)]) (Round32F x:(Const32F)) => x (Round64F x:(Const64F)) => x (CvtBoolToUint8 (ConstBool [false])) => (Const8 [0]) (CvtBoolToUint8 (ConstBool [true])) => (Const8 [1]) (Trunc16to8 (ZeroExt8to16 x)) => x (Trunc32to8 (ZeroExt8to32 x)) => x (Trunc32to16 (ZeroExt8to32 x)) => (ZeroExt8to16 x) (Trunc32to16 (ZeroExt16to32 x)) => x (Trunc64to8 (ZeroExt8to64 x)) => x (Trunc64to16 (ZeroExt8to64 x)) => (ZeroExt8to16 x) (Trunc64to16 (ZeroExt16to64 x)) => x (Trunc64to32 (ZeroExt8to64 x)) => (ZeroExt8to32 x) (Trunc64to32 (ZeroExt16to64 x)) => (ZeroExt16to32 x) (Trunc64to32 (ZeroExt32to64 x)) => x (Trunc16to8 (SignExt8to16 x)) => x (Trunc32to8 (SignExt8to32 x)) => x (Trunc32to16 (SignExt8to32 x)) => (SignExt8to16 x) (Trunc32to16 (SignExt16to32 x)) => x (Trunc64to8 (SignExt8to64 x)) => x (Trunc64to16 (SignExt8to64 x)) => (SignExt8to16 x) (Trunc64to16 (SignExt16to64 x)) => x (Trunc64to32 (SignExt8to64 x)) => (SignExt8to32 x) (Trunc64to32 (SignExt16to64 x)) => (SignExt16to32 x) 
(Trunc64to32 (SignExt32to64 x)) => x (ZeroExt8to16 (Const8 [c])) => (Const16 [int16( uint8(c))]) (ZeroExt8to32 (Const8 [c])) => (Const32 [int32( uint8(c))]) (ZeroExt8to64 (Const8 [c])) => (Const64 [int64( uint8(c))]) (ZeroExt16to32 (Const16 [c])) => (Const32 [int32(uint16(c))]) (ZeroExt16to64 (Const16 [c])) => (Const64 [int64(uint16(c))]) (ZeroExt32to64 (Const32 [c])) => (Const64 [int64(uint32(c))]) (SignExt8to16 (Const8 [c])) => (Const16 [int16(c)]) (SignExt8to32 (Const8 [c])) => (Const32 [int32(c)]) (SignExt8to64 (Const8 [c])) => (Const64 [int64(c)]) (SignExt16to32 (Const16 [c])) => (Const32 [int32(c)]) (SignExt16to64 (Const16 [c])) => (Const64 [int64(c)]) (SignExt32to64 (Const32 [c])) => (Const64 [int64(c)]) (Neg8 (Const8 [c])) => (Const8 [-c]) (Neg16 (Const16 [c])) => (Const16 [-c]) (Neg32 (Const32 [c])) => (Const32 [-c]) (Neg64 (Const64 [c])) => (Const64 [-c]) (Neg32F (Const32F [c])) && c != 0 => (Const32F [-c]) (Neg64F (Const64F [c])) && c != 0 => (Const64F [-c]) (Add8 (Const8 [c]) (Const8 [d])) => (Const8 [c+d]) (Add16 (Const16 [c]) (Const16 [d])) => (Const16 [c+d]) (Add32 (Const32 [c]) (Const32 [d])) => (Const32 [c+d]) (Add64 (Const64 [c]) (Const64 [d])) => (Const64 [c+d]) (Add32F (Const32F [c]) (Const32F [d])) && c+d == c+d => (Const32F [c+d]) (Add64F (Const64F [c]) (Const64F [d])) && c+d == c+d => (Const64F [c+d]) (AddPtr <t> x (Const64 [c])) => (OffPtr <t> x [c]) (AddPtr <t> x (Const32 [c])) => (OffPtr <t> x [int64(c)]) (Sub8 (Const8 [c]) (Const8 [d])) => (Const8 [c-d]) (Sub16 (Const16 [c]) (Const16 [d])) => (Const16 [c-d]) (Sub32 (Const32 [c]) (Const32 [d])) => (Const32 [c-d]) (Sub64 (Const64 [c]) (Const64 [d])) => (Const64 [c-d]) (Sub32F (Const32F [c]) (Const32F [d])) && c-d == c-d => (Const32F [c-d]) (Sub64F (Const64F [c]) (Const64F [d])) && c-d == c-d => (Const64F [c-d]) (Mul8 (Const8 [c]) (Const8 [d])) => (Const8 [c*d]) (Mul16 (Const16 [c]) (Const16 [d])) => (Const16 [c*d]) (Mul32 (Const32 [c]) (Const32 [d])) => (Const32 [c*d]) (Mul64 (Const64 [c]) (Const64 [d])) => (Const64 [c*d]) (Mul32F (Const32F [c]) (Const32F [d])) && c*d == c*d => (Const32F [c*d]) (Mul64F (Const64F [c]) (Const64F [d])) && c*d == c*d => (Const64F [c*d]) (And8 (Const8 [c]) (Const8 [d])) => (Const8 [c&d]) (And16 (Const16 [c]) (Const16 [d])) => (Const16 [c&d]) (And32 (Const32 [c]) (Const32 [d])) => (Const32 [c&d]) (And64 (Const64 [c]) (Const64 [d])) => (Const64 [c&d]) (Or8 (Const8 [c]) (Const8 [d])) => (Const8 [c|d]) (Or16 (Const16 [c]) (Const16 [d])) => (Const16 [c|d]) (Or32 (Const32 [c]) (Const32 [d])) => (Const32 [c|d]) (Or64 (Const64 [c]) (Const64 [d])) => (Const64 [c|d]) (Xor8 (Const8 [c]) (Const8 [d])) => (Const8 [c^d]) (Xor16 (Const16 [c]) (Const16 [d])) => (Const16 [c^d]) (Xor32 (Const32 [c]) (Const32 [d])) => (Const32 [c^d]) (Xor64 (Const64 [c]) (Const64 [d])) => (Const64 [c^d]) (Ctz64 (Const64 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz64(c))]) (Ctz32 (Const32 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz32(c))]) (Ctz16 (Const16 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz16(c))]) (Ctz8 (Const8 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz8(c))]) (Ctz64 (Const64 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz64(c))]) (Ctz32 (Const32 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz32(c))]) (Ctz16 (Const16 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz16(c))]) (Ctz8 (Const8 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz8(c))]) (Div8 (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [c/d]) (Div16 (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [c/d]) 
(Div32 (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [c/d]) (Div64 (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [c/d]) (Div8u (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [int8(uint8(c)/uint8(d))]) (Div16u (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [int16(uint16(c)/uint16(d))]) (Div32u (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [int32(uint32(c)/uint32(d))]) (Div64u (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [int64(uint64(c)/uint64(d))]) (Div32F (Const32F [c]) (Const32F [d])) && c/d == c/d => (Const32F [c/d]) (Div64F (Const64F [c]) (Const64F [d])) && c/d == c/d => (Const64F [c/d]) (Select0 (Div128u (Const64 [0]) lo y)) => (Div64u lo y) (Select1 (Div128u (Const64 [0]) lo y)) => (Mod64u lo y) (Not (ConstBool [c])) => (ConstBool [!c]) (Floor (Const64F [c])) => (Const64F [math.Floor(c)]) (Ceil (Const64F [c])) => (Const64F [math.Ceil(c)]) (Trunc (Const64F [c])) => (Const64F [math.Trunc(c)]) (RoundToEven (Const64F [c])) => (Const64F [math.RoundToEven(c)]) // Convert x * 1 to x. (Mul(8|16|32|64) (Const(8|16|32|64) [1]) x) => x (Select0 (Mul(32|64)uover (Const(32|64) [1]) x)) => x (Select1 (Mul(32|64)uover (Const(32|64) [1]) x)) => (ConstBool [false]) // Convert x * -1 to -x. (Mul(8|16|32|64) (Const(8|16|32|64) [-1]) x) => (Neg(8|16|32|64) x) // DeMorgan's Laws (And(8|16|32|64) <t> (Com(8|16|32|64) x) (Com(8|16|32|64) y)) => (Com(8|16|32|64) (Or(8|16|32|64) <t> x y)) (Or(8|16|32|64) <t> (Com(8|16|32|64) x) (Com(8|16|32|64) y)) => (Com(8|16|32|64) (And(8|16|32|64) <t> x y)) // Convert multiplication by a power of two to a shift. (Mul8 <t> n (Const8 [c])) && isPowerOfTwo8(c) => (Lsh8x64 <t> n (Const64 <typ.UInt64> [log8(c)])) (Mul16 <t> n (Const16 [c])) && isPowerOfTwo16(c) => (Lsh16x64 <t> n (Const64 <typ.UInt64> [log16(c)])) (Mul32 <t> n (Const32 [c])) && isPowerOfTwo32(c) => (Lsh32x64 <t> n (Const64 <typ.UInt64> [log32(c)])) (Mul64 <t> n (Const64 [c])) && isPowerOfTwo64(c) => (Lsh64x64 <t> n (Const64 <typ.UInt64> [log64(c)])) (Mul8 <t> n (Const8 [c])) && t.IsSigned() && isPowerOfTwo8(-c) => (Neg8 (Lsh8x64 <t> n (Const64 <typ.UInt64> [log8(-c)]))) (Mul16 <t> n (Const16 [c])) && t.IsSigned() && isPowerOfTwo16(-c) => (Neg16 (Lsh16x64 <t> n (Const64 <typ.UInt64> [log16(-c)]))) (Mul32 <t> n (Const32 [c])) && t.IsSigned() && isPowerOfTwo32(-c) => (Neg32 (Lsh32x64 <t> n (Const64 <typ.UInt64> [log32(-c)]))) (Mul64 <t> n (Const64 [c])) && t.IsSigned() && isPowerOfTwo64(-c) => (Neg64 (Lsh64x64 <t> n (Const64 <typ.UInt64> [log64(-c)]))) (Mod8 (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [c % d]) (Mod16 (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [c % d]) (Mod32 (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [c % d]) (Mod64 (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [c % d]) (Mod8u (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [int8(uint8(c) % uint8(d))]) (Mod16u (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [int16(uint16(c) % uint16(d))]) (Mod32u (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [int32(uint32(c) % uint32(d))]) (Mod64u (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [int64(uint64(c) % uint64(d))]) (Lsh64x64 (Const64 [c]) (Const64 [d])) => (Const64 [c << uint64(d)]) (Rsh64x64 (Const64 [c]) (Const64 [d])) => (Const64 [c >> uint64(d)]) (Rsh64Ux64 (Const64 [c]) (Const64 [d])) => (Const64 [int64(uint64(c) >> uint64(d))]) (Lsh32x64 (Const32 [c]) (Const64 [d])) => (Const32 [c << uint64(d)]) (Rsh32x64 (Const32 [c]) (Const64 [d])) => (Const32 [c >> uint64(d)]) (Rsh32Ux64 (Const32 [c]) (Const64 [d])) => (Const32 [int32(uint32(c) >> 
uint64(d))]) (Lsh16x64 (Const16 [c]) (Const64 [d])) => (Const16 [c << uint64(d)]) (Rsh16x64 (Const16 [c]) (Const64 [d])) => (Const16 [c >> uint64(d)]) (Rsh16Ux64 (Const16 [c]) (Const64 [d])) => (Const16 [int16(uint16(c) >> uint64(d))]) (Lsh8x64 (Const8 [c]) (Const64 [d])) => (Const8 [c << uint64(d)]) (Rsh8x64 (Const8 [c]) (Const64 [d])) => (Const8 [c >> uint64(d)]) (Rsh8Ux64 (Const8 [c]) (Const64 [d])) => (Const8 [int8(uint8(c) >> uint64(d))]) // Fold IsInBounds when the range of the index cannot exceed the limit. (IsInBounds (ZeroExt8to32 _) (Const32 [c])) && (1 << 8) <= c => (ConstBool [true]) (IsInBounds (ZeroExt8to64 _) (Const64 [c])) && (1 << 8) <= c => (ConstBool [true]) (IsInBounds (ZeroExt16to32 _) (Const32 [c])) && (1 << 16) <= c => (ConstBool [true]) (IsInBounds (ZeroExt16to64 _) (Const64 [c])) && (1 << 16) <= c => (ConstBool [true]) (IsInBounds x x) => (ConstBool [false]) (IsInBounds (And8 (Const8 [c]) _) (Const8 [d])) && 0 <= c && c < d => (ConstBool [true]) (IsInBounds (ZeroExt8to16 (And8 (Const8 [c]) _)) (Const16 [d])) && 0 <= c && int16(c) < d => (ConstBool [true]) (IsInBounds (ZeroExt8to32 (And8 (Const8 [c]) _)) (Const32 [d])) && 0 <= c && int32(c) < d => (ConstBool [true]) (IsInBounds (ZeroExt8to64 (And8 (Const8 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true]) (IsInBounds (And16 (Const16 [c]) _) (Const16 [d])) && 0 <= c && c < d => (ConstBool [true]) (IsInBounds (ZeroExt16to32 (And16 (Const16 [c]) _)) (Const32 [d])) && 0 <= c && int32(c) < d => (ConstBool [true]) (IsInBounds (ZeroExt16to64 (And16 (Const16 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true]) (IsInBounds (And32 (Const32 [c]) _) (Const32 [d])) && 0 <= c && c < d => (ConstBool [true]) (IsInBounds (ZeroExt32to64 (And32 (Const32 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true]) (IsInBounds (And64 (Const64 [c]) _) (Const64 [d])) && 0 <= c && c < d => (ConstBool [true]) (IsInBounds (Const32 [c]) (Const32 [d])) => (ConstBool [0 <= c && c < d]) (IsInBounds (Const64 [c]) (Const64 [d])) => (ConstBool [0 <= c && c < d]) // (Mod64u x y) is always between 0 (inclusive) and y (exclusive). (IsInBounds (Mod32u _ y) y) => (ConstBool [true]) (IsInBounds (Mod64u _ y) y) => (ConstBool [true]) // Right shifting an unsigned number limits its value. 
(IsInBounds (ZeroExt8to64 (Rsh8Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
(IsInBounds (ZeroExt8to32 (Rsh8Ux64 _ (Const64 [c]))) (Const32 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
(IsInBounds (ZeroExt8to16 (Rsh8Ux64 _ (Const64 [c]))) (Const16 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
(IsInBounds (Rsh8Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
(IsInBounds (ZeroExt16to64 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 16 && 1<<uint(16-c)-1 < d => (ConstBool [true])
(IsInBounds (ZeroExt16to32 (Rsh16Ux64 _ (Const64 [c]))) (Const32 [d])) && 0 < c && c < 16 && 1<<uint(16-c)-1 < d => (ConstBool [true])
(IsInBounds (Rsh16Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 16 && 1<<uint(16-c)-1 < d => (ConstBool [true])
(IsInBounds (ZeroExt32to64 (Rsh32Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 32 && 1<<uint(32-c)-1 < d => (ConstBool [true])
(IsInBounds (Rsh32Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 32 && 1<<uint(32-c)-1 < d => (ConstBool [true])
(IsInBounds (Rsh64Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 64 && 1<<uint(64-c)-1 < d => (ConstBool [true])

(IsSliceInBounds x x) => (ConstBool [true])
(IsSliceInBounds (And32 (Const32 [c]) _) (Const32 [d])) && 0 <= c && c <= d => (ConstBool [true])
(IsSliceInBounds (And64 (Const64 [c]) _) (Const64 [d])) && 0 <= c && c <= d => (ConstBool [true])
(IsSliceInBounds (Const32 [0]) _) => (ConstBool [true])
(IsSliceInBounds (Const64 [0]) _) => (ConstBool [true])
(IsSliceInBounds (Const32 [c]) (Const32 [d])) => (ConstBool [0 <= c && c <= d])
(IsSliceInBounds (Const64 [c]) (Const64 [d])) => (ConstBool [0 <= c && c <= d])
(IsSliceInBounds (SliceLen x) (SliceCap x)) => (ConstBool [true])

(Eq(64|32|16|8) x x) => (ConstBool [true])
(EqB (ConstBool [c]) (ConstBool [d])) => (ConstBool [c == d])
(EqB (ConstBool [false]) x) => (Not x)
(EqB (ConstBool [true]) x) => x

(Neq(64|32|16|8) x x) => (ConstBool [false])
(NeqB (ConstBool [c]) (ConstBool [d])) => (ConstBool [c != d])
(NeqB (ConstBool [false]) x) => x
(NeqB (ConstBool [true]) x) => (Not x)
(NeqB (Not x) (Not y)) => (NeqB x y)

(Eq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Eq64 (Const64 <t> [c-d]) x)
(Eq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Eq32 (Const32 <t> [c-d]) x)
(Eq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Eq16 (Const16 <t> [c-d]) x)
(Eq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Eq8 (Const8 <t> [c-d]) x)

(Neq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Neq64 (Const64 <t> [c-d]) x)
(Neq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Neq32 (Const32 <t> [c-d]) x)
(Neq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Neq16 (Const16 <t> [c-d]) x)
(Neq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Neq8 (Const8 <t> [c-d]) x)

// signed integer range: ( c <= x && x (<|<=) d ) -> ( unsigned(x-c) (<|<=) unsigned(d-c) )
(AndB (Leq64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
(AndB (Leq32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
(AndB (Leq16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
(AndB (Leq8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))

// signed integer range: ( c < x && x (<|<=) d ) -> ( unsigned(x-(c+1)) (<|<=) unsigned(d-(c+1)) )
(AndB (Less64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
(AndB (Less32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
(AndB (Less16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
(AndB (Less8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))

// unsigned integer range: ( c <= x && x (<|<=) d ) -> ( x-c (<|<=) d-c )
(AndB (Leq64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c) => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
(AndB (Leq32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c) => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
(AndB (Leq16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c) => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
(AndB (Leq8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c) => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))

// unsigned integer range: ( c < x && x (<|<=) d ) -> ( x-(c+1) (<|<=) d-(c+1) )
(AndB (Less64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c) => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
(AndB (Less32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c) => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
(AndB (Less16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c) => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
(AndB (Less8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c) => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))

// signed integer range: ( c (<|<=) x || x < d ) -> ( unsigned(c-d) (<|<=) unsigned(x-d) )
(OrB ((Less|Leq)64 (Const64 [c]) x) (Less64 x (Const64 [d]))) && c >= d => ((Less|Leq)64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
(OrB ((Less|Leq)32 (Const32 [c]) x) (Less32 x (Const32 [d]))) && c >= d => ((Less|Leq)32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
(OrB ((Less|Leq)16 (Const16 [c]) x) (Less16 x (Const16 [d]))) && c >= d => ((Less|Leq)16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
(OrB ((Less|Leq)8 (Const8 [c]) x) (Less8 x (Const8 [d]))) && c >= d => ((Less|Leq)8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))

// signed integer range: ( c (<|<=) x || x <= d ) -> ( unsigned(c-(d+1)) (<|<=) unsigned(x-(d+1)) )
(OrB ((Less|Leq)64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
(OrB ((Less|Leq)32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
(OrB ((Less|Leq)16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
(OrB ((Less|Leq)8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))

// unsigned integer range: ( c (<|<=) x || x < d ) -> ( c-d (<|<=) x-d )
(OrB ((Less|Leq)64U (Const64 [c]) x) (Less64U x (Const64 [d]))) && uint64(c) >= uint64(d) => ((Less|Leq)64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
(OrB ((Less|Leq)32U (Const32 [c]) x) (Less32U x (Const32 [d]))) && uint32(c) >= uint32(d) => ((Less|Leq)32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
(OrB ((Less|Leq)16U (Const16 [c]) x) (Less16U x (Const16 [d]))) && uint16(c) >= uint16(d) => ((Less|Leq)16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
(OrB ((Less|Leq)8U (Const8 [c]) x) (Less8U x (Const8 [d]))) && uint8(c) >= uint8(d) => ((Less|Leq)8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))

// unsigned integer range: ( c (<|<=) x || x <= d ) -> ( c-(d+1) (<|<=) x-(d+1) )
(OrB ((Less|Leq)64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) && uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) => ((Less|Leq)64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
(OrB ((Less|Leq)32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) && uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) => ((Less|Leq)32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
(OrB ((Less|Leq)16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) && uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) => ((Less|Leq)16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
(OrB ((Less|Leq)8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) && uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) => ((Less|Leq)8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))

// Canonicalize x-const to x+(-const)
(Sub64 x (Const64 <t> [c])) && x.Op != OpConst64 => (Add64 (Const64 <t> [-c]) x)
(Sub32 x (Const32 <t> [c])) && x.Op != OpConst32 => (Add32 (Const32 <t> [-c]) x)
(Sub16 x (Const16 <t> [c])) && x.Op != OpConst16 => (Add16 (Const16 <t> [-c]) x)
(Sub8 x (Const8 <t> [c])) && x.Op != OpConst8 => (Add8 (Const8 <t> [-c]) x)

// fold negation into comparison operators
(Not (Eq(64|32|16|8|B|Ptr|64F|32F) x y)) => (Neq(64|32|16|8|B|Ptr|64F|32F) x y)
(Not (Neq(64|32|16|8|B|Ptr|64F|32F) x y)) => (Eq(64|32|16|8|B|Ptr|64F|32F) x y)
(Not (Less(64|32|16|8) x y)) => (Leq(64|32|16|8) y x)
(Not (Less(64|32|16|8)U x y)) => (Leq(64|32|16|8)U y x)
(Not (Leq(64|32|16|8) x y)) => (Less(64|32|16|8) y x)
(Not (Leq(64|32|16|8)U x y)) => (Less(64|32|16|8)U y x)

// Distribute multiplication c * (d+x) -> c*d + c*x. Useful for:
// a[i].b = ...; a[i+1].b = ...
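// Illustrative sketch (not a rewrite rule; base and elemSize are made-up
// names): after distribution, the addresses of a[i] and a[i+1] share the
// product elemSize*i, so the multiply is computed once:
//
//	p0 := base + elemSize*i            // &a[i]
//	p1 := base + elemSize*i + elemSize // &a[i+1], reuses elemSize*i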
(Mul64 (Const64 <t> [c]) (Add64 <t> (Const64 <t> [d]) x)) => (Add64 (Const64 <t> [c*d]) (Mul64 <t> (Const64 <t> [c]) x))
(Mul32 (Const32 <t> [c]) (Add32 <t> (Const32 <t> [d]) x)) => (Add32 (Const32 <t> [c*d]) (Mul32 <t> (Const32 <t> [c]) x))

// Rewrite x*y ± x*z to x*(y±z)
(Add(64|32|16|8) <t> (Mul(64|32|16|8) x y) (Mul(64|32|16|8) x z)) => (Mul(64|32|16|8) x (Add(64|32|16|8) <t> y z))
(Sub(64|32|16|8) <t> (Mul(64|32|16|8) x y) (Mul(64|32|16|8) x z)) => (Mul(64|32|16|8) x (Sub(64|32|16|8) <t> y z))

// rewrite shifts of 8/16/32 bit consts into 64 bit consts to reduce
// the number of the other rewrite rules for const shifts
(Lsh64x32 <t> x (Const32 [c])) => (Lsh64x64 x (Const64 <t> [int64(uint32(c))]))
(Lsh64x16 <t> x (Const16 [c])) => (Lsh64x64 x (Const64 <t> [int64(uint16(c))]))
(Lsh64x8 <t> x (Const8 [c])) => (Lsh64x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh64x32 <t> x (Const32 [c])) => (Rsh64x64 x (Const64 <t> [int64(uint32(c))]))
(Rsh64x16 <t> x (Const16 [c])) => (Rsh64x64 x (Const64 <t> [int64(uint16(c))]))
(Rsh64x8 <t> x (Const8 [c])) => (Rsh64x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh64Ux32 <t> x (Const32 [c])) => (Rsh64Ux64 x (Const64 <t> [int64(uint32(c))]))
(Rsh64Ux16 <t> x (Const16 [c])) => (Rsh64Ux64 x (Const64 <t> [int64(uint16(c))]))
(Rsh64Ux8 <t> x (Const8 [c])) => (Rsh64Ux64 x (Const64 <t> [int64(uint8(c))]))

(Lsh32x32 <t> x (Const32 [c])) => (Lsh32x64 x (Const64 <t> [int64(uint32(c))]))
(Lsh32x16 <t> x (Const16 [c])) => (Lsh32x64 x (Const64 <t> [int64(uint16(c))]))
(Lsh32x8 <t> x (Const8 [c])) => (Lsh32x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh32x32 <t> x (Const32 [c])) => (Rsh32x64 x (Const64 <t> [int64(uint32(c))]))
(Rsh32x16 <t> x (Const16 [c])) => (Rsh32x64 x (Const64 <t> [int64(uint16(c))]))
(Rsh32x8 <t> x (Const8 [c])) => (Rsh32x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh32Ux32 <t> x (Const32 [c])) => (Rsh32Ux64 x (Const64 <t> [int64(uint32(c))]))
(Rsh32Ux16 <t> x (Const16 [c])) => (Rsh32Ux64 x (Const64 <t> [int64(uint16(c))]))
(Rsh32Ux8 <t> x (Const8 [c])) => (Rsh32Ux64 x (Const64 <t> [int64(uint8(c))]))

(Lsh16x32 <t> x (Const32 [c])) => (Lsh16x64 x (Const64 <t> [int64(uint32(c))]))
(Lsh16x16 <t> x (Const16 [c])) => (Lsh16x64 x (Const64 <t> [int64(uint16(c))]))
(Lsh16x8 <t> x (Const8 [c])) => (Lsh16x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh16x32 <t> x (Const32 [c])) => (Rsh16x64 x (Const64 <t> [int64(uint32(c))]))
(Rsh16x16 <t> x (Const16 [c])) => (Rsh16x64 x (Const64 <t> [int64(uint16(c))]))
(Rsh16x8 <t> x (Const8 [c])) => (Rsh16x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh16Ux32 <t> x (Const32 [c])) => (Rsh16Ux64 x (Const64 <t> [int64(uint32(c))]))
(Rsh16Ux16 <t> x (Const16 [c])) => (Rsh16Ux64 x (Const64 <t> [int64(uint16(c))]))
(Rsh16Ux8 <t> x (Const8 [c])) => (Rsh16Ux64 x (Const64 <t> [int64(uint8(c))]))

(Lsh8x32 <t> x (Const32 [c])) => (Lsh8x64 x (Const64 <t> [int64(uint32(c))]))
(Lsh8x16 <t> x (Const16 [c])) => (Lsh8x64 x (Const64 <t> [int64(uint16(c))]))
(Lsh8x8 <t> x (Const8 [c])) => (Lsh8x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh8x32 <t> x (Const32 [c])) => (Rsh8x64 x (Const64 <t> [int64(uint32(c))]))
(Rsh8x16 <t> x (Const16 [c])) => (Rsh8x64 x (Const64 <t> [int64(uint16(c))]))
(Rsh8x8 <t> x (Const8 [c])) => (Rsh8x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh8Ux32 <t> x (Const32 [c])) => (Rsh8Ux64 x (Const64 <t> [int64(uint32(c))]))
(Rsh8Ux16 <t> x (Const16 [c])) => (Rsh8Ux64 x (Const64 <t> [int64(uint16(c))]))
(Rsh8Ux8 <t> x (Const8 [c])) => (Rsh8Ux64 x (Const64 <t> [int64(uint8(c))]))

// shifts by zero
(Lsh(64|32|16|8)x64 x (Const64 [0])) => x
(Rsh(64|32|16|8)x64 x (Const64 [0])) => x
(Rsh(64|32|16|8)Ux64 x (Const64 [0])) => x

// rotates by multiples of register width
(RotateLeft64 x (Const64 [c])) && c%64 == 0 => x
(RotateLeft32 x (Const32 [c])) && c%32 == 0 => x
(RotateLeft16 x (Const16 [c])) && c%16 == 0 => x
(RotateLeft8 x (Const8 [c])) && c%8 == 0 => x

// zero shifted
(Lsh64x(64|32|16|8) (Const64 [0]) _) => (Const64 [0])
(Rsh64x(64|32|16|8) (Const64 [0]) _) => (Const64 [0])
(Rsh64Ux(64|32|16|8) (Const64 [0]) _) => (Const64 [0])
(Lsh32x(64|32|16|8) (Const32 [0]) _) => (Const32 [0])
(Rsh32x(64|32|16|8) (Const32 [0]) _) => (Const32 [0])
(Rsh32Ux(64|32|16|8) (Const32 [0]) _) => (Const32 [0])
(Lsh16x(64|32|16|8) (Const16 [0]) _) => (Const16 [0])
(Rsh16x(64|32|16|8) (Const16 [0]) _) => (Const16 [0])
(Rsh16Ux(64|32|16|8) (Const16 [0]) _) => (Const16 [0])
(Lsh8x(64|32|16|8) (Const8 [0]) _) => (Const8 [0])
(Rsh8x(64|32|16|8) (Const8 [0]) _) => (Const8 [0])
(Rsh8Ux(64|32|16|8) (Const8 [0]) _) => (Const8 [0])

// large left shifts of all values, and right shifts of unsigned values
((Lsh64|Rsh64U)x64 _ (Const64 [c])) && uint64(c) >= 64 => (Const64 [0])
((Lsh32|Rsh32U)x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
((Lsh16|Rsh16U)x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
((Lsh8|Rsh8U)x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])

// combine const shifts
(Lsh64x64 <t> (Lsh64x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh64x64 x (Const64 <t> [c+d]))
(Lsh32x64 <t> (Lsh32x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh32x64 x (Const64 <t> [c+d]))
(Lsh16x64 <t> (Lsh16x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh16x64 x (Const64 <t> [c+d]))
(Lsh8x64 <t> (Lsh8x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh8x64 x (Const64 <t> [c+d]))

(Rsh64x64 <t> (Rsh64x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh64x64 x (Const64 <t> [c+d]))
(Rsh32x64 <t> (Rsh32x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh32x64 x (Const64 <t> [c+d]))
(Rsh16x64 <t> (Rsh16x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh16x64 x (Const64 <t> [c+d]))
(Rsh8x64 <t> (Rsh8x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh8x64 x (Const64 <t> [c+d]))

(Rsh64Ux64 <t> (Rsh64Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh64Ux64 x (Const64 <t> [c+d]))
(Rsh32Ux64 <t> (Rsh32Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh32Ux64 x (Const64 <t> [c+d]))
(Rsh16Ux64 <t> (Rsh16Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh16Ux64 x (Const64 <t> [c+d]))
(Rsh8Ux64 <t> (Rsh8Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh8Ux64 x (Const64 <t> [c+d]))

// Remove signed right shift before an unsigned right shift that extracts the sign bit.
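// Illustrative sketch (not a rewrite rule): an arithmetic right shift only
// replicates the sign bit, so it cannot change bit 7 of an 8-bit value:
//
//	uint8(x) >> 7          // the sign bit of x
//	uint8(int8(x)>>k) >> 7 // the same value for any k in [0,7]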
(Rsh8Ux64 (Rsh8x64 x _) (Const64 <t> [7] )) => (Rsh8Ux64 x (Const64 <t> [7] ))
(Rsh16Ux64 (Rsh16x64 x _) (Const64 <t> [15])) => (Rsh16Ux64 x (Const64 <t> [15]))
(Rsh32Ux64 (Rsh32x64 x _) (Const64 <t> [31])) => (Rsh32Ux64 x (Const64 <t> [31]))
(Rsh64Ux64 (Rsh64x64 x _) (Const64 <t> [63])) => (Rsh64Ux64 x (Const64 <t> [63]))

// Convert x>>c<<c to x&^(1<<c-1)
(Lsh64x64 i:(Rsh(64|64U)x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 64 && i.Uses == 1 => (And64 x (Const64 <v.Type> [int64(-1) << c]))
(Lsh32x64 i:(Rsh(32|32U)x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 32 && i.Uses == 1 => (And32 x (Const32 <v.Type> [int32(-1) << c]))
(Lsh16x64 i:(Rsh(16|16U)x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 16 && i.Uses == 1 => (And16 x (Const16 <v.Type> [int16(-1) << c]))
(Lsh8x64 i:(Rsh(8|8U)x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 8 && i.Uses == 1 => (And8 x (Const8 <v.Type> [int8(-1) << c]))

// similarly for x<<c>>c
(Rsh64Ux64 i:(Lsh64x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 64 && i.Uses == 1 => (And64 x (Const64 <v.Type> [int64(^uint64(0)>>c)]))
(Rsh32Ux64 i:(Lsh32x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 32 && i.Uses == 1 => (And32 x (Const32 <v.Type> [int32(^uint32(0)>>c)]))
(Rsh16Ux64 i:(Lsh16x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 16 && i.Uses == 1 => (And16 x (Const16 <v.Type> [int16(^uint16(0)>>c)]))
(Rsh8Ux64 i:(Lsh8x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 8 && i.Uses == 1 => (And8 x (Const8 <v.Type> [int8 (^uint8 (0)>>c)]))

// ((x >> c1) << c2) >> c3
(Rsh(64|32|16|8)Ux64 (Lsh(64|32|16|8)x64 (Rsh(64|32|16|8)Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) => (Rsh(64|32|16|8)Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))

// ((x << c1) >> c2) << c3
(Lsh(64|32|16|8)x64 (Rsh(64|32|16|8)Ux64 (Lsh(64|32|16|8)x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) => (Lsh(64|32|16|8)x64 x (Const64 <typ.UInt64> [c1-c2+c3]))

// (x >> c) & uppermask = 0
(And64 (Const64 [m]) (Rsh64Ux64 _ (Const64 [c]))) && c >= int64(64-ntz64(m)) => (Const64 [0])
(And32 (Const32 [m]) (Rsh32Ux64 _ (Const64 [c]))) && c >= int64(32-ntz32(m)) => (Const32 [0])
(And16 (Const16 [m]) (Rsh16Ux64 _ (Const64 [c]))) && c >= int64(16-ntz16(m)) => (Const16 [0])
(And8 (Const8 [m]) (Rsh8Ux64 _ (Const64 [c]))) && c >= int64(8-ntz8(m)) => (Const8 [0])

// (x << c) & lowermask = 0
(And64 (Const64 [m]) (Lsh64x64 _ (Const64 [c]))) && c >= int64(64-nlz64(m)) => (Const64 [0])
(And32 (Const32 [m]) (Lsh32x64 _ (Const64 [c]))) && c >= int64(32-nlz32(m)) => (Const32 [0])
(And16 (Const16 [m]) (Lsh16x64 _ (Const64 [c]))) && c >= int64(16-nlz16(m)) => (Const16 [0])
(And8 (Const8 [m]) (Lsh8x64 _ (Const64 [c]))) && c >= int64(8-nlz8(m)) => (Const8 [0])

// replace shifts with zero extensions
(Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) => (ZeroExt8to16 (Trunc16to8 <typ.UInt8> x))
(Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) => (ZeroExt8to32 (Trunc32to8 <typ.UInt8> x))
(Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) => (ZeroExt8to64 (Trunc64to8 <typ.UInt8> x))
(Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) => (ZeroExt16to32 (Trunc32to16 <typ.UInt16> x))
(Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) => (ZeroExt16to64 (Trunc64to16 <typ.UInt16> x))
(Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) => (ZeroExt32to64 (Trunc64to32 <typ.UInt32> x))

// replace shifts with sign extensions
(Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) => (SignExt8to16 (Trunc16to8 <typ.Int8> x))
(Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) => (SignExt8to32 (Trunc32to8 <typ.Int8> x))
(Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) => (SignExt8to64 (Trunc64to8 <typ.Int8> x))
(Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) => (SignExt16to32 (Trunc32to16 <typ.Int16> x))
(Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) => (SignExt16to64 (Trunc64to16 <typ.Int16> x))
(Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) => (SignExt32to64 (Trunc64to32 <typ.Int32> x))

// constant comparisons
(Eq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c == d])
(Neq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c != d])
(Less(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c < d])
(Leq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c <= d])

(Less64U (Const64 [c]) (Const64 [d])) => (ConstBool [uint64(c) < uint64(d)])
(Less32U (Const32 [c]) (Const32 [d])) => (ConstBool [uint32(c) < uint32(d)])
(Less16U (Const16 [c]) (Const16 [d])) => (ConstBool [uint16(c) < uint16(d)])
(Less8U (Const8 [c]) (Const8 [d])) => (ConstBool [ uint8(c) < uint8(d)])

(Leq64U (Const64 [c]) (Const64 [d])) => (ConstBool [uint64(c) <= uint64(d)])
(Leq32U (Const32 [c]) (Const32 [d])) => (ConstBool [uint32(c) <= uint32(d)])
(Leq16U (Const16 [c]) (Const16 [d])) => (ConstBool [uint16(c) <= uint16(d)])
(Leq8U (Const8 [c]) (Const8 [d])) => (ConstBool [ uint8(c) <= uint8(d)])

(Leq8 (Const8 [0]) (And8 _ (Const8 [c]))) && c >= 0 => (ConstBool [true])
(Leq16 (Const16 [0]) (And16 _ (Const16 [c]))) && c >= 0 => (ConstBool [true])
(Leq32 (Const32 [0]) (And32 _ (Const32 [c]))) && c >= 0 => (ConstBool [true])
(Leq64 (Const64 [0]) (And64 _ (Const64 [c]))) && c >= 0 => (ConstBool [true])

(Leq8 (Const8 [0]) (Rsh8Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
(Leq16 (Const16 [0]) (Rsh16Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
(Leq32 (Const32 [0]) (Rsh32Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
(Leq64 (Const64 [0]) (Rsh64Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])

// prefer equalities with zero
(Less(64|32|16|8) (Const(64|32|16|8) <t> [0]) x) && isNonNegative(x) => (Neq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
(Less(64|32|16|8) x (Const(64|32|16|8) <t> [1])) && isNonNegative(x) => (Eq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
(Less(64|32|16|8)U x (Const(64|32|16|8) <t> [1])) => (Eq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
(Leq(64|32|16|8)U (Const(64|32|16|8) <t> [1]) x) => (Neq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)

// prefer comparisons with zero
(Less(64|32|16|8) x (Const(64|32|16|8) <t> [1])) => (Leq(64|32|16|8) x (Const(64|32|16|8) <t> [0]))
(Leq(64|32|16|8) x (Const(64|32|16|8) <t> [-1])) => (Less(64|32|16|8) x (Const(64|32|16|8) <t> [0]))
(Leq(64|32|16|8) (Const(64|32|16|8) <t> [1]) x) => (Less(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
(Less(64|32|16|8) (Const(64|32|16|8) <t> [-1]) x) => (Leq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)

// constant floating point comparisons
(Eq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c == d])
(Eq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c == d])
(Neq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c != d])
(Neq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c != d])
(Less32F (Const32F [c]) (Const32F [d])) => (ConstBool [c < d])
(Less64F (Const64F [c]) (Const64F [d])) => (ConstBool [c < d])
(Leq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c <= d])
(Leq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c <= d])

// simplifications
(Or(64|32|16|8) x x) => x
(Or(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
(Or(64|32|16|8) (Const(64|32|16|8) [-1]) _) => (Const(64|32|16|8) [-1])
(Or(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1])

(And(64|32|16|8) x x) => x
(And(64|32|16|8) (Const(64|32|16|8) [-1]) x) => x
(And(64|32|16|8) (Const(64|32|16|8) [0]) _) => (Const(64|32|16|8) [0])
(And(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [0])

(Xor(64|32|16|8) x x) => (Const(64|32|16|8) [0])
(Xor(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
(Xor(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1])

(Add(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
(Sub(64|32|16|8) x x) => (Const(64|32|16|8) [0])
(Mul(64|32|16|8) (Const(64|32|16|8) [0]) _) => (Const(64|32|16|8) [0])
(Select0 (Mul(64|32)uover (Const(64|32) [0]) x)) => (Const(64|32) [0])
(Select1 (Mul(64|32)uover (Const(64|32) [0]) x)) => (ConstBool [false])

(Com(64|32|16|8) (Com(64|32|16|8) x)) => x
(Com(64|32|16|8) (Const(64|32|16|8) [c])) => (Const(64|32|16|8) [^c])

(Neg(64|32|16|8) (Sub(64|32|16|8) x y)) => (Sub(64|32|16|8) y x)
(Add(64|32|16|8) x (Neg(64|32|16|8) y)) => (Sub(64|32|16|8) x y)

(Xor(64|32|16|8) (Const(64|32|16|8) [-1]) x) => (Com(64|32|16|8) x)

(Sub(64|32|16|8) (Neg(64|32|16|8) x) (Com(64|32|16|8) x)) => (Const(64|32|16|8) [1])
(Sub(64|32|16|8) (Com(64|32|16|8) x) (Neg(64|32|16|8) x)) => (Const(64|32|16|8) [-1])
(Add(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1])

// Simplifications involving a common integer subexpression t:
// (t + x) - (t + y) == x - y
// (t + x) - (y + t) == x - y
// (x + t) - (y + t) == x - y
// (x + t) - (t + y) == x - y
// (x - t) + (t + y) == x + y
// (x - t) + (y + t) == x + y
(Sub(64|32|16|8) (Add(64|32|16|8) t x) (Add(64|32|16|8) t y)) => (Sub(64|32|16|8) x y)
(Add(64|32|16|8) (Sub(64|32|16|8) x t) (Add(64|32|16|8) t y)) => (Add(64|32|16|8) x y)

// ^(x-1) == ^x+1 == -x
(Add(64|32|16|8) (Const(64|32|16|8) [1]) (Com(64|32|16|8) x)) => (Neg(64|32|16|8) x)
(Com(64|32|16|8) (Add(64|32|16|8) (Const(64|32|16|8) [-1]) x)) => (Neg(64|32|16|8) x)

// -(-x) == x
(Neg(64|32|16|8) (Neg(64|32|16|8) x)) => x

// -^x == x+1
(Neg(64|32|16|8) <t> (Com(64|32|16|8) x)) => (Add(64|32|16|8) (Const(64|32|16|8) <t> [1]) x)

(And(64|32|16|8) x (And(64|32|16|8) x y)) => (And(64|32|16|8) x y)
(Or(64|32|16|8) x (Or(64|32|16|8) x y)) => (Or(64|32|16|8) x y)
(Xor(64|32|16|8) x (Xor(64|32|16|8) x y)) => y

// Fold comparisons with numeric bounds
(Less(64|32|16|8)U _ (Const(64|32|16|8) [0])) => (ConstBool [false])
(Leq(64|32|16|8)U (Const(64|32|16|8) [0]) _) => (ConstBool [true])
(Less(64|32|16|8)U (Const(64|32|16|8) [-1]) _) => (ConstBool [false])
(Leq(64|32|16|8)U _ (Const(64|32|16|8) [-1])) => (ConstBool [true])
(Less64 _ (Const64 [math.MinInt64])) => (ConstBool [false])
(Less32 _ (Const32 [math.MinInt32])) => (ConstBool [false])
(Less16 _ (Const16 [math.MinInt16])) => (ConstBool [false])
(Less8 _ (Const8 [math.MinInt8 ])) => (ConstBool [false])
(Leq64 (Const64 [math.MinInt64]) _) => (ConstBool [true])
(Leq32 (Const32 [math.MinInt32]) _) => (ConstBool [true])
(Leq16 (Const16 [math.MinInt16]) _) => (ConstBool [true])
(Leq8 (Const8 [math.MinInt8 ]) _) => (ConstBool [true])
(Less64 (Const64 [math.MaxInt64]) _) => (ConstBool [false])
(Less32 (Const32 [math.MaxInt32]) _) => (ConstBool [false])
(Less16 (Const16 [math.MaxInt16]) _) => (ConstBool [false])
(Less8 (Const8 [math.MaxInt8 ]) _) => (ConstBool [false])
(Leq64 _ (Const64 [math.MaxInt64])) => (ConstBool [true])
(Leq32 _ (Const32 [math.MaxInt32])) => (ConstBool [true])
(Leq16 _ (Const16 [math.MaxInt16])) => (ConstBool [true])
(Leq8 _ (Const8 [math.MaxInt8 ])) => (ConstBool [true])

// Canonicalize <= on numeric bounds and < near numeric bounds to ==
(Leq(64|32|16|8)U x c:(Const(64|32|16|8) [0])) => (Eq(64|32|16|8) x c)
(Leq(64|32|16|8)U c:(Const(64|32|16|8) [-1]) x) => (Eq(64|32|16|8) x c)
(Less(64|32|16|8)U x (Const(64|32|16|8) <t> [1])) => (Eq(64|32|16|8) x (Const(64|32|16|8) <t> [0]))
(Less(64|32|16|8)U (Const(64|32|16|8) <t> [-2]) x) => (Eq(64|32|16|8) x (Const(64|32|16|8) <t> [-1]))
(Leq64 x c:(Const64 [math.MinInt64])) => (Eq64 x c)
(Leq32 x c:(Const32 [math.MinInt32])) => (Eq32 x c)
(Leq16 x c:(Const16 [math.MinInt16])) => (Eq16 x c)
(Leq8 x c:(Const8 [math.MinInt8 ])) => (Eq8 x c)
(Leq64 c:(Const64 [math.MaxInt64]) x) => (Eq64 x c)
(Leq32 c:(Const32 [math.MaxInt32]) x) => (Eq32 x c)
(Leq16 c:(Const16 [math.MaxInt16]) x) => (Eq16 x c)
(Leq8 c:(Const8 [math.MaxInt8 ]) x) => (Eq8 x c)
(Less64 x (Const64 <t> [math.MinInt64+1])) => (Eq64 x (Const64 <t> [math.MinInt64]))
(Less32 x (Const32 <t> [math.MinInt32+1])) => (Eq32 x (Const32 <t> [math.MinInt32]))
(Less16 x (Const16 <t> [math.MinInt16+1])) => (Eq16 x (Const16 <t> [math.MinInt16]))
(Less8 x (Const8 <t> [math.MinInt8 +1])) => (Eq8 x (Const8 <t> [math.MinInt8 ]))
(Less64 (Const64 <t> [math.MaxInt64-1]) x) => (Eq64 x (Const64 <t> [math.MaxInt64]))
(Less32 (Const32 <t> [math.MaxInt32-1]) x) => (Eq32 x (Const32 <t> [math.MaxInt32]))
(Less16 (Const16 <t> [math.MaxInt16-1]) x) => (Eq16 x (Const16 <t> [math.MaxInt16]))
(Less8 (Const8 <t> [math.MaxInt8 -1]) x) => (Eq8 x (Const8 <t> [math.MaxInt8 ]))

// Ands clear bits. Ors set bits.
// If a subsequent Or will set all the bits
// that an And cleared, we can skip the And.
// This happens in bitmasking code like:
//	x &^= 3 << shift // clear two old bits
//	x |= v << shift  // set two new bits
// when shift is a small constant and v ends up a constant 3.
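// Illustrative instance (not a rule): with shift == 4, the And mask is
// c2 = ^(3<<4) = 0xCF and the Or mask is c1 = 3<<4 = 0x30, so c1|c2 == 0xFF
// and ^(c1|c2) == 0: every bit the And clears, the Or sets again, and the
// And can be dropped.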
(Or8 (And8 x (Const8 [c2])) (Const8 <t> [c1])) && ^(c1 | c2) == 0 => (Or8 (Const8 <t> [c1]) x)
(Or16 (And16 x (Const16 [c2])) (Const16 <t> [c1])) && ^(c1 | c2) == 0 => (Or16 (Const16 <t> [c1]) x)
(Or32 (And32 x (Const32 [c2])) (Const32 <t> [c1])) && ^(c1 | c2) == 0 => (Or32 (Const32 <t> [c1]) x)
(Or64 (And64 x (Const64 [c2])) (Const64 <t> [c1])) && ^(c1 | c2) == 0 => (Or64 (Const64 <t> [c1]) x)

(Trunc64to8 (And64 (Const64 [y]) x)) && y&0xFF == 0xFF => (Trunc64to8 x)
(Trunc64to16 (And64 (Const64 [y]) x)) && y&0xFFFF == 0xFFFF => (Trunc64to16 x)
(Trunc64to32 (And64 (Const64 [y]) x)) && y&0xFFFFFFFF == 0xFFFFFFFF => (Trunc64to32 x)
(Trunc32to8 (And32 (Const32 [y]) x)) && y&0xFF == 0xFF => (Trunc32to8 x)
(Trunc32to16 (And32 (Const32 [y]) x)) && y&0xFFFF == 0xFFFF => (Trunc32to16 x)
(Trunc16to8 (And16 (Const16 [y]) x)) && y&0xFF == 0xFF => (Trunc16to8 x)

(ZeroExt8to64 (Trunc64to8 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 56 => x
(ZeroExt16to64 (Trunc64to16 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 48 => x
(ZeroExt32to64 (Trunc64to32 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 32 => x
(ZeroExt8to32 (Trunc32to8 x:(Rsh32Ux64 _ (Const64 [s])))) && s >= 24 => x
(ZeroExt16to32 (Trunc32to16 x:(Rsh32Ux64 _ (Const64 [s])))) && s >= 16 => x
(ZeroExt8to16 (Trunc16to8 x:(Rsh16Ux64 _ (Const64 [s])))) && s >= 8 => x

(SignExt8to64 (Trunc64to8 x:(Rsh64x64 _ (Const64 [s])))) && s >= 56 => x
(SignExt16to64 (Trunc64to16 x:(Rsh64x64 _ (Const64 [s])))) && s >= 48 => x
(SignExt32to64 (Trunc64to32 x:(Rsh64x64 _ (Const64 [s])))) && s >= 32 => x
(SignExt8to32 (Trunc32to8 x:(Rsh32x64 _ (Const64 [s])))) && s >= 24 => x
(SignExt16to32 (Trunc32to16 x:(Rsh32x64 _ (Const64 [s])))) && s >= 16 => x
(SignExt8to16 (Trunc16to8 x:(Rsh16x64 _ (Const64 [s])))) && s >= 8 => x

(Slicemask (Const32 [x])) && x > 0 => (Const32 [-1])
(Slicemask (Const32 [0])) => (Const32 [0])
(Slicemask (Const64 [x])) && x > 0 => (Const64 [-1])
(Slicemask (Const64 [0])) => (Const64 [0])

// simplifications often used for lengths. e.g. len(s[i:i+5])==5
(Sub(64|32|16|8) (Add(64|32|16|8) x y) x) => y
(Sub(64|32|16|8) (Add(64|32|16|8) x y) y) => x
(Sub(64|32|16|8) (Sub(64|32|16|8) x y) x) => (Neg(64|32|16|8) y)
(Sub(64|32|16|8) x (Add(64|32|16|8) x y)) => (Neg(64|32|16|8) y)
(Add(64|32|16|8) x (Sub(64|32|16|8) y x)) => y
(Add(64|32|16|8) x (Add(64|32|16|8) y (Sub(64|32|16|8) z x))) => (Add(64|32|16|8) y z)

// basic phi simplifications
(Phi (Const8 [c]) (Const8 [c])) => (Const8 [c])
(Phi (Const16 [c]) (Const16 [c])) => (Const16 [c])
(Phi (Const32 [c]) (Const32 [c])) => (Const32 [c])
(Phi (Const64 [c]) (Const64 [c])) => (Const64 [c])

// slice and interface comparisons
// The frontend ensures that we can only compare against nil,
// so we need only compare the first word (interface type or slice ptr).
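// Illustrative sketch (not a rule): by the rules below, a nil check like
//
//	var s []byte
//	_ = s == nil // roughly EqPtr(SlicePtr(s), ConstNil)
//
// needs a single pointer comparison; the len and cap words are never inspected.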
(EqInter x y) => (EqPtr (ITab x) (ITab y))
(NeqInter x y) => (NeqPtr (ITab x) (ITab y))
(EqSlice x y) => (EqPtr (SlicePtr x) (SlicePtr y))
(NeqSlice x y) => (NeqPtr (SlicePtr x) (SlicePtr y))

// Load of store of same address, with compatibly typed value and same size
(Load <t1> p1 (Store {t2} p2 x _))
  && isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size()
  => x
(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 x _)))
  && isSamePtr(p1, p3) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size()
  && disjoint(p3, t3.Size(), p2, t2.Size())
  => x
(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 x _))))
  && isSamePtr(p1, p4) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size()
  && disjoint(p4, t4.Size(), p2, t2.Size())
  && disjoint(p4, t4.Size(), p3, t3.Size())
  => x
(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 x _)))))
  && isSamePtr(p1, p5) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size()
  && disjoint(p5, t5.Size(), p2, t2.Size())
  && disjoint(p5, t5.Size(), p3, t3.Size())
  && disjoint(p5, t5.Size(), p4, t4.Size())
  => x

// Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) => (Const64F [math.Float64frombits(uint64(x))])
(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) => (Const32F [math.Float32frombits(uint32(x))])
(Load <t1> p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) => (Const64 [int64(math.Float64bits(x))])
(Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) => (Const32 [int32(math.Float32bits(x))])

// Float Loads up to Zeros so they can be constant folded.
(Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ mem:(Zero [n] p3 _)))
  && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && CanSSA(t1)
  && disjoint(op, t1.Size(), p2, t2.Size())
  => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p3) mem)
(Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ mem:(Zero [n] p4 _))))
  && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && CanSSA(t1)
  && disjoint(op, t1.Size(), p2, t2.Size())
  && disjoint(op, t1.Size(), p3, t3.Size())
  => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p4) mem)
(Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ mem:(Zero [n] p5 _)))))
  && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && CanSSA(t1)
  && disjoint(op, t1.Size(), p2, t2.Size())
  && disjoint(op, t1.Size(), p3, t3.Size())
  && disjoint(op, t1.Size(), p4, t4.Size())
  => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p5) mem)
(Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ mem:(Zero [n] p6 _))))))
  && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && CanSSA(t1)
  && disjoint(op, t1.Size(), p2, t2.Size())
  && disjoint(op, t1.Size(), p3, t3.Size())
  && disjoint(op, t1.Size(), p4, t4.Size())
  && disjoint(op, t1.Size(), p5, t5.Size())
  => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p6) mem)

// Zero to Load forwarding.
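// Illustrative sketch (not a rule): a load from freshly zeroed memory can
// fold to a zero constant, e.g.
//
//	var a [8]int64 // Zero [64]
//	x := a[3]      // Load at offset 24; 64 >= 24+8, so x folds to (Const64 [0])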
(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _)) && t1.IsBoolean() && isSamePtr(p1, p2) && n >= o + 1 => (ConstBool [false])
(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _)) && is8BitInt(t1) && isSamePtr(p1, p2) && n >= o + 1 => (Const8 [0])
(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _)) && is16BitInt(t1) && isSamePtr(p1, p2) && n >= o + 2 => (Const16 [0])
(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _)) && is32BitInt(t1) && isSamePtr(p1, p2) && n >= o + 4 => (Const32 [0])
(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _)) && is64BitInt(t1) && isSamePtr(p1, p2) && n >= o + 8 => (Const64 [0])
(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _)) && is32BitFloat(t1) && isSamePtr(p1, p2) && n >= o + 4 => (Const32F [0])
(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _)) && is64BitFloat(t1) && isSamePtr(p1, p2) && n >= o + 8 => (Const64F [0])

// Eliminate stores of values that have just been loaded from the same location.
// We also handle the common case where there are some intermediate stores.
(Store {t1} p1 (Load <t2> p2 mem) mem)
  && isSamePtr(p1, p2) && t2.Size() == t1.Size()
  => mem
(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ oldmem))
  && isSamePtr(p1, p2) && t2.Size() == t1.Size()
  && disjoint(p1, t1.Size(), p3, t3.Size())
  => mem
(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ oldmem)))
  && isSamePtr(p1, p2) && t2.Size() == t1.Size()
  && disjoint(p1, t1.Size(), p3, t3.Size())
  && disjoint(p1, t1.Size(), p4, t4.Size())
  => mem
(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ oldmem))))
  && isSamePtr(p1, p2) && t2.Size() == t1.Size()
  && disjoint(p1, t1.Size(), p3, t3.Size())
  && disjoint(p1, t1.Size(), p4, t4.Size())
  && disjoint(p1, t1.Size(), p5, t5.Size())
  => mem

// Don't Store zeros to cleared variables.
(Store {t} (OffPtr [o] p1) x mem:(Zero [n] p2 _))
  && isConstZero(x)
  && o >= 0 && t.Size() + o <= n && isSamePtr(p1, p2)
  => mem
(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Zero [n] p3 _)))
  && isConstZero(x)
  && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p3)
  && disjoint(op, t1.Size(), p2, t2.Size())
  => mem
(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Zero [n] p4 _))))
  && isConstZero(x)
  && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p4)
  && disjoint(op, t1.Size(), p2, t2.Size())
  && disjoint(op, t1.Size(), p3, t3.Size())
  => mem
(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Zero [n] p5 _)))))
  && isConstZero(x)
  && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p5)
  && disjoint(op, t1.Size(), p2, t2.Size())
  && disjoint(op, t1.Size(), p3, t3.Size())
  && disjoint(op, t1.Size(), p4, t4.Size())
  => mem

// Collapse OffPtr
(OffPtr (OffPtr p [y]) [x]) => (OffPtr p [x+y])
(OffPtr p [0]) && v.Type.Compare(p.Type) == types.CMPeq => p

// indexing operations
// Note: bounds check has already been done
(PtrIndex <t> ptr idx) && config.PtrSize == 4 && is32Bit(t.Elem().Size()) => (AddPtr ptr (Mul32 <typ.Int> idx (Const32 <typ.Int> [int32(t.Elem().Size())])))
(PtrIndex <t> ptr idx) && config.PtrSize == 8 => (AddPtr ptr (Mul64 <typ.Int> idx (Const64 <typ.Int> [t.Elem().Size()])))

// struct operations
(StructSelect (StructMake1 x)) => x
(StructSelect [0] (StructMake2 x _)) => x
(StructSelect [1] (StructMake2 _ x)) => x
(StructSelect [0] (StructMake3 x _ _)) => x
(StructSelect [1] (StructMake3 _ x _)) => x
(StructSelect [2] (StructMake3 _ _ x)) => x
(StructSelect [0] (StructMake4 x _ _ _)) => x
(StructSelect [1] (StructMake4 _ x _ _)) => x
(StructSelect [2] (StructMake4 _ _ x _)) => x
(StructSelect [3] (StructMake4 _ _ _ x)) => x

(Load <t> _ _) && t.IsStruct() && t.NumFields() == 0 && CanSSA(t) => (StructMake0)
(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 1 && CanSSA(t) =>
  (StructMake1
    (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem))
(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 2 && CanSSA(t) =>
  (StructMake2
    (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
    (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem))
(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 3 && CanSSA(t) =>
  (StructMake3
    (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
    (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
    (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem))
(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 4 && CanSSA(t) =>
  (StructMake4
    (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
    (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
    (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem)
    (Load <t.FieldType(3)> (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] ptr) mem))

(StructSelect [i] x:(Load <t> ptr mem)) && !CanSSA(t) =>
  @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)

(Store _ (StructMake0) mem) => mem
(Store dst (StructMake1 <t> f0) mem) =>
  (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)
(Store dst (StructMake2 <t> f0 f1) mem) =>
  (Store {t.FieldType(1)}
    (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
    f1
    (Store {t.FieldType(0)}
      (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
      f0 mem))
(Store dst (StructMake3 <t> f0 f1 f2) mem) =>
  (Store {t.FieldType(2)}
    (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)
    f2
    (Store {t.FieldType(1)}
      (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
      f1
      (Store {t.FieldType(0)}
        (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
        f0 mem)))
(Store dst (StructMake4 <t> f0 f1 f2 f3) mem) =>
  (Store {t.FieldType(3)}
    (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] dst)
    f3
    (Store {t.FieldType(2)}
      (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)
      f2
      (Store {t.FieldType(1)}
        (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
        f1
        (Store {t.FieldType(0)}
          (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
          f0 mem))))

// Putting struct{*byte} and similar into direct interfaces.
(IMake _typ (StructMake1 val)) => (IMake _typ val)
(StructSelect [0] (IData x)) => (IData x)

// un-SSAable values use mem->mem copies
(Store {t} dst (Load src mem) mem) && !CanSSA(t) => (Move {t} [t.Size()] dst src mem)
(Store {t} dst (Load src mem) (VarDef {x} mem)) && !CanSSA(t) => (Move {t} [t.Size()] dst src (VarDef {x} mem))

// array ops
(ArraySelect (ArrayMake1 x)) => x

(Load <t> _ _) && t.IsArray() && t.NumElem() == 0 => (ArrayMake0)
(Load <t> ptr mem) && t.IsArray() && t.NumElem() == 1 && CanSSA(t) => (ArrayMake1 (Load <t.Elem()> ptr mem))

(Store _ (ArrayMake0) mem) => mem
(Store dst (ArrayMake1 e) mem) => (Store {e.Type} dst e mem)

// Putting [1]*byte and similar into direct interfaces.
(IMake _typ (ArrayMake1 val)) => (IMake _typ val)
(ArraySelect [0] (IData x)) => (IData x)

// string ops
// Decomposing StringMake and lowering of StringPtr and StringLen
// happens in a later pass, dec, so that these operations are available
// to other passes for optimizations.
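// Illustrative sketch (not a rule): once a constant string is built with
// StringMake, its components fold immediately, e.g.
//
//	n := len("go") // StringLen (StringMake _ (Const64 [2])) -> (Const64 [2])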
(StringPtr (StringMake (Addr <t> {s} base) _)) => (Addr <t> {s} base)
(StringLen (StringMake _ (Const64 <t> [c]))) => (Const64 <t> [c])
(ConstString {str}) && config.PtrSize == 4 && str == "" =>
  (StringMake (ConstNil) (Const32 <typ.Int> [0]))
(ConstString {str}) && config.PtrSize == 8 && str == "" =>
  (StringMake (ConstNil) (Const64 <typ.Int> [0]))
(ConstString {str}) && config.PtrSize == 4 && str != "" =>
  (StringMake
    (Addr <typ.BytePtr> {fe.StringData(str)} (SB))
    (Const32 <typ.Int> [int32(len(str))]))
(ConstString {str}) && config.PtrSize == 8 && str != "" =>
  (StringMake
    (Addr <typ.BytePtr> {fe.StringData(str)} (SB))
    (Const64 <typ.Int> [int64(len(str))]))

// slice ops
// Only a few slice rules are provided here. See dec.rules for
// a more comprehensive set.
(SliceLen (SliceMake _ (Const64 <t> [c]) _)) => (Const64 <t> [c])
(SliceCap (SliceMake _ _ (Const64 <t> [c]))) => (Const64 <t> [c])
(SliceLen (SliceMake _ (Const32 <t> [c]) _)) => (Const32 <t> [c])
(SliceCap (SliceMake _ _ (Const32 <t> [c]))) => (Const32 <t> [c])
(SlicePtr (SliceMake (SlicePtr x) _ _)) => (SlicePtr x)
(SliceLen (SliceMake _ (SliceLen x) _)) => (SliceLen x)
(SliceCap (SliceMake _ _ (SliceCap x))) => (SliceCap x)
(SliceCap (SliceMake _ _ (SliceLen x))) => (SliceLen x)
(ConstSlice) && config.PtrSize == 4 =>
  (SliceMake
    (ConstNil <v.Type.Elem().PtrTo()>)
    (Const32 <typ.Int> [0])
    (Const32 <typ.Int> [0]))
(ConstSlice) && config.PtrSize == 8 =>
  (SliceMake
    (ConstNil <v.Type.Elem().PtrTo()>)
    (Const64 <typ.Int> [0])
    (Const64 <typ.Int> [0]))

// interface ops
(ConstInterface) =>
  (IMake
    (ConstNil <typ.Uintptr>)
    (ConstNil <typ.BytePtr>))

(NilCheck ptr:(GetG mem) mem) => ptr

(If (Not cond) yes no) => (If cond no yes)
(If (ConstBool [c]) yes no) && c => (First yes no)
(If (ConstBool [c]) yes no) && !c => (First no yes)

(Phi <t> nx:(Not x) ny:(Not y)) && nx.Uses == 1 && ny.Uses == 1 => (Not (Phi <t> x y))

// Get rid of Convert ops for pointer arithmetic on unsafe.Pointer.
(Convert (Add(64|32) (Convert ptr mem) off) mem) => (AddPtr ptr off)
(Convert (Convert ptr mem) mem) => ptr

// strength reduction of divide by a constant.
// See ../magic.go for a detailed description of these algorithms.

// Unsigned divide by power of 2. Strength reduce to a shift.
(Div8u n (Const8 [c])) && isPowerOfTwo8(c) => (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
(Div16u n (Const16 [c])) && isPowerOfTwo16(c) => (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
(Div32u n (Const32 [c])) && isPowerOfTwo32(c) => (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
(Div64u n (Const64 [c])) && isPowerOfTwo64(c) => (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
(Div64u n (Const64 [-1<<63])) => (Rsh64Ux64 n (Const64 <typ.UInt64> [63]))

// Signed non-negative divide by power of 2.
(Div8 n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo8(c) => (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
(Div16 n (Const16 [c])) && isNonNegative(n) && isPowerOfTwo16(c) => (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
(Div32 n (Const32 [c])) && isNonNegative(n) && isPowerOfTwo32(c) => (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
(Div64 n (Const64 [c])) && isNonNegative(n) && isPowerOfTwo64(c) => (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
(Div64 n (Const64 [-1<<63])) && isNonNegative(n) => (Const64 [0])

// Unsigned divide, not a power of 2. Strength reduce to a multiply.
// For 8-bit divides, we just do a direct 9-bit by 8-bit multiply.
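// Illustrative instance (not a rule), assuming the usual round-up magic
// constants from ../magic.go: for c = 10, umagic8 gives m = 154 and s = 4, so
//
//	x/10 == uint8((uint32(x) * (1<<8 + 154)) >> (8 + 4)) // x*410 >> 12
//
// holds for every uint8 x.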
(Div8u x (Const8 [c])) && umagicOK8(c) =>
  (Trunc32to8 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<8+umagic8(c).m)]) (ZeroExt8to32 x)) (Const64 <typ.UInt64> [8+umagic8(c).s])))

// For 16-bit divides on 64-bit machines, we do a direct 17-bit by 16-bit multiply.
(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 8 =>
  (Trunc64to16 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<16+umagic16(c).m)]) (ZeroExt16to64 x)) (Const64 <typ.UInt64> [16+umagic16(c).s])))

// For 16-bit divides on 32-bit machines
(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0 =>
  (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<15+umagic16(c).m/2)]) (ZeroExt16to32 x)) (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && c&1 == 0 =>
  (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<15+(umagic16(c).m+1)/2)]) (Rsh32Ux64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [16+umagic16(c).s-2])))
(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && config.useAvg =>
  (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Avg32u (Lsh32x64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [16])) (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(umagic16(c).m)]) (ZeroExt16to32 x))) (Const64 <typ.UInt64> [16+umagic16(c).s-1])))

// For 32-bit divides on 32-bit machines
(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul =>
  (Rsh32Ux64 <typ.UInt32> (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<31+umagic32(c).m/2)]) x) (Const64 <typ.UInt64> [umagic32(c).s-1]))
(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul =>
  (Rsh32Ux64 <typ.UInt32> (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<31+(umagic32(c).m+1)/2)]) (Rsh32Ux64 <typ.UInt32> x (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [umagic32(c).s-2]))
(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul =>
  (Rsh32Ux64 <typ.UInt32> (Avg32u x (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int32(umagic32(c).m)]) x)) (Const64 <typ.UInt64> [umagic32(c).s-1]))

// For 32-bit divides on 64-bit machines
// We'll use a regular (non-hi) multiply for this case.
(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0 =>
  (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<31+umagic32(c).m/2)]) (ZeroExt32to64 x)) (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && c&1 == 0 =>
  (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<31+(umagic32(c).m+1)/2)]) (Rsh64Ux64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [32+umagic32(c).s-2])))
(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && config.useAvg =>
  (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Avg64u (Lsh64x64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [32])) (Mul64 <typ.UInt64> (Const64 <typ.UInt32> [int64(umagic32(c).m)]) (ZeroExt32to64 x))) (Const64 <typ.UInt64> [32+umagic32(c).s-1])))

// For unsigned 64-bit divides on 32-bit machines,
// if the constant fits in 16 bits (so that the last term
// fits in 32 bits), convert to three 32-bit divides by a constant.
//
// If 1<<32 = Q * c + R
// and x = hi << 32 + lo
//
// Then x = (hi/c*c + hi%c) << 32 + lo
//        = hi/c*c<<32 + hi%c<<32 + lo
//        = hi/c*c<<32 + (hi%c)*(Q*c+R) + lo/c*c + lo%c
//        = hi/c*c<<32 + (hi%c)*Q*c + lo/c*c + (hi%c*R+lo%c)
// and x / c = (hi/c)<<32 + (hi%c)*Q + lo/c + (hi%c*R+lo%c)/c
(Div64u x (Const64 [c])) && c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul =>
  (Add64
    (Add64 <typ.UInt64>
      (Add64 <typ.UInt64>
        (Lsh64x64 <typ.UInt64>
          (ZeroExt32to64
            (Div32u <typ.UInt32>
              (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32])))
              (Const32 <typ.UInt32> [int32(c)])))
          (Const64 <typ.UInt64> [32]))
        (ZeroExt32to64 (Div32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)]))))
      (Mul64 <typ.UInt64>
        (ZeroExt32to64 <typ.UInt64>
          (Mod32u <typ.UInt32>
            (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32])))
            (Const32 <typ.UInt32> [int32(c)])))
        (Const64 <typ.UInt64> [int64((1<<32)/c)])))
    (ZeroExt32to64
      (Div32u <typ.UInt32>
        (Add32 <typ.UInt32>
          (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)]))
          (Mul32 <typ.UInt32>
            (Mod32u <typ.UInt32>
              (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32])))
              (Const32 <typ.UInt32> [int32(c)]))
            (Const32 <typ.UInt32> [int32((1<<32)%c)])))
        (Const32 <typ.UInt32> [int32(c)]))))

// For 64-bit divides on 64-bit machines
// (64-bit divides on 32-bit machines are lowered to a runtime call by the walk pass.)
(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul =>
  (Rsh64Ux64 <typ.UInt64> (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<63+umagic64(c).m/2)]) x) (Const64 <typ.UInt64> [umagic64(c).s-1]))
(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul =>
  (Rsh64Ux64 <typ.UInt64> (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<63+(umagic64(c).m+1)/2)]) (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [umagic64(c).s-2]))
(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul =>
  (Rsh64Ux64 <typ.UInt64> (Avg64u x (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(umagic64(c).m)]) x)) (Const64 <typ.UInt64> [umagic64(c).s-1]))

// Signed divide by a negative constant. Rewrite to divide by a positive constant.
(Div8 <t> n (Const8 [c])) && c < 0 && c != -1<<7 => (Neg8 (Div8 <t> n (Const8 <t> [-c])))
(Div16 <t> n (Const16 [c])) && c < 0 && c != -1<<15 => (Neg16 (Div16 <t> n (Const16 <t> [-c])))
(Div32 <t> n (Const32 [c])) && c < 0 && c != -1<<31 => (Neg32 (Div32 <t> n (Const32 <t> [-c])))
(Div64 <t> n (Const64 [c])) && c < 0 && c != -1<<63 => (Neg64 (Div64 <t> n (Const64 <t> [-c])))

// Dividing by the most-negative number. Result is always 0 except
// if the input is also the most-negative number.
// We can detect that using the sign bit of x & -x.
(Div8 <t> x (Const8 [-1<<7 ])) => (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <typ.UInt64> [7 ]))
(Div16 <t> x (Const16 [-1<<15])) => (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <typ.UInt64> [15]))
(Div32 <t> x (Const32 [-1<<31])) => (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <typ.UInt64> [31]))
(Div64 <t> x (Const64 [-1<<63])) => (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <typ.UInt64> [63]))

// Signed divide by power of 2.
// n / c =       n >> log(c) if n >= 0
//       = (n+c-1) >> log(c) if n < 0
// We conditionally add c-1 by adding n>>63>>(64-log(c)) (first shift signed, second shift unsigned).
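// Illustrative sketch (not a rule): for n/4 on int64 this becomes
//
//	bias := int64(uint64(n>>63) >> 62) // 0 if n >= 0, 3 if n < 0
//	q := (n + bias) >> 2               // truncates toward zero, like Go's /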
(Div8 <t> n (Const8 [c])) && isPowerOfTwo8(c) =>
  (Rsh8x64
    (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [int64( 8-log8(c))])))
    (Const64 <typ.UInt64> [int64(log8(c))]))
(Div16 <t> n (Const16 [c])) && isPowerOfTwo16(c) =>
  (Rsh16x64
    (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [int64(16-log16(c))])))
    (Const64 <typ.UInt64> [int64(log16(c))]))
(Div32 <t> n (Const32 [c])) && isPowerOfTwo32(c) =>
  (Rsh32x64
    (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [int64(32-log32(c))])))
    (Const64 <typ.UInt64> [int64(log32(c))]))
(Div64 <t> n (Const64 [c])) && isPowerOfTwo64(c) =>
  (Rsh64x64
    (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [int64(64-log64(c))])))
    (Const64 <typ.UInt64> [int64(log64(c))]))

// Signed divide, not a power of 2. Strength reduce to a multiply.
(Div8 <t> x (Const8 [c])) && smagicOK8(c) =>
  (Sub8 <t>
    (Rsh32x64 <t> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(smagic8(c).m)]) (SignExt8to32 x)) (Const64 <typ.UInt64> [8+smagic8(c).s]))
    (Rsh32x64 <t> (SignExt8to32 x) (Const64 <typ.UInt64> [31])))
(Div16 <t> x (Const16 [c])) && smagicOK16(c) =>
  (Sub16 <t>
    (Rsh32x64 <t> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(smagic16(c).m)]) (SignExt16to32 x)) (Const64 <typ.UInt64> [16+smagic16(c).s]))
    (Rsh32x64 <t> (SignExt16to32 x) (Const64 <typ.UInt64> [31])))
(Div32 <t> x (Const32 [c])) && smagicOK32(c) && config.RegSize == 8 =>
  (Sub32 <t>
    (Rsh64x64 <t> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(smagic32(c).m)]) (SignExt32to64 x)) (Const64 <typ.UInt64> [32+smagic32(c).s]))
    (Rsh64x64 <t> (SignExt32to64 x) (Const64 <typ.UInt64> [63])))
(Div32 <t> x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul =>
  (Sub32 <t>
    (Rsh32x64 <t> (Hmul32 <t> (Const32 <typ.UInt32> [int32(smagic32(c).m/2)]) x) (Const64 <typ.UInt64> [smagic32(c).s-1]))
    (Rsh32x64 <t> x (Const64 <typ.UInt64> [31])))
(Div32 <t> x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul =>
  (Sub32 <t>
    (Rsh32x64 <t> (Add32 <t> (Hmul32 <t> (Const32 <typ.UInt32> [int32(smagic32(c).m)]) x) x) (Const64 <typ.UInt64> [smagic32(c).s]))
    (Rsh32x64 <t> x (Const64 <typ.UInt64> [31])))
(Div64 <t> x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul =>
  (Sub64 <t>
    (Rsh64x64 <t> (Hmul64 <t> (Const64 <typ.UInt64> [int64(smagic64(c).m/2)]) x) (Const64 <typ.UInt64> [smagic64(c).s-1]))
    (Rsh64x64 <t> x (Const64 <typ.UInt64> [63])))
(Div64 <t> x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul =>
  (Sub64 <t>
    (Rsh64x64 <t> (Add64 <t> (Hmul64 <t> (Const64 <typ.UInt64> [int64(smagic64(c).m)]) x) x) (Const64 <typ.UInt64> [smagic64(c).s]))
    (Rsh64x64 <t> x (Const64 <typ.UInt64> [63])))

// Unsigned mod by power of 2 constant.
(Mod8u <t> n (Const8 [c])) && isPowerOfTwo8(c) => (And8 n (Const8 <t> [c-1]))
(Mod16u <t> n (Const16 [c])) && isPowerOfTwo16(c) => (And16 n (Const16 <t> [c-1]))
(Mod32u <t> n (Const32 [c])) && isPowerOfTwo32(c) => (And32 n (Const32 <t> [c-1]))
(Mod64u <t> n (Const64 [c])) && isPowerOfTwo64(c) => (And64 n (Const64 <t> [c-1]))
(Mod64u <t> n (Const64 [-1<<63])) => (And64 n (Const64 <t> [1<<63-1]))

// Signed non-negative mod by power of 2 constant.
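// Illustrative sketch (not a rule): once n is known non-negative, the signed
// mod is the same bitmask as the unsigned one:
//
//	n % 8 == n & 7 // for n >= 0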
(Mod8 <t> n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo8(c) => (And8 n (Const8 <t> [c-1]))
(Mod16 <t> n (Const16 [c])) && isNonNegative(n) && isPowerOfTwo16(c) => (And16 n (Const16 <t> [c-1]))
(Mod32 <t> n (Const32 [c])) && isNonNegative(n) && isPowerOfTwo32(c) => (And32 n (Const32 <t> [c-1]))
(Mod64 <t> n (Const64 [c])) && isNonNegative(n) && isPowerOfTwo64(c) => (And64 n (Const64 <t> [c-1]))
(Mod64 n (Const64 [-1<<63])) && isNonNegative(n) => n

// Signed mod by negative constant.
(Mod8 <t> n (Const8 [c])) && c < 0 && c != -1<<7 => (Mod8 <t> n (Const8 <t> [-c]))
(Mod16 <t> n (Const16 [c])) && c < 0 && c != -1<<15 => (Mod16 <t> n (Const16 <t> [-c]))
(Mod32 <t> n (Const32 [c])) && c < 0 && c != -1<<31 => (Mod32 <t> n (Const32 <t> [-c]))
(Mod64 <t> n (Const64 [c])) && c < 0 && c != -1<<63 => (Mod64 <t> n (Const64 <t> [-c]))

// All other mods by constants, do A%B = A-(A/B*B).
// This implements % with two * and a bunch of ancillary ops.
// One of the * is free if the user's code also computes A/B.
(Mod8 <t> x (Const8 [c])) && x.Op != OpConst8 && (c > 0 || c == -1<<7) => (Sub8 x (Mul8 <t> (Div8 <t> x (Const8 <t> [c])) (Const8 <t> [c])))
(Mod16 <t> x (Const16 [c])) && x.Op != OpConst16 && (c > 0 || c == -1<<15) => (Sub16 x (Mul16 <t> (Div16 <t> x (Const16 <t> [c])) (Const16 <t> [c])))
(Mod32 <t> x (Const32 [c])) && x.Op != OpConst32 && (c > 0 || c == -1<<31) => (Sub32 x (Mul32 <t> (Div32 <t> x (Const32 <t> [c])) (Const32 <t> [c])))
(Mod64 <t> x (Const64 [c])) && x.Op != OpConst64 && (c > 0 || c == -1<<63) => (Sub64 x (Mul64 <t> (Div64 <t> x (Const64 <t> [c])) (Const64 <t> [c])))
(Mod8u <t> x (Const8 [c])) && x.Op != OpConst8 && c > 0 && umagicOK8( c) => (Sub8 x (Mul8 <t> (Div8u <t> x (Const8 <t> [c])) (Const8 <t> [c])))
(Mod16u <t> x (Const16 [c])) && x.Op != OpConst16 && c > 0 && umagicOK16(c) => (Sub16 x (Mul16 <t> (Div16u <t> x (Const16 <t> [c])) (Const16 <t> [c])))
(Mod32u <t> x (Const32 [c])) && x.Op != OpConst32 && c > 0 && umagicOK32(c) => (Sub32 x (Mul32 <t> (Div32u <t> x (Const32 <t> [c])) (Const32 <t> [c])))
(Mod64u <t> x (Const64 [c])) && x.Op != OpConst64 && c > 0 && umagicOK64(c) => (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))

// For architectures without rotates on less than 32-bits, promote these checks to 32-bit.
(Eq8 (Mod8u x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config) =>
  (Eq32 (Mod32u <typ.UInt32> (ZeroExt8to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint8(c))])) (Const32 <typ.UInt32> [0]))
(Eq16 (Mod16u x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config) =>
  (Eq32 (Mod32u <typ.UInt32> (ZeroExt16to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint16(c))])) (Const32 <typ.UInt32> [0]))
(Eq8 (Mod8 x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config) =>
  (Eq32 (Mod32 <typ.Int32> (SignExt8to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
(Eq16 (Mod16 x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config) =>
  (Eq32 (Mod32 <typ.Int32> (SignExt16to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))

// Divisibility checks x%c == 0 convert to multiply and rotate.
// Note, x%c == 0 is rewritten as x == c*(x/c) during the opt pass
// where (x/c) is performed using multiplication with magic constants.
// To rewrite x%c == 0 requires pattern matching the rewritten expression
// and checking that the division by the same constant wasn't already calculated.
// This check is made by counting uses of the magic constant multiplication.
// Note that if there were an intermediate opt pass, this rule could be applied
// directly on the Div op and magic division rewrites could be delayed to late opt.

// Unsigned divisibility checks convert to multiply and rotate.
(Eq8 x (Mul8 (Const8 [c]) (Trunc32to8 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt8to32 x)) (Const64 [s])))))
  && v.Block.Func.pass.name != "opt" && mul.Uses == 1
  && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s
  && x.Op != OpConst8 && udivisibleOK8(c)
  => (Leq8U (RotateLeft8 <typ.UInt8> (Mul8 <typ.UInt8> (Const8 <typ.UInt8> [int8(udivisible8(c).m)]) x) (Const8 <typ.UInt8> [int8(8-udivisible8(c).k)])) (Const8 <typ.UInt8> [int8(udivisible8(c).max)]))
(Eq16 x (Mul16 (Const16 [c]) (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt16to64 x)) (Const64 [s])))))
  && v.Block.Func.pass.name != "opt" && mul.Uses == 1
  && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s
  && x.Op != OpConst16 && udivisibleOK16(c)
  => (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]))
(Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x)) (Const64 [s])))))
  && v.Block.Func.pass.name != "opt" && mul.Uses == 1
  && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1
  && x.Op != OpConst16 && udivisibleOK16(c)
  => (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]))
(Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) (Const64 [s])))))
  && v.Block.Func.pass.name != "opt" && mul.Uses == 1
  && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2
  && x.Op != OpConst16 && udivisibleOK16(c)
  => (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]))
(Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x))) (Const64 [s])))))
  && v.Block.Func.pass.name != "opt" && mul.Uses == 1
  && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1
  && x.Op != OpConst16 && udivisibleOK16(c)
  => (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]))
(Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) x) (Const64 [s]))))
  && v.Block.Func.pass.name != "opt" && mul.Uses == 1
  && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1
  && x.Op != OpConst32 && udivisibleOK32(c)
  => (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]))
(Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 <typ.UInt32> [m]) (Rsh32Ux64 x (Const64 [1]))) (Const64 [s]))))
  && v.Block.Func.pass.name != "opt" && mul.Uses == 1
  && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2
  && x.Op != OpConst32 && udivisibleOK32(c)
  => (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]))
(Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 (Avg32u x mul:(Hmul32u (Const32 [m]) x)) (Const64 [s]))))
  && v.Block.Func.pass.name != "opt" && mul.Uses == 1
  && m == int32(umagic32(c).m) && s == umagic32(c).s-1
  && x.Op != OpConst32 && udivisibleOK32(c)
  => (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]))
(Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x)) (Const64 [s])))))
  && v.Block.Func.pass.name != "opt" && mul.Uses == 1
  && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1
  && x.Op != OpConst32 && udivisibleOK32(c)
  => (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]))
(Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) (Const64 [s])))))
  && v.Block.Func.pass.name != "opt" && mul.Uses == 1
  && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2
  && x.Op != OpConst32 && udivisibleOK32(c)
  => (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]))
(Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x))) (Const64 [s])))))
  && v.Block.Func.pass.name != "opt" && mul.Uses == 1
  && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1
  && x.Op != OpConst32 && udivisibleOK32(c)
  => (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]))
(Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) x) (Const64 [s]))))
  && v.Block.Func.pass.name != "opt" && mul.Uses == 1
  && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1
  && x.Op != OpConst64 && udivisibleOK64(c)
  => (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible64(c).m)]) x) (Const64 <typ.UInt64> [64-udivisible64(c).k])) (Const64 <typ.UInt64> [int64(udivisible64(c).max)]))
(Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) (Rsh64Ux64 x (Const64 [1]))) (Const64 [s]))))
  && v.Block.Func.pass.name != "opt" && mul.Uses == 1
  && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2
  && x.Op != OpConst64 && udivisibleOK64(c)
  => (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible64(c).m)]) x) (Const64 <typ.UInt64> [64-udivisible64(c).k])) (Const64 <typ.UInt64> [int64(udivisible64(c).max)]))
(Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 (Avg64u x mul:(Hmul64u (Const64 [m]) x)) (Const64 [s]))))
  && v.Block.Func.pass.name != "opt" && mul.Uses == 1
  && m == int64(umagic64(c).m) && s
== umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c) => (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible64(c).m)]) x) (Const64 <typ.UInt64> [64-udivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(udivisible64(c).max)]) ) // Signed divisibility checks convert to multiply, add and rotate. (Eq8 x (Mul8 (Const8 [c]) (Sub8 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt8to32 x)) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) ) ) && v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic8(c).m) && s == 8+smagic8(c).s && x.Op != OpConst8 && sdivisibleOK8(c) => (Leq8U (RotateLeft8 <typ.UInt8> (Add8 <typ.UInt8> (Mul8 <typ.UInt8> (Const8 <typ.UInt8> [int8(sdivisible8(c).m)]) x) (Const8 <typ.UInt8> [int8(sdivisible8(c).a)]) ) (Const8 <typ.UInt8> [int8(8-sdivisible8(c).k)]) ) (Const8 <typ.UInt8> [int8(sdivisible8(c).max)]) ) (Eq16 x (Mul16 (Const16 [c]) (Sub16 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt16to32 x)) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) ) ) && v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic16(c).m) && s == 16+smagic16(c).s && x.Op != OpConst16 && sdivisibleOK16(c) => (Leq16U (RotateLeft16 <typ.UInt16> (Add16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(sdivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(sdivisible16(c).a)]) ) (Const16 <typ.UInt16> [int16(16-sdivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(sdivisible16(c).max)]) ) (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh64x64 mul:(Mul64 (Const64 [m]) (SignExt32to64 x)) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) ) ) && v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic32(c).m) && s == 32+smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c) => (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(sdivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(sdivisible32(c).a)]) ) (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(sdivisible32(c).max)]) ) (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 mul:(Hmul32 (Const32 [m]) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) ) && v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1 && x.Op != OpConst32 && sdivisibleOK32(c) => (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(sdivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(sdivisible32(c).a)]) ) (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(sdivisible32(c).max)]) ) (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 mul:(Hmul32 (Const32 [m]) x) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) ) && v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m) && s == smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c) => (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(sdivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(sdivisible32(c).a)]) ) (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(sdivisible32(c).max)]) ) (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 mul:(Hmul64 (Const64 [m]) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) ) && v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1 && x.Op != OpConst64 && sdivisibleOK64(c) => (Leq64U (RotateLeft64 <typ.UInt64> (Add64 <typ.UInt64> 
(Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(sdivisible64(c).m)]) x) (Const64 <typ.UInt64> [int64(sdivisible64(c).a)]) ) (Const64 <typ.UInt64> [64-sdivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(sdivisible64(c).max)]) ) (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 (Const64 [m]) x) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) ) && v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m) && s == smagic64(c).s && x.Op != OpConst64 && sdivisibleOK64(c) => (Leq64U (RotateLeft64 <typ.UInt64> (Add64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(sdivisible64(c).m)]) x) (Const64 <typ.UInt64> [int64(sdivisible64(c).a)]) ) (Const64 <typ.UInt64> [64-sdivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(sdivisible64(c).max)]) ) // Divisibility check for signed integers for power of two constant are simple mask. // However, we must match against the rewritten n%c == 0 -> n - c*(n/c) == 0 -> n == c*(n/c) // where n/c contains fixup code to handle signed n. ((Eq8|Neq8) n (Lsh8x64 (Rsh8x64 (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) ) && k > 0 && k < 7 && kbar == 8 - k => ((Eq8|Neq8) (And8 <t> n (Const8 <t> [1<<uint(k)-1])) (Const8 <t> [0])) ((Eq16|Neq16) n (Lsh16x64 (Rsh16x64 (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) ) && k > 0 && k < 15 && kbar == 16 - k => ((Eq16|Neq16) (And16 <t> n (Const16 <t> [1<<uint(k)-1])) (Const16 <t> [0])) ((Eq32|Neq32) n (Lsh32x64 (Rsh32x64 (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) ) && k > 0 && k < 31 && kbar == 32 - k => ((Eq32|Neq32) (And32 <t> n (Const32 <t> [1<<uint(k)-1])) (Const32 <t> [0])) ((Eq64|Neq64) n (Lsh64x64 (Rsh64x64 (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) ) && k > 0 && k < 63 && kbar == 64 - k => ((Eq64|Neq64) (And64 <t> n (Const64 <t> [1<<uint(k)-1])) (Const64 <t> [0])) (Eq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 => (Eq(8|16|32|64) x y) (Neq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 => (Neq(8|16|32|64) x y) // Optimize bitsets (Eq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [y])) && oneBit8(y) => (Neq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [0])) (Eq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [y])) && oneBit16(y) => (Neq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [0])) (Eq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [y])) && oneBit32(y) => (Neq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [0])) (Eq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [y])) && oneBit64(y) => (Neq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [0])) (Neq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [y])) && oneBit8(y) => (Eq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [0])) (Neq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [y])) && oneBit16(y) => (Eq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [0])) (Neq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [y])) && oneBit32(y) => (Eq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [0])) (Neq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [y])) && oneBit64(y) => (Eq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [0])) // 
Reassociate expressions involving // constants such that constants come first, // exposing obvious constant-folding opportunities. // Reassociate (op (op y C) x) to (op C (op x y)) or similar, where C // is constant, which pushes constants to the outside // of the expression. At that point, any constant-folding // opportunities should be obvious. // Note: don't include AddPtr here! In order to maintain the // invariant that pointers must stay within the pointed-to object, // we can't pull part of a pointer computation above the AddPtr. // See issue 37881. // Note: we don't need to handle any (x-C) cases because we already rewrite // (x-C) to (x+(-C)). // x + (C + z) -> C + (x + z) (Add64 (Add64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Add64 <t> z x)) (Add32 (Add32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Add32 <t> z x)) (Add16 (Add16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Add16 <t> z x)) (Add8 (Add8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Add8 <t> z x)) // x + (C - z) -> C + (x - z) (Add64 (Sub64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Sub64 <t> x z)) (Add32 (Sub32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Sub32 <t> x z)) (Add16 (Sub16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 <t> x z)) (Add8 (Sub8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 <t> x z)) // x - (C - z) -> x + (z - C) -> (x + z) - C (Sub64 x (Sub64 i:(Const64 <t>) z)) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Add64 <t> x z) i) (Sub32 x (Sub32 i:(Const32 <t>) z)) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Add32 <t> x z) i) (Sub16 x (Sub16 i:(Const16 <t>) z)) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Add16 <t> x z) i) (Sub8 x (Sub8 i:(Const8 <t>) z)) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 (Add8 <t> x z) i) // x - (z + C) -> x + (-z - C) -> (x - z) - C (Sub64 x (Add64 z i:(Const64 <t>))) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Sub64 <t> x z) i) (Sub32 x (Add32 z i:(Const32 <t>))) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Sub32 <t> x z) i) (Sub16 x (Add16 z i:(Const16 <t>))) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Sub16 <t> x z) i) (Sub8 x (Add8 z i:(Const8 <t>))) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 (Sub8 <t> x z) i) // (C - z) - x -> C - (z + x) (Sub64 (Sub64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 i (Add64 <t> z x)) (Sub32 (Sub32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 i (Add32 <t> z x)) (Sub16 (Sub16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 i (Add16 <t> z x)) (Sub8 (Sub8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 i (Add8 <t> z x)) // (z + C) -x -> C + (z - x) (Sub64 (Add64 z i:(Const64 <t>)) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Sub64 <t> z x)) (Sub32 (Add32 z i:(Const32 <t>)) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Sub32 <t> z x)) (Sub16 (Add16 z i:(Const16 <t>)) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 <t> z x)) (Sub8 (Add8 z i:(Const8 <t>)) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 <t> z x)) // x & (C & z) -> C & (x & z) (And64 (And64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) 
=> (And64 i (And64 <t> z x)) (And32 (And32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (And32 i (And32 <t> z x)) (And16 (And16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (And16 i (And16 <t> z x)) (And8 (And8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (And8 i (And8 <t> z x)) // x | (C | z) -> C | (x | z) (Or64 (Or64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Or64 i (Or64 <t> z x)) (Or32 (Or32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Or32 i (Or32 <t> z x)) (Or16 (Or16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Or16 i (Or16 <t> z x)) (Or8 (Or8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Or8 i (Or8 <t> z x)) // x ^ (C ^ z) -> C ^ (x ^ z) (Xor64 (Xor64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Xor64 i (Xor64 <t> z x)) (Xor32 (Xor32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Xor32 i (Xor32 <t> z x)) (Xor16 (Xor16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Xor16 i (Xor16 <t> z x)) (Xor8 (Xor8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Xor8 i (Xor8 <t> z x)) // x * (D * z) = D * (x * z) (Mul64 (Mul64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Mul64 i (Mul64 <t> x z)) (Mul32 (Mul32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Mul32 i (Mul32 <t> x z)) (Mul16 (Mul16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Mul16 i (Mul16 <t> x z)) (Mul8 (Mul8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Mul8 i (Mul8 <t> x z)) // C + (D + x) -> (C + D) + x (Add64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Add64 (Const64 <t> [c+d]) x) (Add32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Add32 (Const32 <t> [c+d]) x) (Add16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Add16 (Const16 <t> [c+d]) x) (Add8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Add8 (Const8 <t> [c+d]) x) // C + (D - x) -> (C + D) - x (Add64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x)) => (Sub64 (Const64 <t> [c+d]) x) (Add32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x)) => (Sub32 (Const32 <t> [c+d]) x) (Add16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x)) => (Sub16 (Const16 <t> [c+d]) x) (Add8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x)) => (Sub8 (Const8 <t> [c+d]) x) // C - (D - x) -> (C - D) + x (Sub64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x)) => (Add64 (Const64 <t> [c-d]) x) (Sub32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x)) => (Add32 (Const32 <t> [c-d]) x) (Sub16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x)) => (Add16 (Const16 <t> [c-d]) x) (Sub8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x)) => (Add8 (Const8 <t> [c-d]) x) // C - (D + x) -> (C - D) - x (Sub64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Sub64 (Const64 <t> [c-d]) x) (Sub32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Sub32 (Const32 <t> [c-d]) x) (Sub16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Sub16 (Const16 <t> [c-d]) x) (Sub8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Sub8 (Const8 <t> [c-d]) x) // C & (D & x) -> (C & D) & x (And64 (Const64 <t> [c]) (And64 (Const64 <t> [d]) x)) => (And64 (Const64 <t> [c&d]) x) (And32 (Const32 <t> [c]) (And32 (Const32 <t> [d]) x)) => (And32 (Const32 <t> [c&d]) x) (And16 (Const16 <t> [c]) (And16 (Const16 <t> [d]) x)) => (And16 (Const16 <t> [c&d]) x) (And8 (Const8 <t> [c]) (And8 (Const8 <t> [d]) x)) => (And8 (Const8 
<t> [c&d]) x) // C | (D | x) -> (C | D) | x (Or64 (Const64 <t> [c]) (Or64 (Const64 <t> [d]) x)) => (Or64 (Const64 <t> [c|d]) x) (Or32 (Const32 <t> [c]) (Or32 (Const32 <t> [d]) x)) => (Or32 (Const32 <t> [c|d]) x) (Or16 (Const16 <t> [c]) (Or16 (Const16 <t> [d]) x)) => (Or16 (Const16 <t> [c|d]) x) (Or8 (Const8 <t> [c]) (Or8 (Const8 <t> [d]) x)) => (Or8 (Const8 <t> [c|d]) x) // C ^ (D ^ x) -> (C ^ D) ^ x (Xor64 (Const64 <t> [c]) (Xor64 (Const64 <t> [d]) x)) => (Xor64 (Const64 <t> [c^d]) x) (Xor32 (Const32 <t> [c]) (Xor32 (Const32 <t> [d]) x)) => (Xor32 (Const32 <t> [c^d]) x) (Xor16 (Const16 <t> [c]) (Xor16 (Const16 <t> [d]) x)) => (Xor16 (Const16 <t> [c^d]) x) (Xor8 (Const8 <t> [c]) (Xor8 (Const8 <t> [d]) x)) => (Xor8 (Const8 <t> [c^d]) x) // C * (D * x) = (C * D) * x (Mul64 (Const64 <t> [c]) (Mul64 (Const64 <t> [d]) x)) => (Mul64 (Const64 <t> [c*d]) x) (Mul32 (Const32 <t> [c]) (Mul32 (Const32 <t> [d]) x)) => (Mul32 (Const32 <t> [c*d]) x) (Mul16 (Const16 <t> [c]) (Mul16 (Const16 <t> [d]) x)) => (Mul16 (Const16 <t> [c*d]) x) (Mul8 (Const8 <t> [c]) (Mul8 (Const8 <t> [d]) x)) => (Mul8 (Const8 <t> [c*d]) x) // floating point optimizations (Mul(32|64)F x (Const(32|64)F [1])) => x (Mul32F x (Const32F [-1])) => (Neg32F x) (Mul64F x (Const64F [-1])) => (Neg64F x) (Mul32F x (Const32F [2])) => (Add32F x x) (Mul64F x (Const64F [2])) => (Add64F x x) (Div32F x (Const32F <t> [c])) && reciprocalExact32(c) => (Mul32F x (Const32F <t> [1/c])) (Div64F x (Const64F <t> [c])) && reciprocalExact64(c) => (Mul64F x (Const64F <t> [1/c])) // rewrite single-precision sqrt expression "float32(math.Sqrt(float64(x)))" (Cvt64Fto32F sqrt0:(Sqrt (Cvt32Fto64F x))) && sqrt0.Uses==1 => (Sqrt32 x) (Sqrt (Const64F [c])) && !math.IsNaN(math.Sqrt(c)) => (Const64F [math.Sqrt(c)]) // for rewriting results of some late-expanded rewrites (below) (SelectN [0] (MakeResult x ___)) => x (SelectN [1] (MakeResult x y ___)) => y (SelectN [2] (MakeResult x y z ___)) => z // for late-expanded calls, recognize newobject and remove zeroing and nilchecks (Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call)) && isSameCall(call.Aux, "runtime.newobject") => mem (Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call)) && isConstZero(x) && isSameCall(call.Aux, "runtime.newobject") => mem (Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call)) && isConstZero(x) && isSameCall(call.Aux, "runtime.newobject") => mem (NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _) && isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") => ptr (NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _) && isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") => ptr // Addresses of globals are always non-nil. 
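// The Div(32|64)F rules above fire only when reciprocalExact reports that 1/c
// is exactly representable, so x/c can safely become x*(1/c). A minimal sketch
// of such a check, assuming the criterion is "c is a power of two and both c
// and 1/c are normal" (the compiler's actual helper may differ at the edges).
// The nil-check rules for global addresses resume below.
package main

import (
	"fmt"
	"math"
)

// reciprocalIsExact reports whether 1/c is an exact float64.
func reciprocalIsExact(c float64) bool {
	frac, exp := math.Frexp(c) // c = frac * 2^exp with frac in [0.5, 1)
	// Only powers of two have frac == 0.5; the exponent bounds keep both
	// c = 2^(exp-1) and 1/c = 2^(1-exp) inside the normal range.
	return frac == 0.5 && exp >= -1021 && exp <= 1023
}

func main() {
	fmt.Println(reciprocalIsExact(4))   // true:  x/4 may become x*0.25
	fmt.Println(reciprocalIsExact(10))  // false: 1/10 is inexact in binary
	fmt.Println(reciprocalIsExact(0.5)) // true:  x/0.5 may become x*2
}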
(NilCheck ptr:(Addr {_} (SB)) _) => ptr (NilCheck ptr:(Convert (Addr {_} (SB)) _) _) => ptr // for late-expanded calls, recognize memequal applied to a single constant byte // Support is limited by 1, 2, 4, 8 byte sizes (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem) && isSameCall(callAux, "runtime.memequal") && symIsRO(scon) => (MakeResult (Eq8 (Load <typ.Int8> sptr mem) (Const8 <typ.Int8> [int8(read8(scon,0))])) mem) (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [1]) mem) && isSameCall(callAux, "runtime.memequal") && symIsRO(scon) => (MakeResult (Eq8 (Load <typ.Int8> sptr mem) (Const8 <typ.Int8> [int8(read8(scon,0))])) mem) (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [2]) mem) && isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) => (MakeResult (Eq16 (Load <typ.Int16> sptr mem) (Const16 <typ.Int16> [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])) mem) (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [2]) mem) && isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) => (MakeResult (Eq16 (Load <typ.Int16> sptr mem) (Const16 <typ.Int16> [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])) mem) (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [4]) mem) && isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) => (MakeResult (Eq32 (Load <typ.Int32> sptr mem) (Const32 <typ.Int32> [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])) mem) (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [4]) mem) && isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) => (MakeResult (Eq32 (Load <typ.Int32> sptr mem) (Const32 <typ.Int32> [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])) mem) (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [8]) mem) && isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8 => (MakeResult (Eq64 (Load <typ.Int64> sptr mem) (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem) (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [8]) mem) && isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8 => (MakeResult (Eq64 (Load <typ.Int64> sptr mem) (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem) (StaticLECall {callAux} _ _ (Const64 [0]) mem) && isSameCall(callAux, "runtime.memequal") => (MakeResult (ConstBool <typ.Bool> [true]) mem) (Static(Call|LECall) {callAux} p q _ mem) && isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q) => (MakeResult (ConstBool <typ.Bool> [true]) mem) // Turn known-size calls to memclrNoHeapPointers into a Zero. // Note that we are using types.Types[types.TUINT8] instead of sptr.Type.Elem() - see issue 55122 and CL 431496 for more details. (SelectN [0] call:(StaticCall {sym} sptr (Const(64|32) [c]) mem)) && isInlinableMemclr(config, int64(c)) && isSameCall(sym, "runtime.memclrNoHeapPointers") && call.Uses == 1 && clobber(call) => (Zero {types.Types[types.TUINT8]} [int64(c)] sptr mem) // Recognise make([]T, 0) and replace it with a pointer to the zerobase (StaticLECall {callAux} _ (Const(64|32) [0]) (Const(64|32) [0]) mem) && isSameCall(callAux, "runtime.makeslice") => (MakeResult (Addr <v.Type.FieldType(0)> {ir.Syms.Zerobase} (SB)) mem) // Evaluate constant address comparisons. 
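// At the Go source level, the memequal rules above mean that comparing a
// string against a short constant needs no call at all: the length check is
// folded and the body compare becomes a single 1/2/4/8-byte load against a
// constant (the wider cases need canLoadUnaligned). An illustrative sketch
// with function names of our choosing; the constant address-comparison folds
// follow below.
package p

// Should compile to: compare len(s) with 1, then one loaded byte against '\n'.
func isNewline(s string) bool {
	return s == "\n"
}

// Should compile to one 4-byte load compared against the bytes of "true".
func isTrue(s string) bool {
	return s == "true"
}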
(EqPtr x x) => (ConstBool [true]) (NeqPtr x x) => (ConstBool [false]) (EqPtr (Addr {x} _) (Addr {y} _)) => (ConstBool [x == y]) (EqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) => (ConstBool [x == y && o == 0]) (EqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) => (ConstBool [x == y && o1 == o2]) (NeqPtr (Addr {x} _) (Addr {y} _)) => (ConstBool [x != y]) (NeqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) => (ConstBool [x != y || o != 0]) (NeqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) => (ConstBool [x != y || o1 != o2]) (EqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) => (ConstBool [x == y]) (EqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) => (ConstBool [x == y && o == 0]) (EqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) => (ConstBool [x == y && o1 == o2]) (NeqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) => (ConstBool [x != y]) (NeqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) => (ConstBool [x != y || o != 0]) (NeqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) => (ConstBool [x != y || o1 != o2]) (EqPtr (OffPtr [o1] p1) p2) && isSamePtr(p1, p2) => (ConstBool [o1 == 0]) (NeqPtr (OffPtr [o1] p1) p2) && isSamePtr(p1, p2) => (ConstBool [o1 != 0]) (EqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) && isSamePtr(p1, p2) => (ConstBool [o1 == o2]) (NeqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) && isSamePtr(p1, p2) => (ConstBool [o1 != o2]) (EqPtr (Const(32|64) [c]) (Const(32|64) [d])) => (ConstBool [c == d]) (NeqPtr (Const(32|64) [c]) (Const(32|64) [d])) => (ConstBool [c != d]) (EqPtr (Convert (Addr {x} _) _) (Addr {y} _)) => (ConstBool [x==y]) (NeqPtr (Convert (Addr {x} _) _) (Addr {y} _)) => (ConstBool [x!=y]) (EqPtr (LocalAddr _ _) (Addr _)) => (ConstBool [false]) (EqPtr (OffPtr (LocalAddr _ _)) (Addr _)) => (ConstBool [false]) (EqPtr (LocalAddr _ _) (OffPtr (Addr _))) => (ConstBool [false]) (EqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) => (ConstBool [false]) (NeqPtr (LocalAddr _ _) (Addr _)) => (ConstBool [true]) (NeqPtr (OffPtr (LocalAddr _ _)) (Addr _)) => (ConstBool [true]) (NeqPtr (LocalAddr _ _) (OffPtr (Addr _))) => (ConstBool [true]) (NeqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) => (ConstBool [true]) // Simplify address comparisons. (EqPtr (AddPtr p1 o1) p2) && isSamePtr(p1, p2) => (Not (IsNonNil o1)) (NeqPtr (AddPtr p1 o1) p2) && isSamePtr(p1, p2) => (IsNonNil o1) (EqPtr (Const(32|64) [0]) p) => (Not (IsNonNil p)) (NeqPtr (Const(32|64) [0]) p) => (IsNonNil p) (EqPtr (ConstNil) p) => (Not (IsNonNil p)) (NeqPtr (ConstNil) p) => (IsNonNil p) // Evaluate constant user nil checks. (IsNonNil (ConstNil)) => (ConstBool [false]) (IsNonNil (Const(32|64) [c])) => (ConstBool [c != 0]) (IsNonNil (Addr _) ) => (ConstBool [true]) (IsNonNil (Convert (Addr _) _)) => (ConstBool [true]) (IsNonNil (LocalAddr _ _)) => (ConstBool [true]) // Inline small or disjoint runtime.memmove calls with constant length. // See the comment in op Move in genericOps.go for discussion of the type. // // Note that we've lost any knowledge of the type and alignment requirements // of the source and destination. We only know the size, and that the type // contains no pointers. // The type of the move is not necessarily v.Args[0].Type().Elem()! // See issue 55122 for details. // // Because expand calls runs after prove, constants useful to this pattern may not appear. // Both versions need to exist; the memory and register variants. // // Match post-expansion calls, memory version. 
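// Source patterns that the EqPtr/NeqPtr/IsNonNil folds above evaluate at
// compile time, sketched with our own names (the memmove inlining introduced
// just above continues with its match rules below):
package p

var a, b int

// Folds to false: two distinct globals never share an address.
func globalsAlias() bool { return &a == &b }

// Folds to true: the address of a global is never nil.
func globalNonNil() bool { return &a != nil }

// Folds to false: a stack variable cannot alias a global
// (assuming x does not escape and stays a LocalAddr).
func localVsGlobal() bool {
	x := 0
	return &x == &a
}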
(SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const(64|32) [sz]) s2:(Store _ src s3:(Store {t} _ dst mem))))) && sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call) => (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem) // Match post-expansion calls, register version. (SelectN [0] call:(StaticCall {sym} dst src (Const(64|32) [sz]) mem)) && sz >= 0 && call.Uses == 1 // this will exclude all calls with results && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call) => (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem) // Match pre-expansion calls. (SelectN [0] call:(StaticLECall {sym} dst src (Const(64|32) [sz]) mem)) && sz >= 0 && call.Uses == 1 // this will exclude all calls with results && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call) => (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem) // De-virtualize late-expanded interface calls into late-expanded static calls. (InterLECall [argsize] {auxCall} (Addr {fn} (SB)) ___) => devirtLECall(v, fn.(*obj.LSym)) // Move and Zero optimizations. // Move source and destination may overlap. // Convert Moves into Zeros when the source is known to be zeros. (Move {t} [n] dst1 src mem:(Zero {t} [n] dst2 _)) && isSamePtr(src, dst2) => (Zero {t} [n] dst1 mem) (Move {t} [n] dst1 src mem:(VarDef (Zero {t} [n] dst0 _))) && isSamePtr(src, dst0) => (Zero {t} [n] dst1 mem) (Move {t} [n] dst (Addr {sym} (SB)) mem) && symIsROZero(sym) => (Zero {t} [n] dst mem) // Don't Store to variables that are about to be overwritten by Move/Zero. (Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem)) && isSamePtr(p1, p2) && store.Uses == 1 && n >= o2 + t2.Size() && clobber(store) => (Zero {t1} [n] p1 mem) (Move {t1} [n] dst1 src1 store:(Store {t2} op:(OffPtr [o2] dst2) _ mem)) && isSamePtr(dst1, dst2) && store.Uses == 1 && n >= o2 + t2.Size() && disjoint(src1, n, op, t2.Size()) && clobber(store) => (Move {t1} [n] dst1 src1 mem) // Don't Move to variables that are immediately completely overwritten. 
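// From Go source, the memmove rules above are what make small fixed-size
// copies free of runtime calls; a hedged sketch (our names), with the
// overwrite eliminations continuing below:
package p

// A 16-byte copy with a length known at compile time should be emitted as an
// inline Move rather than a call to runtime.memmove.
func copyHeader(dst, src *[16]byte) {
	*dst = *src
}

// copy with both lengths constant (4 here) is likewise inlinable.
func stamp(dst *[4]byte) {
	copy(dst[:], "GOGO")
}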
(Zero {t} [n] dst1 move:(Move {t} [n] dst2 _ mem)) && move.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move) => (Zero {t} [n] dst1 mem) (Move {t} [n] dst1 src1 move:(Move {t} [n] dst2 _ mem)) && move.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move) => (Move {t} [n] dst1 src1 mem) (Zero {t} [n] dst1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem))) && move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move, vardef) => (Zero {t} [n] dst1 (VarDef {x} mem)) (Move {t} [n] dst1 src1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem))) && move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move, vardef) => (Move {t} [n] dst1 src1 (VarDef {x} mem)) (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Move [n] p3 _ mem))) && m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3) => (Store {t1} op1 d1 (Store {t2} op2 d2 mem)) (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Move [n] p4 _ mem)))) && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4) => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem))) (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Move [n] p5 _ mem))))) && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size() + t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5) => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem)))) // Don't Zero variables that are immediately completely overwritten // before being accessed. 
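// A Go-level illustration of the elision described just above: a fresh local
// that is completely overwritten before any read never needs its implicit
// zeroing (type and function names are ours). The rules implementing this
// follow.
package p

type pair struct{ x, y int64 }

func makePair(x, y int64) pair {
	var p pair // implicit Zero of p ...
	p.x = x    // ... is dead once both fields are stored,
	p.y = y    // so the rules below remove it.
	return p
}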
(Move {t} [n] dst1 src1 zero:(Zero {t} [n] dst2 mem)) && zero.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero) => (Move {t} [n] dst1 src1 mem) (Move {t} [n] dst1 src1 vardef:(VarDef {x} zero:(Zero {t} [n] dst2 mem))) && zero.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero, vardef) => (Move {t} [n] dst1 src1 (VarDef {x} mem)) (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Zero [n] p3 mem))) && m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3) => (Store {t1} op1 d1 (Store {t2} op2 d2 mem)) (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Zero [n] p4 mem)))) && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4) => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem))) (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Zero [n] p5 mem))))) && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size() + t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5) => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem)))) // Don't Move from memory if the values are likely to already be // in registers. (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _))) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size() + t3.Size() => (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [0] dst) d2 mem)) (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _)))) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size() => (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [0] dst) d3 mem))) (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3 (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _))))) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size() + t5.Size() => (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store 
{t5} (OffPtr <tt5> [0] dst) d4 mem)))) // Same thing but with VarDef in the middle. (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _)))) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size() + t3.Size() => (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [0] dst) d2 mem)) (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _))))) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size() => (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [0] dst) d3 mem))) (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3 (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _)))))) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size() + t5.Size() => (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [0] dst) d4 mem)))) // Prefer to Zero and Store than to Move. 
(Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Zero {t3} [n] p3 _))) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2 + t2.Size() => (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Zero {t1} [n] dst mem)) (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Zero {t4} [n] p4 _)))) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2 + t2.Size() && n >= o3 + t3.Size() => (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Zero {t1} [n] dst mem))) (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Zero {t5} [n] p5 _))))) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size() => (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Zero {t1} [n] dst mem)))) (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Store {t5} (OffPtr <tt5> [o5] p5) d4 (Zero {t6} [n] p6 _)))))) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size() && n >= o5 + t5.Size() => (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [o5] dst) d4 (Zero {t1} [n] dst mem))))) (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Zero {t3} [n] p3 _)))) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2 + t2.Size() => (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Zero {t1} [n] dst mem)) (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Zero {t4} [n] p4 _))))) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2 + t2.Size() && n >= o3 + t3.Size() => (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Zero {t1} [n] dst mem))) (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Zero {t5} [n] p5 _)))))) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && 
t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size() => (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Zero {t1} [n] dst mem)))) (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Store {t5} (OffPtr <tt5> [o5] p5) d4 (Zero {t6} [n] p6 _))))))) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size() && n >= o5 + t5.Size() => (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [o5] dst) d4 (Zero {t1} [n] dst mem))))) (SelectN [0] call:(StaticLECall {sym} a x)) && needRaceCleanup(sym, call) && clobber(call) => x (SelectN [0] call:(StaticLECall {sym} x)) && needRaceCleanup(sym, call) && clobber(call) => x // When rewriting append to growslice, we use as the new length the result of // growslice so that we don't have to spill/restore the new length around the growslice call. // The exception here is that if the new length is a constant, avoiding spilling it // is pointless and its constantness is sometimes useful for subsequent optimizations. // See issue 56440. // Note there are 2 rules here, one for the pre-decomposed []T result and one for // the post-decomposed (*T,int,int) result. (The latter is generated after call expansion.) (SliceLen (SelectN [0] (StaticLECall {sym} _ newLen:(Const(64|32)) _ _ _ _))) && isSameCall(sym, "runtime.growslice") => newLen (SelectN [1] (StaticCall {sym} _ newLen:(Const(64|32)) _ _ _ _)) && v.Type.IsInteger() && isSameCall(sym, "runtime.growslice") => newLen // Collapse moving A -> B -> C into just A -> C. // Later passes (deadstore, elim unread auto) will remove the A -> B move, if possible. // This happens most commonly when B is an autotmp inserted earlier // during compilation to ensure correctness. // Take care that overlapping moves are preserved. // Restrict this optimization to the stack, to avoid duplicating loads from the heap; // see CL 145208 for discussion. (Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _)) && t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config)) => (Move {t1} [s] dst src midmem) // Same, but for large types that require VarDefs. (Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _))) && t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config)) => (Move {t1} [s] dst src midmem) // Don't zero the same bits twice. (Zero {t} [s] dst1 zero:(Zero {t} [s] dst2 _)) && isSamePtr(dst1, dst2) => zero (Zero {t} [s] dst1 vardef:(VarDef (Zero {t} [s] dst2 _))) && isSamePtr(dst1, dst2) => vardef // Elide self-moves. 
This only happens rarely (e.g test/fixedbugs/bug277.go). // However, this rule is needed to prevent the previous rule from looping forever in such cases. (Move dst src mem) && isSamePtr(dst, src) => mem // Constant rotate detection. ((Add64|Or64|Xor64) (Lsh64x64 x z:(Const64 <t> [c])) (Rsh64Ux64 x (Const64 [d]))) && c < 64 && d == 64-c && canRotate(config, 64) => (RotateLeft64 x z) ((Add32|Or32|Xor32) (Lsh32x64 x z:(Const64 <t> [c])) (Rsh32Ux64 x (Const64 [d]))) && c < 32 && d == 32-c && canRotate(config, 32) => (RotateLeft32 x z) ((Add16|Or16|Xor16) (Lsh16x64 x z:(Const64 <t> [c])) (Rsh16Ux64 x (Const64 [d]))) && c < 16 && d == 16-c && canRotate(config, 16) => (RotateLeft16 x z) ((Add8|Or8|Xor8) (Lsh8x64 x z:(Const64 <t> [c])) (Rsh8Ux64 x (Const64 [d]))) && c < 8 && d == 8-c && canRotate(config, 8) => (RotateLeft8 x z) // Non-constant rotate detection. // We use shiftIsBounded to make sure that neither of the shifts are >64. // Note: these rules are subtle when the shift amounts are 0/64, as Go shifts // are different from most native shifts. But it works out. ((Add64|Or64|Xor64) left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y) ((Add64|Or64|Xor64) left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y) ((Add64|Or64|Xor64) left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y) ((Add64|Or64|Xor64) left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y) ((Add64|Or64|Xor64) right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z) ((Add64|Or64|Xor64) right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z) ((Add64|Or64|Xor64) right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z) ((Add64|Or64|Xor64) right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z) ((Add32|Or32|Xor32) left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y) ((Add32|Or32|Xor32) left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y) ((Add32|Or32|Xor32) left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y) ((Add32|Or32|Xor32) left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y) ((Add32|Or32|Xor32) right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z) ((Add32|Or32|Xor32) right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) 
y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z) ((Add32|Or32|Xor32) right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z) ((Add32|Or32|Xor32) right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z) ((Add16|Or16|Xor16) left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y) ((Add16|Or16|Xor16) left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y) ((Add16|Or16|Xor16) left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y) ((Add16|Or16|Xor16) left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y) ((Add16|Or16|Xor16) right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z) ((Add16|Or16|Xor16) right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z) ((Add16|Or16|Xor16) right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z) ((Add16|Or16|Xor16) right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z) ((Add8|Or8|Xor8) left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y) ((Add8|Or8|Xor8) left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y) ((Add8|Or8|Xor8) left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y) ((Add8|Or8|Xor8) left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y) ((Add8|Or8|Xor8) right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z) ((Add8|Or8|Xor8) right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z) ((Add8|Or8|Xor8) right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z) ((Add8|Or8|Xor8) right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z) // Rotating by y&c, with c a mask that doesn't change the bottom bits, is the same as rotating by y. 
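// The detection rules above are what let the classic shift-pair idiom compile
// to a single hardware rotate; a standalone sketch (masking the shift amounts
// keeps both shifts bounded, which is what shiftIsBounded verifies). The
// rotate-amount simplifications described just above follow next.
package main

import (
	"fmt"
	"math/bits"
)

// rotl32 should compile to one rotate instruction on targets where
// canRotate(config, 32) holds.
func rotl32(x uint32, k uint) uint32 {
	return x<<(k&31) | x>>((32-k)&31)
}

func main() {
	fmt.Printf("%#08x\n", rotl32(0x12345678, 8)) // 0x34567812
	fmt.Println(rotl32(0xDEADBEEF, 13) == bits.RotateLeft32(0xDEADBEEF, 13)) // true
}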
(RotateLeft64 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&63 == 63 => (RotateLeft64 x y) (RotateLeft32 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&31 == 31 => (RotateLeft32 x y) (RotateLeft16 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&15 == 15 => (RotateLeft16 x y) (RotateLeft8 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&7 == 7 => (RotateLeft8 x y) // Rotating by -(y&c), with c a mask that doesn't change the bottom bits, is the same as rotating by -y. (RotateLeft64 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&63 == 63 => (RotateLeft64 x (Neg(64|32|16|8) <y.Type> y)) (RotateLeft32 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&31 == 31 => (RotateLeft32 x (Neg(64|32|16|8) <y.Type> y)) (RotateLeft16 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&15 == 15 => (RotateLeft16 x (Neg(64|32|16|8) <y.Type> y)) (RotateLeft8 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&7 == 7 => (RotateLeft8 x (Neg(64|32|16|8) <y.Type> y)) // Rotating by y+c, with c a multiple of the value width, is the same as rotating by y. (RotateLeft64 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&63 == 0 => (RotateLeft64 x y) (RotateLeft32 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&31 == 0 => (RotateLeft32 x y) (RotateLeft16 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&15 == 0 => (RotateLeft16 x y) (RotateLeft8 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&7 == 0 => (RotateLeft8 x y) // Rotating by c-y, with c a multiple of the value width, is the same as rotating by -y. (RotateLeft64 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&63 == 0 => (RotateLeft64 x (Neg(64|32|16|8) <y.Type> y)) (RotateLeft32 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&31 == 0 => (RotateLeft32 x (Neg(64|32|16|8) <y.Type> y)) (RotateLeft16 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&15 == 0 => (RotateLeft16 x (Neg(64|32|16|8) <y.Type> y)) (RotateLeft8 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&7 == 0 => (RotateLeft8 x (Neg(64|32|16|8) <y.Type> y)) // Ensure we don't do Const64 rotates in a 32-bit system. (RotateLeft64 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft64 x (Const32 <t> [int32(c)])) (RotateLeft32 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft32 x (Const32 <t> [int32(c)])) (RotateLeft16 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft16 x (Const32 <t> [int32(c)])) (RotateLeft8 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft8 x (Const32 <t> [int32(c)])) // Rotating by c, then by d, is the same as rotating by c+d. // We're trading a rotate for an add, which seems generally a good choice. It is especially good when c and d are constants. // This rule is a bit tricky as c and d might be different widths. We handle only cases where they are the same width. (RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 8 && d.Type.Size() == 8 => (RotateLeft(64|32|16|8) x (Add64 <c.Type> c d)) (RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 4 && d.Type.Size() == 4 => (RotateLeft(64|32|16|8) x (Add32 <c.Type> c d)) (RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 2 && d.Type.Size() == 2 => (RotateLeft(64|32|16|8) x (Add16 <c.Type> c d)) (RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 1 && d.Type.Size() == 1 => (RotateLeft(64|32|16|8) x (Add8 <c.Type> c d)) // Loading constant values from dictionaries and itabs. 
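// A quick check of the composition identity used just above: rotating by c and
// then by d is the same as rotating once by c+d, wraparound included, which is
// why trading the second rotate for an add is safe. The dictionary/itab load
// rules follow below.
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x := uint32(0x13572468)
	for c := 0; c < 64; c++ {
		for d := 0; d < 64; d++ {
			composed := bits.RotateLeft32(bits.RotateLeft32(x, c), d)
			direct := bits.RotateLeft32(x, c+d)
			if composed != direct {
				fmt.Println("mismatch:", c, d)
				return
			}
		}
	}
	fmt.Println("ok")
}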
(Load <t> (OffPtr [off] (Addr {s} sb) ) _) && t.IsUintptr() && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb) (Load <t> (OffPtr [off] (Convert (Addr {s} sb) _) ) _) && t.IsUintptr() && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb) (Load <t> (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _) && t.IsUintptr() && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb) (Load <t> (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _) && t.IsUintptr() && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb) // Loading constant values from runtime._type.hash. (Load <t> (OffPtr [off] (Addr {sym} _) ) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)]) (Load <t> (OffPtr [off] (Convert (Addr {sym} _) _) ) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)]) (Load <t> (OffPtr [off] (ITab (IMake (Addr {sym} _) _))) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)]) (Load <t> (OffPtr [off] (ITab (IMake (Convert (Addr {sym} _) _) _))) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)]) // Calling cmpstring a second time with the same arguments in the // same memory state can reuse the results of the first call. // See issue 61725. // Note that this could pretty easily generalize to any pure function. (SelectN [0] (StaticLECall {f} x y (SelectN [1] c:(StaticLECall {g} x y mem)))) && isSameCall(f, "runtime.cmpstring") && isSameCall(g, "runtime.cmpstring") => @c.Block (SelectN [0] <typ.Int> c) // If we don't use the result of cmpstring, might as well not call it. // Note that this could pretty easily generalize to any pure function. (SelectN [1] c:(StaticLECall {f} _ _ mem)) && c.Uses == 1 && isSameCall(f, "runtime.cmpstring") && clobber(c) => mem
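// The source-level effect of the cmpstring rules above, sketched with our own
// names: two relational comparisons of the same strings in the same memory
// state should share one runtime.cmpstring call, and a comparison whose result
// is unused disappears entirely.
package p

// Both results should be derived from a single runtime.cmpstring call.
func order(a, b string) (lt, gt bool) {
	return a < b, a > b
}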
go/src/cmd/compile/internal/ssa/_gen/generic.rules/0
{ "file_path": "go/src/cmd/compile/internal/ssa/_gen/generic.rules", "repo_id": "go", "token_count": 68632 }
101
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

// combine copyelim and phielim into a single pass.
// copyelim removes all uses of OpCopy values from f.
// A subsequent deadcode pass is needed to actually remove the copies.
func copyelim(f *Func) {
    phielim(f)

    // loop of copyelimValue(v) process has been done in phielim() pass.
    // Update block control values.
    for _, b := range f.Blocks {
        for i, v := range b.ControlValues() {
            if v.Op == OpCopy {
                b.ReplaceControl(i, v.Args[0])
            }
        }
    }

    // Update named values.
    for _, name := range f.Names {
        values := f.NamedValues[*name]
        for i, v := range values {
            if v.Op == OpCopy {
                values[i] = v.Args[0]
            }
        }
    }
}

// copySource returns the (non-copy) op which is the
// ultimate source of v. v must be a copy op.
func copySource(v *Value) *Value {
    w := v.Args[0]

    // This loop is just:
    // for w.Op == OpCopy {
    //     w = w.Args[0]
    // }
    // but we take some extra care to make sure we
    // don't get stuck in an infinite loop.
    // Infinite copy loops may happen in unreachable code.
    // (TODO: or can they? Needs a test.)
    slow := w
    var advance bool
    for w.Op == OpCopy {
        w = w.Args[0]
        if w == slow {
            w.reset(OpUnknown)
            break
        }
        if advance {
            slow = slow.Args[0]
        }
        advance = !advance
    }

    // The answer is w. Update all the copies we saw
    // to point directly to w. Doing this update makes
    // sure that we don't end up doing O(n^2) work
    // for a chain of n copies.
    for v != w {
        x := v.Args[0]
        v.SetArg(0, w)
        v = x
    }
    return w
}

// copyelimValue ensures that no args of v are copies.
func copyelimValue(v *Value) {
    for i, a := range v.Args {
        if a.Op == OpCopy {
            v.SetArg(i, copySource(a))
        }
    }
}

// phielim eliminates redundant phi values from f.
// A phi is redundant if its arguments are all equal. For
// purposes of counting, ignore the phi itself. Both of
// these phis are redundant:
//
//	v = phi(x,x,x)
//	v = phi(x,v,x,v)
//
// We repeat this process to also catch situations like:
//
//	v = phi(x, phi(x, x), phi(x, v))
//
// TODO: Can we also simplify cases like:
//
//	v = phi(v, w, x)
//	w = phi(v, w, x)
//
// and would that be useful?
func phielim(f *Func) {
    for {
        change := false
        for _, b := range f.Blocks {
            for _, v := range b.Values {
                // This is an early place in SSA where all values are examined.
                // Rewrite all 0-sized Go values to remove accessors, dereferences, loads, etc.
                if t := v.Type; (t.IsStruct() || t.IsArray()) && t.Size() == 0 {
                    if t.IsStruct() {
                        v.reset(OpStructMake0)
                    } else {
                        v.reset(OpArrayMake0)
                    }
                }
                // Modify all values so no arg (including args
                // of OpCopy) is a copy.
                copyelimValue(v)
                change = phielimValue(v) || change
            }
        }
        if !change {
            break
        }
    }
}

// phielimValue tries to convert the phi v to a copy.
func phielimValue(v *Value) bool {
    if v.Op != OpPhi {
        return false
    }

    // If there are two distinct args of v which
    // are not v itself, then the phi must remain.
    // Otherwise, we can replace it with a copy.
    var w *Value
    for _, x := range v.Args {
        if x == v {
            continue
        }
        if x == w {
            continue
        }
        if w != nil {
            return false
        }
        w = x
    }

    if w == nil {
        // v references only itself. It must be in
        // a dead code loop. Don't bother modifying it.
        return false
    }
    v.Op = OpCopy
    v.SetArgs1(w)
    f := v.Block.Func
    if f.pass.debug > 0 {
        f.Warnl(v.Pos, "eliminated phi")
    }
    return true
}
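copySource guards against cycles of OpCopy values (possible only in unreachable code) by advancing a second pointer at half speed: if the fast pointer ever laps the slow one, the chain loops. The same trick on an ordinary linked structure, as a standalone sketch (the type node and chaseWithCycleCheck are ours for illustration, not part of this package):

package main

import "fmt"

type node struct{ next *node }

// chaseWithCycleCheck follows next pointers to the end of a chain.
// w moves every iteration, slow moves every other iteration
// (mirroring the advance flag in copySource), so in a cyclic chain
// w eventually meets slow and the loop is detected.
func chaseWithCycleCheck(start *node) (end *node, cyclic bool) {
    w := start
    slow := w
    advance := false
    for w.next != nil {
        w = w.next
        if w == slow {
            return nil, true // w lapped slow: the chain is a loop
        }
        if advance {
            slow = slow.next
        }
        advance = !advance
    }
    return w, false
}

func main() {
    a, b, c := &node{}, &node{}, &node{}
    a.next, b.next = b, c
    end, cyclic := chaseWithCycleCheck(a)
    fmt.Println(end == c, cyclic) // true false

    c.next = a // introduce a cycle
    _, cyclic = chaseWithCycleCheck(a)
    fmt.Println(cyclic) // true
}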
go/src/cmd/compile/internal/ssa/copyelim.go/0
{ "file_path": "go/src/cmd/compile/internal/ssa/copyelim.go", "repo_id": "go", "token_count": 1454 }
102
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

import (
    "testing"

    "cmd/compile/internal/base"
    "cmd/compile/internal/ir"
    "cmd/compile/internal/typecheck"
    "cmd/compile/internal/types"
    "cmd/internal/obj"
    "cmd/internal/obj/arm64"
    "cmd/internal/obj/s390x"
    "cmd/internal/obj/x86"
    "cmd/internal/src"
    "cmd/internal/sys"
)

var CheckFunc = checkFunc
var Opt = opt
var Deadcode = deadcode
var Copyelim = copyelim

var testCtxts = map[string]*obj.Link{
    "amd64": obj.Linknew(&x86.Linkamd64),
    "s390x": obj.Linknew(&s390x.Links390x),
    "arm64": obj.Linknew(&arm64.Linkarm64),
}

func testConfig(tb testing.TB) *Conf      { return testConfigArch(tb, "amd64") }
func testConfigS390X(tb testing.TB) *Conf { return testConfigArch(tb, "s390x") }
func testConfigARM64(tb testing.TB) *Conf { return testConfigArch(tb, "arm64") }

func testConfigArch(tb testing.TB, arch string) *Conf {
    ctxt, ok := testCtxts[arch]
    if !ok {
        tb.Fatalf("unknown arch %s", arch)
    }
    if ctxt.Arch.PtrSize != 8 {
        tb.Fatal("testTypes is 64-bit only")
    }
    c := &Conf{
        config: NewConfig(arch, testTypes, ctxt, true, false),
        tb:     tb,
    }
    return c
}

type Conf struct {
    config *Config
    tb     testing.TB
    fe     Frontend
}

func (c *Conf) Frontend() Frontend {
    if c.fe == nil {
        pkg := types.NewPkg("my/import/path", "path")
        fn := ir.NewFunc(src.NoXPos, src.NoXPos, pkg.Lookup("function"), types.NewSignature(nil, nil, nil))
        fn.DeclareParams(true)
        fn.LSym = &obj.LSym{Name: "my/import/path.function"}

        c.fe = TestFrontend{
            t:    c.tb,
            ctxt: c.config.ctxt,
            f:    fn,
        }
    }
    return c.fe
}

func (c *Conf) Temp(typ *types.Type) *ir.Name {
    n := ir.NewNameAt(src.NoXPos, &types.Sym{Name: "aFakeAuto"}, typ)
    n.Class = ir.PAUTO
    return n
}

// TestFrontend is a test-only frontend.
// It assumes 64 bit integers and pointers.
type TestFrontend struct {
    t    testing.TB
    ctxt *obj.Link
    f    *ir.Func
}

func (TestFrontend) StringData(s string) *obj.LSym {
    return nil
}
func (d TestFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
    return LocalSlot{N: parent.N, Type: t, Off: offset}
}
func (d TestFrontend) Syslook(s string) *obj.LSym {
    return d.ctxt.Lookup(s)
}
func (TestFrontend) UseWriteBarrier() bool {
    return true // only writebarrier_test cares
}

func (d TestFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) }
func (d TestFrontend) Log() bool                            { return true }

func (d TestFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
func (d TestFrontend) Warnl(_ src.XPos, msg string, args ...interface{})  { d.t.Logf(msg, args...) }
func (d TestFrontend) Debug_checknil() bool                               { return false }

func (d TestFrontend) Func() *ir.Func {
    return d.f
}

var testTypes Types

func init() {
    // TODO(mdempsky): Push into types.InitUniverse or typecheck.InitUniverse.
    types.PtrSize = 8
    types.RegSize = 8
    types.MaxWidth = 1 << 50

    base.Ctxt = &obj.Link{Arch: &obj.LinkArch{Arch: &sys.Arch{Alignment: 1, CanMergeLoads: true}}}
    typecheck.InitUniverse()
    testTypes.SetTypPtrs()
}
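This file uses the standard export_test.go idiom: because it is compiled only during tests and lives in package ssa itself, it may alias unexported identifiers (checkFunc, opt, deadcode, copyelim) to exported names that test files can reach. A minimal sketch of the idiom with invented names (package widget and helper parse do not exist anywhere; they are purely illustrative):

// widget/widget.go
package widget

import "strconv"

// parse is unexported; black-box tests cannot call it directly.
func parse(s string) (int, error) {
    return strconv.Atoi(s)
}

// widget/export_test.go — same package, compiled only for tests.
package widget

var Parse = parse // re-export the internal helper for tests

// widget/widget_test.go — black-box tests in package widget_test.
package widget_test

import (
    "testing"

    "example.com/widget"
)

func TestParse(t *testing.T) {
    if n, err := widget.Parse("42"); err != nil || n != 42 {
        t.Fatalf("Parse(%q) = %d, %v", "42", n, err)
    }
}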
go/src/cmd/compile/internal/ssa/export_test.go/0
{ "file_path": "go/src/cmd/compile/internal/ssa/export_test.go", "repo_id": "go", "token_count": 1352 }
103
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

import (
    "math/bits"
)

// Code to compute lowest common ancestors in the dominator tree.
// https://en.wikipedia.org/wiki/Lowest_common_ancestor
// https://en.wikipedia.org/wiki/Range_minimum_query#Solution_using_constant_time_and_linearithmic_space

// lcaRange is a data structure that can compute lowest common ancestor queries
// in O(n lg n) precomputed space and O(1) time per query.
type lcaRange struct {
    // Additional information about each block (indexed by block ID).
    blocks []lcaRangeBlock

    // Data structure for range minimum queries.
    // rangeMin[k][i] contains the ID of the minimum depth block
    // in the Euler tour from positions i to i+1<<k-1, inclusive.
    rangeMin [][]ID
}

type lcaRangeBlock struct {
    b          *Block
    parent     ID    // parent in dominator tree. 0 = no parent (entry or unreachable)
    firstChild ID    // first child in dominator tree
    sibling    ID    // next child of parent
    pos        int32 // an index in the Euler tour where this block appears (any one of its occurrences)
    depth      int32 // depth in dominator tree (root=0, its children=1, etc.)
}

func makeLCArange(f *Func) *lcaRange {
    dom := f.Idom()

    // Build tree
    blocks := make([]lcaRangeBlock, f.NumBlocks())
    for _, b := range f.Blocks {
        blocks[b.ID].b = b
        if dom[b.ID] == nil {
            continue // entry or unreachable
        }
        parent := dom[b.ID].ID
        blocks[b.ID].parent = parent
        blocks[b.ID].sibling = blocks[parent].firstChild
        blocks[parent].firstChild = b.ID
    }

    // Compute euler tour ordering.
    // Each reachable block will appear #children+1 times in the tour.
    tour := make([]ID, 0, f.NumBlocks()*2-1)
    type queueEntry struct {
        bid ID // block to work on
        cid ID // child we're already working on (0 = haven't started yet)
    }
    q := []queueEntry{{f.Entry.ID, 0}}
    for len(q) > 0 {
        n := len(q) - 1
        bid := q[n].bid
        cid := q[n].cid
        q = q[:n]

        // Add block to tour.
        blocks[bid].pos = int32(len(tour))
        tour = append(tour, bid)

        // Proceed down next child edge (if any).
        if cid == 0 {
            // This is our first visit to b. Set its depth.
            blocks[bid].depth = blocks[blocks[bid].parent].depth + 1
            // Then explore its first child.
            cid = blocks[bid].firstChild
        } else {
            // We've seen b before. Explore the next child.
            cid = blocks[cid].sibling
        }
        if cid != 0 {
            q = append(q, queueEntry{bid, cid}, queueEntry{cid, 0})
        }
    }

    // Compute fast range-minimum query data structure
    rangeMin := make([][]ID, 0, bits.Len64(uint64(len(tour))))
    rangeMin = append(rangeMin, tour) // 1-size windows are just the tour itself.
    for logS, s := 1, 2; s < len(tour); logS, s = logS+1, s*2 {
        r := make([]ID, len(tour)-s+1)
        for i := 0; i < len(tour)-s+1; i++ {
            bid := rangeMin[logS-1][i]
            bid2 := rangeMin[logS-1][i+s/2]
            if blocks[bid2].depth < blocks[bid].depth {
                bid = bid2
            }
            r[i] = bid
        }
        rangeMin = append(rangeMin, r)
    }

    return &lcaRange{blocks: blocks, rangeMin: rangeMin}
}

// find returns the lowest common ancestor of a and b.
func (lca *lcaRange) find(a, b *Block) *Block {
    if a == b {
        return a
    }
    // Find the positions of a and b in the Euler tour.
    p1 := lca.blocks[a.ID].pos
    p2 := lca.blocks[b.ID].pos
    if p1 > p2 {
        p1, p2 = p2, p1
    }

    // The lowest common ancestor is the minimum depth block
    // on the tour from p1 to p2. We've precomputed minimum
    // depth blocks for powers-of-two subsequences of the tour.
    // Combine the right two precomputed values to get the answer.
    logS := uint(log64(int64(p2 - p1)))
    bid1 := lca.rangeMin[logS][p1]
    bid2 := lca.rangeMin[logS][p2-1<<logS+1]
    if lca.blocks[bid1].depth < lca.blocks[bid2].depth {
        return lca.blocks[bid1].b
    }
    return lca.blocks[bid2].b
}
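find is a textbook sparse-table range-minimum query: two precomputed power-of-two windows, anchored at each end of [p1, p2], overlap to cover the whole range. The same mechanics on a plain []int, as a self-contained sketch (buildSparse and queryMin are illustrative names, not compiler code; the min builtin needs Go 1.21+):

package main

import (
    "fmt"
    "math/bits"
)

// buildSparse precomputes minima of windows of length 1<<k,
// the same shape as lcaRange.rangeMin but over plain ints.
func buildSparse(a []int) [][]int {
    table := [][]int{append([]int(nil), a...)}
    for s := 2; s <= len(a); s *= 2 {
        prev := table[len(table)-1]
        r := make([]int, len(a)-s+1)
        for i := range r {
            r[i] = min(prev[i], prev[i+s/2])
        }
        table = append(table, r)
    }
    return table
}

// queryMin returns the minimum of a[p1..p2] (inclusive, p1 <= p2) by
// overlapping two 1<<k windows, in the same way as lcaRange.find.
func queryMin(table [][]int, p1, p2 int) int {
    k := bits.Len(uint(p2-p1+1)) - 1 // largest k with 1<<k <= window size
    return min(table[k][p1], table[k][p2-1<<k+1])
}

func main() {
    a := []int{5, 2, 4, 7, 1, 3, 6}
    t := buildSparse(a)
    fmt.Println(queryMin(t, 1, 3)) // 2
    fmt.Println(queryMin(t, 2, 6)) // 1
    fmt.Println(queryMin(t, 4, 4)) // 1
}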
go/src/cmd/compile/internal/ssa/lca.go/0
{ "file_path": "go/src/cmd/compile/internal/ssa/lca.go", "repo_id": "go", "token_count": 1478 }
104
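The record that follows is the machine-generated ARM64 rewriter (note its "DO NOT EDIT" header). Each rule in _gen/ARM64.rules becomes a match/cond/result stanza inside a per-op function; a top-level switch on v.Op dispatches to those functions, per-op functions begin by binding v_0, v_1, ... to v.Args, and commutative patterns are retried with swapped arguments via the _i0 loop. For example, the rule (ADDconst [0] x) => x appears verbatim inside rewriteValueARM64_OpARM64ADDconst below as a one-shot for { ... break } matcher:

// match: (ADDconst [0] x)
// result: x
for {
    if auxIntToInt64(v.AuxInt) != 0 {
        break
    }
    x := v_0
    v.copyOf(x)
    return true
}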
// Code generated from _gen/ARM64.rules using 'go generate'; DO NOT EDIT. package ssa import "cmd/compile/internal/types" func rewriteValueARM64(v *Value) bool { switch v.Op { case OpARM64ADCSflags: return rewriteValueARM64_OpARM64ADCSflags(v) case OpARM64ADD: return rewriteValueARM64_OpARM64ADD(v) case OpARM64ADDSflags: return rewriteValueARM64_OpARM64ADDSflags(v) case OpARM64ADDconst: return rewriteValueARM64_OpARM64ADDconst(v) case OpARM64ADDshiftLL: return rewriteValueARM64_OpARM64ADDshiftLL(v) case OpARM64ADDshiftRA: return rewriteValueARM64_OpARM64ADDshiftRA(v) case OpARM64ADDshiftRL: return rewriteValueARM64_OpARM64ADDshiftRL(v) case OpARM64AND: return rewriteValueARM64_OpARM64AND(v) case OpARM64ANDconst: return rewriteValueARM64_OpARM64ANDconst(v) case OpARM64ANDshiftLL: return rewriteValueARM64_OpARM64ANDshiftLL(v) case OpARM64ANDshiftRA: return rewriteValueARM64_OpARM64ANDshiftRA(v) case OpARM64ANDshiftRL: return rewriteValueARM64_OpARM64ANDshiftRL(v) case OpARM64ANDshiftRO: return rewriteValueARM64_OpARM64ANDshiftRO(v) case OpARM64BIC: return rewriteValueARM64_OpARM64BIC(v) case OpARM64BICshiftLL: return rewriteValueARM64_OpARM64BICshiftLL(v) case OpARM64BICshiftRA: return rewriteValueARM64_OpARM64BICshiftRA(v) case OpARM64BICshiftRL: return rewriteValueARM64_OpARM64BICshiftRL(v) case OpARM64BICshiftRO: return rewriteValueARM64_OpARM64BICshiftRO(v) case OpARM64CMN: return rewriteValueARM64_OpARM64CMN(v) case OpARM64CMNW: return rewriteValueARM64_OpARM64CMNW(v) case OpARM64CMNWconst: return rewriteValueARM64_OpARM64CMNWconst(v) case OpARM64CMNconst: return rewriteValueARM64_OpARM64CMNconst(v) case OpARM64CMNshiftLL: return rewriteValueARM64_OpARM64CMNshiftLL(v) case OpARM64CMNshiftRA: return rewriteValueARM64_OpARM64CMNshiftRA(v) case OpARM64CMNshiftRL: return rewriteValueARM64_OpARM64CMNshiftRL(v) case OpARM64CMP: return rewriteValueARM64_OpARM64CMP(v) case OpARM64CMPW: return rewriteValueARM64_OpARM64CMPW(v) case OpARM64CMPWconst: return rewriteValueARM64_OpARM64CMPWconst(v) case OpARM64CMPconst: return rewriteValueARM64_OpARM64CMPconst(v) case OpARM64CMPshiftLL: return rewriteValueARM64_OpARM64CMPshiftLL(v) case OpARM64CMPshiftRA: return rewriteValueARM64_OpARM64CMPshiftRA(v) case OpARM64CMPshiftRL: return rewriteValueARM64_OpARM64CMPshiftRL(v) case OpARM64CSEL: return rewriteValueARM64_OpARM64CSEL(v) case OpARM64CSEL0: return rewriteValueARM64_OpARM64CSEL0(v) case OpARM64CSETM: return rewriteValueARM64_OpARM64CSETM(v) case OpARM64CSINC: return rewriteValueARM64_OpARM64CSINC(v) case OpARM64CSINV: return rewriteValueARM64_OpARM64CSINV(v) case OpARM64CSNEG: return rewriteValueARM64_OpARM64CSNEG(v) case OpARM64DIV: return rewriteValueARM64_OpARM64DIV(v) case OpARM64DIVW: return rewriteValueARM64_OpARM64DIVW(v) case OpARM64EON: return rewriteValueARM64_OpARM64EON(v) case OpARM64EONshiftLL: return rewriteValueARM64_OpARM64EONshiftLL(v) case OpARM64EONshiftRA: return rewriteValueARM64_OpARM64EONshiftRA(v) case OpARM64EONshiftRL: return rewriteValueARM64_OpARM64EONshiftRL(v) case OpARM64EONshiftRO: return rewriteValueARM64_OpARM64EONshiftRO(v) case OpARM64Equal: return rewriteValueARM64_OpARM64Equal(v) case OpARM64FADDD: return rewriteValueARM64_OpARM64FADDD(v) case OpARM64FADDS: return rewriteValueARM64_OpARM64FADDS(v) case OpARM64FCMPD: return rewriteValueARM64_OpARM64FCMPD(v) case OpARM64FCMPS: return rewriteValueARM64_OpARM64FCMPS(v) case OpARM64FMOVDfpgp: return rewriteValueARM64_OpARM64FMOVDfpgp(v) case OpARM64FMOVDgpfp: return rewriteValueARM64_OpARM64FMOVDgpfp(v) case 
OpARM64FMOVDload: return rewriteValueARM64_OpARM64FMOVDload(v) case OpARM64FMOVDloadidx: return rewriteValueARM64_OpARM64FMOVDloadidx(v) case OpARM64FMOVDloadidx8: return rewriteValueARM64_OpARM64FMOVDloadidx8(v) case OpARM64FMOVDstore: return rewriteValueARM64_OpARM64FMOVDstore(v) case OpARM64FMOVDstoreidx: return rewriteValueARM64_OpARM64FMOVDstoreidx(v) case OpARM64FMOVDstoreidx8: return rewriteValueARM64_OpARM64FMOVDstoreidx8(v) case OpARM64FMOVSload: return rewriteValueARM64_OpARM64FMOVSload(v) case OpARM64FMOVSloadidx: return rewriteValueARM64_OpARM64FMOVSloadidx(v) case OpARM64FMOVSloadidx4: return rewriteValueARM64_OpARM64FMOVSloadidx4(v) case OpARM64FMOVSstore: return rewriteValueARM64_OpARM64FMOVSstore(v) case OpARM64FMOVSstoreidx: return rewriteValueARM64_OpARM64FMOVSstoreidx(v) case OpARM64FMOVSstoreidx4: return rewriteValueARM64_OpARM64FMOVSstoreidx4(v) case OpARM64FMULD: return rewriteValueARM64_OpARM64FMULD(v) case OpARM64FMULS: return rewriteValueARM64_OpARM64FMULS(v) case OpARM64FNEGD: return rewriteValueARM64_OpARM64FNEGD(v) case OpARM64FNEGS: return rewriteValueARM64_OpARM64FNEGS(v) case OpARM64FNMULD: return rewriteValueARM64_OpARM64FNMULD(v) case OpARM64FNMULS: return rewriteValueARM64_OpARM64FNMULS(v) case OpARM64FSUBD: return rewriteValueARM64_OpARM64FSUBD(v) case OpARM64FSUBS: return rewriteValueARM64_OpARM64FSUBS(v) case OpARM64GreaterEqual: return rewriteValueARM64_OpARM64GreaterEqual(v) case OpARM64GreaterEqualF: return rewriteValueARM64_OpARM64GreaterEqualF(v) case OpARM64GreaterEqualNoov: return rewriteValueARM64_OpARM64GreaterEqualNoov(v) case OpARM64GreaterEqualU: return rewriteValueARM64_OpARM64GreaterEqualU(v) case OpARM64GreaterThan: return rewriteValueARM64_OpARM64GreaterThan(v) case OpARM64GreaterThanF: return rewriteValueARM64_OpARM64GreaterThanF(v) case OpARM64GreaterThanU: return rewriteValueARM64_OpARM64GreaterThanU(v) case OpARM64LDP: return rewriteValueARM64_OpARM64LDP(v) case OpARM64LessEqual: return rewriteValueARM64_OpARM64LessEqual(v) case OpARM64LessEqualF: return rewriteValueARM64_OpARM64LessEqualF(v) case OpARM64LessEqualU: return rewriteValueARM64_OpARM64LessEqualU(v) case OpARM64LessThan: return rewriteValueARM64_OpARM64LessThan(v) case OpARM64LessThanF: return rewriteValueARM64_OpARM64LessThanF(v) case OpARM64LessThanNoov: return rewriteValueARM64_OpARM64LessThanNoov(v) case OpARM64LessThanU: return rewriteValueARM64_OpARM64LessThanU(v) case OpARM64MADD: return rewriteValueARM64_OpARM64MADD(v) case OpARM64MADDW: return rewriteValueARM64_OpARM64MADDW(v) case OpARM64MNEG: return rewriteValueARM64_OpARM64MNEG(v) case OpARM64MNEGW: return rewriteValueARM64_OpARM64MNEGW(v) case OpARM64MOD: return rewriteValueARM64_OpARM64MOD(v) case OpARM64MODW: return rewriteValueARM64_OpARM64MODW(v) case OpARM64MOVBUload: return rewriteValueARM64_OpARM64MOVBUload(v) case OpARM64MOVBUloadidx: return rewriteValueARM64_OpARM64MOVBUloadidx(v) case OpARM64MOVBUreg: return rewriteValueARM64_OpARM64MOVBUreg(v) case OpARM64MOVBload: return rewriteValueARM64_OpARM64MOVBload(v) case OpARM64MOVBloadidx: return rewriteValueARM64_OpARM64MOVBloadidx(v) case OpARM64MOVBreg: return rewriteValueARM64_OpARM64MOVBreg(v) case OpARM64MOVBstore: return rewriteValueARM64_OpARM64MOVBstore(v) case OpARM64MOVBstoreidx: return rewriteValueARM64_OpARM64MOVBstoreidx(v) case OpARM64MOVBstorezero: return rewriteValueARM64_OpARM64MOVBstorezero(v) case OpARM64MOVBstorezeroidx: return rewriteValueARM64_OpARM64MOVBstorezeroidx(v) case OpARM64MOVDload: return 
rewriteValueARM64_OpARM64MOVDload(v) case OpARM64MOVDloadidx: return rewriteValueARM64_OpARM64MOVDloadidx(v) case OpARM64MOVDloadidx8: return rewriteValueARM64_OpARM64MOVDloadidx8(v) case OpARM64MOVDnop: return rewriteValueARM64_OpARM64MOVDnop(v) case OpARM64MOVDreg: return rewriteValueARM64_OpARM64MOVDreg(v) case OpARM64MOVDstore: return rewriteValueARM64_OpARM64MOVDstore(v) case OpARM64MOVDstoreidx: return rewriteValueARM64_OpARM64MOVDstoreidx(v) case OpARM64MOVDstoreidx8: return rewriteValueARM64_OpARM64MOVDstoreidx8(v) case OpARM64MOVDstorezero: return rewriteValueARM64_OpARM64MOVDstorezero(v) case OpARM64MOVDstorezeroidx: return rewriteValueARM64_OpARM64MOVDstorezeroidx(v) case OpARM64MOVDstorezeroidx8: return rewriteValueARM64_OpARM64MOVDstorezeroidx8(v) case OpARM64MOVHUload: return rewriteValueARM64_OpARM64MOVHUload(v) case OpARM64MOVHUloadidx: return rewriteValueARM64_OpARM64MOVHUloadidx(v) case OpARM64MOVHUloadidx2: return rewriteValueARM64_OpARM64MOVHUloadidx2(v) case OpARM64MOVHUreg: return rewriteValueARM64_OpARM64MOVHUreg(v) case OpARM64MOVHload: return rewriteValueARM64_OpARM64MOVHload(v) case OpARM64MOVHloadidx: return rewriteValueARM64_OpARM64MOVHloadidx(v) case OpARM64MOVHloadidx2: return rewriteValueARM64_OpARM64MOVHloadidx2(v) case OpARM64MOVHreg: return rewriteValueARM64_OpARM64MOVHreg(v) case OpARM64MOVHstore: return rewriteValueARM64_OpARM64MOVHstore(v) case OpARM64MOVHstoreidx: return rewriteValueARM64_OpARM64MOVHstoreidx(v) case OpARM64MOVHstoreidx2: return rewriteValueARM64_OpARM64MOVHstoreidx2(v) case OpARM64MOVHstorezero: return rewriteValueARM64_OpARM64MOVHstorezero(v) case OpARM64MOVHstorezeroidx: return rewriteValueARM64_OpARM64MOVHstorezeroidx(v) case OpARM64MOVHstorezeroidx2: return rewriteValueARM64_OpARM64MOVHstorezeroidx2(v) case OpARM64MOVQstorezero: return rewriteValueARM64_OpARM64MOVQstorezero(v) case OpARM64MOVWUload: return rewriteValueARM64_OpARM64MOVWUload(v) case OpARM64MOVWUloadidx: return rewriteValueARM64_OpARM64MOVWUloadidx(v) case OpARM64MOVWUloadidx4: return rewriteValueARM64_OpARM64MOVWUloadidx4(v) case OpARM64MOVWUreg: return rewriteValueARM64_OpARM64MOVWUreg(v) case OpARM64MOVWload: return rewriteValueARM64_OpARM64MOVWload(v) case OpARM64MOVWloadidx: return rewriteValueARM64_OpARM64MOVWloadidx(v) case OpARM64MOVWloadidx4: return rewriteValueARM64_OpARM64MOVWloadidx4(v) case OpARM64MOVWreg: return rewriteValueARM64_OpARM64MOVWreg(v) case OpARM64MOVWstore: return rewriteValueARM64_OpARM64MOVWstore(v) case OpARM64MOVWstoreidx: return rewriteValueARM64_OpARM64MOVWstoreidx(v) case OpARM64MOVWstoreidx4: return rewriteValueARM64_OpARM64MOVWstoreidx4(v) case OpARM64MOVWstorezero: return rewriteValueARM64_OpARM64MOVWstorezero(v) case OpARM64MOVWstorezeroidx: return rewriteValueARM64_OpARM64MOVWstorezeroidx(v) case OpARM64MOVWstorezeroidx4: return rewriteValueARM64_OpARM64MOVWstorezeroidx4(v) case OpARM64MSUB: return rewriteValueARM64_OpARM64MSUB(v) case OpARM64MSUBW: return rewriteValueARM64_OpARM64MSUBW(v) case OpARM64MUL: return rewriteValueARM64_OpARM64MUL(v) case OpARM64MULW: return rewriteValueARM64_OpARM64MULW(v) case OpARM64MVN: return rewriteValueARM64_OpARM64MVN(v) case OpARM64MVNshiftLL: return rewriteValueARM64_OpARM64MVNshiftLL(v) case OpARM64MVNshiftRA: return rewriteValueARM64_OpARM64MVNshiftRA(v) case OpARM64MVNshiftRL: return rewriteValueARM64_OpARM64MVNshiftRL(v) case OpARM64MVNshiftRO: return rewriteValueARM64_OpARM64MVNshiftRO(v) case OpARM64NEG: return rewriteValueARM64_OpARM64NEG(v) case OpARM64NEGshiftLL: return 
rewriteValueARM64_OpARM64NEGshiftLL(v) case OpARM64NEGshiftRA: return rewriteValueARM64_OpARM64NEGshiftRA(v) case OpARM64NEGshiftRL: return rewriteValueARM64_OpARM64NEGshiftRL(v) case OpARM64NotEqual: return rewriteValueARM64_OpARM64NotEqual(v) case OpARM64OR: return rewriteValueARM64_OpARM64OR(v) case OpARM64ORN: return rewriteValueARM64_OpARM64ORN(v) case OpARM64ORNshiftLL: return rewriteValueARM64_OpARM64ORNshiftLL(v) case OpARM64ORNshiftRA: return rewriteValueARM64_OpARM64ORNshiftRA(v) case OpARM64ORNshiftRL: return rewriteValueARM64_OpARM64ORNshiftRL(v) case OpARM64ORNshiftRO: return rewriteValueARM64_OpARM64ORNshiftRO(v) case OpARM64ORconst: return rewriteValueARM64_OpARM64ORconst(v) case OpARM64ORshiftLL: return rewriteValueARM64_OpARM64ORshiftLL(v) case OpARM64ORshiftRA: return rewriteValueARM64_OpARM64ORshiftRA(v) case OpARM64ORshiftRL: return rewriteValueARM64_OpARM64ORshiftRL(v) case OpARM64ORshiftRO: return rewriteValueARM64_OpARM64ORshiftRO(v) case OpARM64REV: return rewriteValueARM64_OpARM64REV(v) case OpARM64REVW: return rewriteValueARM64_OpARM64REVW(v) case OpARM64ROR: return rewriteValueARM64_OpARM64ROR(v) case OpARM64RORW: return rewriteValueARM64_OpARM64RORW(v) case OpARM64SBCSflags: return rewriteValueARM64_OpARM64SBCSflags(v) case OpARM64SLL: return rewriteValueARM64_OpARM64SLL(v) case OpARM64SLLconst: return rewriteValueARM64_OpARM64SLLconst(v) case OpARM64SRA: return rewriteValueARM64_OpARM64SRA(v) case OpARM64SRAconst: return rewriteValueARM64_OpARM64SRAconst(v) case OpARM64SRL: return rewriteValueARM64_OpARM64SRL(v) case OpARM64SRLconst: return rewriteValueARM64_OpARM64SRLconst(v) case OpARM64STP: return rewriteValueARM64_OpARM64STP(v) case OpARM64SUB: return rewriteValueARM64_OpARM64SUB(v) case OpARM64SUBconst: return rewriteValueARM64_OpARM64SUBconst(v) case OpARM64SUBshiftLL: return rewriteValueARM64_OpARM64SUBshiftLL(v) case OpARM64SUBshiftRA: return rewriteValueARM64_OpARM64SUBshiftRA(v) case OpARM64SUBshiftRL: return rewriteValueARM64_OpARM64SUBshiftRL(v) case OpARM64TST: return rewriteValueARM64_OpARM64TST(v) case OpARM64TSTW: return rewriteValueARM64_OpARM64TSTW(v) case OpARM64TSTWconst: return rewriteValueARM64_OpARM64TSTWconst(v) case OpARM64TSTconst: return rewriteValueARM64_OpARM64TSTconst(v) case OpARM64TSTshiftLL: return rewriteValueARM64_OpARM64TSTshiftLL(v) case OpARM64TSTshiftRA: return rewriteValueARM64_OpARM64TSTshiftRA(v) case OpARM64TSTshiftRL: return rewriteValueARM64_OpARM64TSTshiftRL(v) case OpARM64TSTshiftRO: return rewriteValueARM64_OpARM64TSTshiftRO(v) case OpARM64UBFIZ: return rewriteValueARM64_OpARM64UBFIZ(v) case OpARM64UBFX: return rewriteValueARM64_OpARM64UBFX(v) case OpARM64UDIV: return rewriteValueARM64_OpARM64UDIV(v) case OpARM64UDIVW: return rewriteValueARM64_OpARM64UDIVW(v) case OpARM64UMOD: return rewriteValueARM64_OpARM64UMOD(v) case OpARM64UMODW: return rewriteValueARM64_OpARM64UMODW(v) case OpARM64XOR: return rewriteValueARM64_OpARM64XOR(v) case OpARM64XORconst: return rewriteValueARM64_OpARM64XORconst(v) case OpARM64XORshiftLL: return rewriteValueARM64_OpARM64XORshiftLL(v) case OpARM64XORshiftRA: return rewriteValueARM64_OpARM64XORshiftRA(v) case OpARM64XORshiftRL: return rewriteValueARM64_OpARM64XORshiftRL(v) case OpARM64XORshiftRO: return rewriteValueARM64_OpARM64XORshiftRO(v) case OpAbs: v.Op = OpARM64FABSD return true case OpAdd16: v.Op = OpARM64ADD return true case OpAdd32: v.Op = OpARM64ADD return true case OpAdd32F: v.Op = OpARM64FADDS return true case OpAdd64: v.Op = OpARM64ADD return true case OpAdd64F: v.Op = 
OpARM64FADDD return true case OpAdd8: v.Op = OpARM64ADD return true case OpAddPtr: v.Op = OpARM64ADD return true case OpAddr: return rewriteValueARM64_OpAddr(v) case OpAnd16: v.Op = OpARM64AND return true case OpAnd32: v.Op = OpARM64AND return true case OpAnd64: v.Op = OpARM64AND return true case OpAnd8: v.Op = OpARM64AND return true case OpAndB: v.Op = OpARM64AND return true case OpAtomicAdd32: v.Op = OpARM64LoweredAtomicAdd32 return true case OpAtomicAdd32Variant: v.Op = OpARM64LoweredAtomicAdd32Variant return true case OpAtomicAdd64: v.Op = OpARM64LoweredAtomicAdd64 return true case OpAtomicAdd64Variant: v.Op = OpARM64LoweredAtomicAdd64Variant return true case OpAtomicAnd32value: v.Op = OpARM64LoweredAtomicAnd32 return true case OpAtomicAnd32valueVariant: v.Op = OpARM64LoweredAtomicAnd32Variant return true case OpAtomicAnd64value: v.Op = OpARM64LoweredAtomicAnd64 return true case OpAtomicAnd64valueVariant: v.Op = OpARM64LoweredAtomicAnd64Variant return true case OpAtomicAnd8value: v.Op = OpARM64LoweredAtomicAnd8 return true case OpAtomicAnd8valueVariant: v.Op = OpARM64LoweredAtomicAnd8Variant return true case OpAtomicCompareAndSwap32: v.Op = OpARM64LoweredAtomicCas32 return true case OpAtomicCompareAndSwap32Variant: v.Op = OpARM64LoweredAtomicCas32Variant return true case OpAtomicCompareAndSwap64: v.Op = OpARM64LoweredAtomicCas64 return true case OpAtomicCompareAndSwap64Variant: v.Op = OpARM64LoweredAtomicCas64Variant return true case OpAtomicExchange32: v.Op = OpARM64LoweredAtomicExchange32 return true case OpAtomicExchange32Variant: v.Op = OpARM64LoweredAtomicExchange32Variant return true case OpAtomicExchange64: v.Op = OpARM64LoweredAtomicExchange64 return true case OpAtomicExchange64Variant: v.Op = OpARM64LoweredAtomicExchange64Variant return true case OpAtomicLoad32: v.Op = OpARM64LDARW return true case OpAtomicLoad64: v.Op = OpARM64LDAR return true case OpAtomicLoad8: v.Op = OpARM64LDARB return true case OpAtomicLoadPtr: v.Op = OpARM64LDAR return true case OpAtomicOr32value: v.Op = OpARM64LoweredAtomicOr32 return true case OpAtomicOr32valueVariant: v.Op = OpARM64LoweredAtomicOr32Variant return true case OpAtomicOr64value: v.Op = OpARM64LoweredAtomicOr64 return true case OpAtomicOr64valueVariant: v.Op = OpARM64LoweredAtomicOr64Variant return true case OpAtomicOr8value: v.Op = OpARM64LoweredAtomicOr8 return true case OpAtomicOr8valueVariant: v.Op = OpARM64LoweredAtomicOr8Variant return true case OpAtomicStore32: v.Op = OpARM64STLRW return true case OpAtomicStore64: v.Op = OpARM64STLR return true case OpAtomicStore8: v.Op = OpARM64STLRB return true case OpAtomicStorePtrNoWB: v.Op = OpARM64STLR return true case OpAvg64u: return rewriteValueARM64_OpAvg64u(v) case OpBitLen32: return rewriteValueARM64_OpBitLen32(v) case OpBitLen64: return rewriteValueARM64_OpBitLen64(v) case OpBitRev16: return rewriteValueARM64_OpBitRev16(v) case OpBitRev32: v.Op = OpARM64RBITW return true case OpBitRev64: v.Op = OpARM64RBIT return true case OpBitRev8: return rewriteValueARM64_OpBitRev8(v) case OpBswap16: v.Op = OpARM64REV16W return true case OpBswap32: v.Op = OpARM64REVW return true case OpBswap64: v.Op = OpARM64REV return true case OpCeil: v.Op = OpARM64FRINTPD return true case OpClosureCall: v.Op = OpARM64CALLclosure return true case OpCom16: v.Op = OpARM64MVN return true case OpCom32: v.Op = OpARM64MVN return true case OpCom64: v.Op = OpARM64MVN return true case OpCom8: v.Op = OpARM64MVN return true case OpCondSelect: return rewriteValueARM64_OpCondSelect(v) case OpConst16: return 
rewriteValueARM64_OpConst16(v) case OpConst32: return rewriteValueARM64_OpConst32(v) case OpConst32F: return rewriteValueARM64_OpConst32F(v) case OpConst64: return rewriteValueARM64_OpConst64(v) case OpConst64F: return rewriteValueARM64_OpConst64F(v) case OpConst8: return rewriteValueARM64_OpConst8(v) case OpConstBool: return rewriteValueARM64_OpConstBool(v) case OpConstNil: return rewriteValueARM64_OpConstNil(v) case OpCtz16: return rewriteValueARM64_OpCtz16(v) case OpCtz16NonZero: v.Op = OpCtz32 return true case OpCtz32: return rewriteValueARM64_OpCtz32(v) case OpCtz32NonZero: v.Op = OpCtz32 return true case OpCtz64: return rewriteValueARM64_OpCtz64(v) case OpCtz64NonZero: v.Op = OpCtz64 return true case OpCtz8: return rewriteValueARM64_OpCtz8(v) case OpCtz8NonZero: v.Op = OpCtz32 return true case OpCvt32Fto32: v.Op = OpARM64FCVTZSSW return true case OpCvt32Fto32U: v.Op = OpARM64FCVTZUSW return true case OpCvt32Fto64: v.Op = OpARM64FCVTZSS return true case OpCvt32Fto64F: v.Op = OpARM64FCVTSD return true case OpCvt32Fto64U: v.Op = OpARM64FCVTZUS return true case OpCvt32Uto32F: v.Op = OpARM64UCVTFWS return true case OpCvt32Uto64F: v.Op = OpARM64UCVTFWD return true case OpCvt32to32F: v.Op = OpARM64SCVTFWS return true case OpCvt32to64F: v.Op = OpARM64SCVTFWD return true case OpCvt64Fto32: v.Op = OpARM64FCVTZSDW return true case OpCvt64Fto32F: v.Op = OpARM64FCVTDS return true case OpCvt64Fto32U: v.Op = OpARM64FCVTZUDW return true case OpCvt64Fto64: v.Op = OpARM64FCVTZSD return true case OpCvt64Fto64U: v.Op = OpARM64FCVTZUD return true case OpCvt64Uto32F: v.Op = OpARM64UCVTFS return true case OpCvt64Uto64F: v.Op = OpARM64UCVTFD return true case OpCvt64to32F: v.Op = OpARM64SCVTFS return true case OpCvt64to64F: v.Op = OpARM64SCVTFD return true case OpCvtBoolToUint8: v.Op = OpCopy return true case OpDiv16: return rewriteValueARM64_OpDiv16(v) case OpDiv16u: return rewriteValueARM64_OpDiv16u(v) case OpDiv32: return rewriteValueARM64_OpDiv32(v) case OpDiv32F: v.Op = OpARM64FDIVS return true case OpDiv32u: v.Op = OpARM64UDIVW return true case OpDiv64: return rewriteValueARM64_OpDiv64(v) case OpDiv64F: v.Op = OpARM64FDIVD return true case OpDiv64u: v.Op = OpARM64UDIV return true case OpDiv8: return rewriteValueARM64_OpDiv8(v) case OpDiv8u: return rewriteValueARM64_OpDiv8u(v) case OpEq16: return rewriteValueARM64_OpEq16(v) case OpEq32: return rewriteValueARM64_OpEq32(v) case OpEq32F: return rewriteValueARM64_OpEq32F(v) case OpEq64: return rewriteValueARM64_OpEq64(v) case OpEq64F: return rewriteValueARM64_OpEq64F(v) case OpEq8: return rewriteValueARM64_OpEq8(v) case OpEqB: return rewriteValueARM64_OpEqB(v) case OpEqPtr: return rewriteValueARM64_OpEqPtr(v) case OpFMA: return rewriteValueARM64_OpFMA(v) case OpFloor: v.Op = OpARM64FRINTMD return true case OpGetCallerPC: v.Op = OpARM64LoweredGetCallerPC return true case OpGetCallerSP: v.Op = OpARM64LoweredGetCallerSP return true case OpGetClosurePtr: v.Op = OpARM64LoweredGetClosurePtr return true case OpHmul32: return rewriteValueARM64_OpHmul32(v) case OpHmul32u: return rewriteValueARM64_OpHmul32u(v) case OpHmul64: v.Op = OpARM64MULH return true case OpHmul64u: v.Op = OpARM64UMULH return true case OpInterCall: v.Op = OpARM64CALLinter return true case OpIsInBounds: return rewriteValueARM64_OpIsInBounds(v) case OpIsNonNil: return rewriteValueARM64_OpIsNonNil(v) case OpIsSliceInBounds: return rewriteValueARM64_OpIsSliceInBounds(v) case OpLeq16: return rewriteValueARM64_OpLeq16(v) case OpLeq16U: return rewriteValueARM64_OpLeq16U(v) case OpLeq32: return 
rewriteValueARM64_OpLeq32(v) case OpLeq32F: return rewriteValueARM64_OpLeq32F(v) case OpLeq32U: return rewriteValueARM64_OpLeq32U(v) case OpLeq64: return rewriteValueARM64_OpLeq64(v) case OpLeq64F: return rewriteValueARM64_OpLeq64F(v) case OpLeq64U: return rewriteValueARM64_OpLeq64U(v) case OpLeq8: return rewriteValueARM64_OpLeq8(v) case OpLeq8U: return rewriteValueARM64_OpLeq8U(v) case OpLess16: return rewriteValueARM64_OpLess16(v) case OpLess16U: return rewriteValueARM64_OpLess16U(v) case OpLess32: return rewriteValueARM64_OpLess32(v) case OpLess32F: return rewriteValueARM64_OpLess32F(v) case OpLess32U: return rewriteValueARM64_OpLess32U(v) case OpLess64: return rewriteValueARM64_OpLess64(v) case OpLess64F: return rewriteValueARM64_OpLess64F(v) case OpLess64U: return rewriteValueARM64_OpLess64U(v) case OpLess8: return rewriteValueARM64_OpLess8(v) case OpLess8U: return rewriteValueARM64_OpLess8U(v) case OpLoad: return rewriteValueARM64_OpLoad(v) case OpLocalAddr: return rewriteValueARM64_OpLocalAddr(v) case OpLsh16x16: return rewriteValueARM64_OpLsh16x16(v) case OpLsh16x32: return rewriteValueARM64_OpLsh16x32(v) case OpLsh16x64: return rewriteValueARM64_OpLsh16x64(v) case OpLsh16x8: return rewriteValueARM64_OpLsh16x8(v) case OpLsh32x16: return rewriteValueARM64_OpLsh32x16(v) case OpLsh32x32: return rewriteValueARM64_OpLsh32x32(v) case OpLsh32x64: return rewriteValueARM64_OpLsh32x64(v) case OpLsh32x8: return rewriteValueARM64_OpLsh32x8(v) case OpLsh64x16: return rewriteValueARM64_OpLsh64x16(v) case OpLsh64x32: return rewriteValueARM64_OpLsh64x32(v) case OpLsh64x64: return rewriteValueARM64_OpLsh64x64(v) case OpLsh64x8: return rewriteValueARM64_OpLsh64x8(v) case OpLsh8x16: return rewriteValueARM64_OpLsh8x16(v) case OpLsh8x32: return rewriteValueARM64_OpLsh8x32(v) case OpLsh8x64: return rewriteValueARM64_OpLsh8x64(v) case OpLsh8x8: return rewriteValueARM64_OpLsh8x8(v) case OpMax32F: v.Op = OpARM64FMAXS return true case OpMax64F: v.Op = OpARM64FMAXD return true case OpMin32F: v.Op = OpARM64FMINS return true case OpMin64F: v.Op = OpARM64FMIND return true case OpMod16: return rewriteValueARM64_OpMod16(v) case OpMod16u: return rewriteValueARM64_OpMod16u(v) case OpMod32: return rewriteValueARM64_OpMod32(v) case OpMod32u: v.Op = OpARM64UMODW return true case OpMod64: return rewriteValueARM64_OpMod64(v) case OpMod64u: v.Op = OpARM64UMOD return true case OpMod8: return rewriteValueARM64_OpMod8(v) case OpMod8u: return rewriteValueARM64_OpMod8u(v) case OpMove: return rewriteValueARM64_OpMove(v) case OpMul16: v.Op = OpARM64MULW return true case OpMul32: v.Op = OpARM64MULW return true case OpMul32F: v.Op = OpARM64FMULS return true case OpMul64: v.Op = OpARM64MUL return true case OpMul64F: v.Op = OpARM64FMULD return true case OpMul8: v.Op = OpARM64MULW return true case OpNeg16: v.Op = OpARM64NEG return true case OpNeg32: v.Op = OpARM64NEG return true case OpNeg32F: v.Op = OpARM64FNEGS return true case OpNeg64: v.Op = OpARM64NEG return true case OpNeg64F: v.Op = OpARM64FNEGD return true case OpNeg8: v.Op = OpARM64NEG return true case OpNeq16: return rewriteValueARM64_OpNeq16(v) case OpNeq32: return rewriteValueARM64_OpNeq32(v) case OpNeq32F: return rewriteValueARM64_OpNeq32F(v) case OpNeq64: return rewriteValueARM64_OpNeq64(v) case OpNeq64F: return rewriteValueARM64_OpNeq64F(v) case OpNeq8: return rewriteValueARM64_OpNeq8(v) case OpNeqB: v.Op = OpARM64XOR return true case OpNeqPtr: return rewriteValueARM64_OpNeqPtr(v) case OpNilCheck: v.Op = OpARM64LoweredNilCheck return true case OpNot: return 
rewriteValueARM64_OpNot(v) case OpOffPtr: return rewriteValueARM64_OpOffPtr(v) case OpOr16: v.Op = OpARM64OR return true case OpOr32: v.Op = OpARM64OR return true case OpOr64: v.Op = OpARM64OR return true case OpOr8: v.Op = OpARM64OR return true case OpOrB: v.Op = OpARM64OR return true case OpPanicBounds: return rewriteValueARM64_OpPanicBounds(v) case OpPopCount16: return rewriteValueARM64_OpPopCount16(v) case OpPopCount32: return rewriteValueARM64_OpPopCount32(v) case OpPopCount64: return rewriteValueARM64_OpPopCount64(v) case OpPrefetchCache: return rewriteValueARM64_OpPrefetchCache(v) case OpPrefetchCacheStreamed: return rewriteValueARM64_OpPrefetchCacheStreamed(v) case OpPubBarrier: return rewriteValueARM64_OpPubBarrier(v) case OpRotateLeft16: return rewriteValueARM64_OpRotateLeft16(v) case OpRotateLeft32: return rewriteValueARM64_OpRotateLeft32(v) case OpRotateLeft64: return rewriteValueARM64_OpRotateLeft64(v) case OpRotateLeft8: return rewriteValueARM64_OpRotateLeft8(v) case OpRound: v.Op = OpARM64FRINTAD return true case OpRound32F: v.Op = OpARM64LoweredRound32F return true case OpRound64F: v.Op = OpARM64LoweredRound64F return true case OpRoundToEven: v.Op = OpARM64FRINTND return true case OpRsh16Ux16: return rewriteValueARM64_OpRsh16Ux16(v) case OpRsh16Ux32: return rewriteValueARM64_OpRsh16Ux32(v) case OpRsh16Ux64: return rewriteValueARM64_OpRsh16Ux64(v) case OpRsh16Ux8: return rewriteValueARM64_OpRsh16Ux8(v) case OpRsh16x16: return rewriteValueARM64_OpRsh16x16(v) case OpRsh16x32: return rewriteValueARM64_OpRsh16x32(v) case OpRsh16x64: return rewriteValueARM64_OpRsh16x64(v) case OpRsh16x8: return rewriteValueARM64_OpRsh16x8(v) case OpRsh32Ux16: return rewriteValueARM64_OpRsh32Ux16(v) case OpRsh32Ux32: return rewriteValueARM64_OpRsh32Ux32(v) case OpRsh32Ux64: return rewriteValueARM64_OpRsh32Ux64(v) case OpRsh32Ux8: return rewriteValueARM64_OpRsh32Ux8(v) case OpRsh32x16: return rewriteValueARM64_OpRsh32x16(v) case OpRsh32x32: return rewriteValueARM64_OpRsh32x32(v) case OpRsh32x64: return rewriteValueARM64_OpRsh32x64(v) case OpRsh32x8: return rewriteValueARM64_OpRsh32x8(v) case OpRsh64Ux16: return rewriteValueARM64_OpRsh64Ux16(v) case OpRsh64Ux32: return rewriteValueARM64_OpRsh64Ux32(v) case OpRsh64Ux64: return rewriteValueARM64_OpRsh64Ux64(v) case OpRsh64Ux8: return rewriteValueARM64_OpRsh64Ux8(v) case OpRsh64x16: return rewriteValueARM64_OpRsh64x16(v) case OpRsh64x32: return rewriteValueARM64_OpRsh64x32(v) case OpRsh64x64: return rewriteValueARM64_OpRsh64x64(v) case OpRsh64x8: return rewriteValueARM64_OpRsh64x8(v) case OpRsh8Ux16: return rewriteValueARM64_OpRsh8Ux16(v) case OpRsh8Ux32: return rewriteValueARM64_OpRsh8Ux32(v) case OpRsh8Ux64: return rewriteValueARM64_OpRsh8Ux64(v) case OpRsh8Ux8: return rewriteValueARM64_OpRsh8Ux8(v) case OpRsh8x16: return rewriteValueARM64_OpRsh8x16(v) case OpRsh8x32: return rewriteValueARM64_OpRsh8x32(v) case OpRsh8x64: return rewriteValueARM64_OpRsh8x64(v) case OpRsh8x8: return rewriteValueARM64_OpRsh8x8(v) case OpSelect0: return rewriteValueARM64_OpSelect0(v) case OpSelect1: return rewriteValueARM64_OpSelect1(v) case OpSelectN: return rewriteValueARM64_OpSelectN(v) case OpSignExt16to32: v.Op = OpARM64MOVHreg return true case OpSignExt16to64: v.Op = OpARM64MOVHreg return true case OpSignExt32to64: v.Op = OpARM64MOVWreg return true case OpSignExt8to16: v.Op = OpARM64MOVBreg return true case OpSignExt8to32: v.Op = OpARM64MOVBreg return true case OpSignExt8to64: v.Op = OpARM64MOVBreg return true case OpSlicemask: return 
rewriteValueARM64_OpSlicemask(v) case OpSqrt: v.Op = OpARM64FSQRTD return true case OpSqrt32: v.Op = OpARM64FSQRTS return true case OpStaticCall: v.Op = OpARM64CALLstatic return true case OpStore: return rewriteValueARM64_OpStore(v) case OpSub16: v.Op = OpARM64SUB return true case OpSub32: v.Op = OpARM64SUB return true case OpSub32F: v.Op = OpARM64FSUBS return true case OpSub64: v.Op = OpARM64SUB return true case OpSub64F: v.Op = OpARM64FSUBD return true case OpSub8: v.Op = OpARM64SUB return true case OpSubPtr: v.Op = OpARM64SUB return true case OpTailCall: v.Op = OpARM64CALLtail return true case OpTrunc: v.Op = OpARM64FRINTZD return true case OpTrunc16to8: v.Op = OpCopy return true case OpTrunc32to16: v.Op = OpCopy return true case OpTrunc32to8: v.Op = OpCopy return true case OpTrunc64to16: v.Op = OpCopy return true case OpTrunc64to32: v.Op = OpCopy return true case OpTrunc64to8: v.Op = OpCopy return true case OpWB: v.Op = OpARM64LoweredWB return true case OpXor16: v.Op = OpARM64XOR return true case OpXor32: v.Op = OpARM64XOR return true case OpXor64: v.Op = OpARM64XOR return true case OpXor8: v.Op = OpARM64XOR return true case OpZero: return rewriteValueARM64_OpZero(v) case OpZeroExt16to32: v.Op = OpARM64MOVHUreg return true case OpZeroExt16to64: v.Op = OpARM64MOVHUreg return true case OpZeroExt32to64: v.Op = OpARM64MOVWUreg return true case OpZeroExt8to16: v.Op = OpARM64MOVBUreg return true case OpZeroExt8to32: v.Op = OpARM64MOVBUreg return true case OpZeroExt8to64: v.Op = OpARM64MOVBUreg return true } return false } func rewriteValueARM64_OpARM64ADCSflags(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (ADCzerocarry <typ.UInt64> c)))) // result: (ADCSflags x y c) for { x := v_0 y := v_1 if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags { break } v_2_0 := v_2.Args[0] if v_2_0.Op != OpARM64ADDSconstflags || auxIntToInt64(v_2_0.AuxInt) != -1 { break } v_2_0_0 := v_2_0.Args[0] if v_2_0_0.Op != OpARM64ADCzerocarry || v_2_0_0.Type != typ.UInt64 { break } c := v_2_0_0.Args[0] v.reset(OpARM64ADCSflags) v.AddArg3(x, y, c) return true } // match: (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (MOVDconst [0])))) // result: (ADDSflags x y) for { x := v_0 y := v_1 if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags { break } v_2_0 := v_2.Args[0] if v_2_0.Op != OpARM64ADDSconstflags || auxIntToInt64(v_2_0.AuxInt) != -1 { break } v_2_0_0 := v_2_0.Args[0] if v_2_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 { break } v.reset(OpARM64ADDSflags) v.AddArg2(x, y) return true } return false } func rewriteValueARM64_OpARM64ADD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ADD x (MOVDconst <t> [c])) // cond: !t.IsPtr() // result: (ADDconst [c] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } t := v_1.Type c := auxIntToInt64(v_1.AuxInt) if !(!t.IsPtr()) { continue } v.reset(OpARM64ADDconst) v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } break } // match: (ADD a l:(MUL x y)) // cond: l.Uses==1 && clobber(l) // result: (MADD a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { a := v_0 l := v_1 if l.Op != OpARM64MUL { continue } y := l.Args[1] x := l.Args[0] if !(l.Uses == 1 && clobber(l)) { continue } v.reset(OpARM64MADD) v.AddArg3(a, x, y) return true } break } // match: (ADD a l:(MNEG x y)) // cond: l.Uses==1 && clobber(l) // 
result: (MSUB a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { a := v_0 l := v_1 if l.Op != OpARM64MNEG { continue } y := l.Args[1] x := l.Args[0] if !(l.Uses == 1 && clobber(l)) { continue } v.reset(OpARM64MSUB) v.AddArg3(a, x, y) return true } break } // match: (ADD a l:(MULW x y)) // cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l) // result: (MADDW a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { a := v_0 l := v_1 if l.Op != OpARM64MULW { continue } y := l.Args[1] x := l.Args[0] if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) { continue } v.reset(OpARM64MADDW) v.AddArg3(a, x, y) return true } break } // match: (ADD a l:(MNEGW x y)) // cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l) // result: (MSUBW a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { a := v_0 l := v_1 if l.Op != OpARM64MNEGW { continue } y := l.Args[1] x := l.Args[0] if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) { continue } v.reset(OpARM64MSUBW) v.AddArg3(a, x, y) return true } break } // match: (ADD x (NEG y)) // result: (SUB x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64NEG { continue } y := v_1.Args[0] v.reset(OpARM64SUB) v.AddArg2(x, y) return true } break } // match: (ADD x0 x1:(SLLconst [c] y)) // cond: clobberIfDead(x1) // result: (ADDshiftLL x0 y [c]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x0 := v_0 x1 := v_1 if x1.Op != OpARM64SLLconst { continue } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { continue } v.reset(OpARM64ADDshiftLL) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } break } // match: (ADD x0 x1:(SRLconst [c] y)) // cond: clobberIfDead(x1) // result: (ADDshiftRL x0 y [c]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x0 := v_0 x1 := v_1 if x1.Op != OpARM64SRLconst { continue } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { continue } v.reset(OpARM64ADDshiftRL) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } break } // match: (ADD x0 x1:(SRAconst [c] y)) // cond: clobberIfDead(x1) // result: (ADDshiftRA x0 y [c]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x0 := v_0 x1 := v_1 if x1.Op != OpARM64SRAconst { continue } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { continue } v.reset(OpARM64ADDshiftRA) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } break } return false } func rewriteValueARM64_OpARM64ADDSflags(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ADDSflags x (MOVDconst [c])) // result: (ADDSconstflags [c] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ADDSconstflags) v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } break } return false } func rewriteValueARM64_OpARM64ADDconst(v *Value) bool { v_0 := v.Args[0] // match: (ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) // cond: is32Bit(off1+int64(off2)) // result: (MOVDaddr [int32(off1)+off2] {sym} ptr) for { off1 := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDaddr { break } off2 := auxIntToInt32(v_0.AuxInt) sym := auxToSym(v_0.Aux) ptr := v_0.Args[0] if !(is32Bit(off1 + int64(off2))) { break } v.reset(OpARM64MOVDaddr) v.AuxInt = int32ToAuxInt(int32(off1) + off2) v.Aux = symToAux(sym) v.AddArg(ptr) return true } // match: (ADDconst [c] y) // cond: c < 0 // result: (SUBconst [-c] y) for { c := 
auxIntToInt64(v.AuxInt) y := v_0 if !(c < 0) { break } v.reset(OpARM64SUBconst) v.AuxInt = int64ToAuxInt(-c) v.AddArg(y) return true } // match: (ADDconst [0] x) // result: x for { if auxIntToInt64(v.AuxInt) != 0 { break } x := v_0 v.copyOf(x) return true } // match: (ADDconst [c] (MOVDconst [d])) // result: (MOVDconst [c+d]) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } d := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(c + d) return true } // match: (ADDconst [c] (ADDconst [d] x)) // result: (ADDconst [c+d] x) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64ADDconst { break } d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARM64ADDconst) v.AuxInt = int64ToAuxInt(c + d) v.AddArg(x) return true } // match: (ADDconst [c] (SUBconst [d] x)) // result: (ADDconst [c-d] x) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64SUBconst { break } d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARM64ADDconst) v.AuxInt = int64ToAuxInt(c - d) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (ADDshiftLL (MOVDconst [c]) x [d]) // result: (ADDconst [c] (SLLconst <x.Type> x [d])) for { d := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64ADDconst) v.AuxInt = int64ToAuxInt(c) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) v0.AuxInt = int64ToAuxInt(d) v0.AddArg(x) v.AddArg(v0) return true } // match: (ADDshiftLL x (MOVDconst [c]) [d]) // result: (ADDconst x [int64(uint64(c)<<uint64(d))]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ADDconst) v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d))) v.AddArg(x) return true } // match: (ADDshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) // result: (REV16W x) for { if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) { break } x := v_0.Args[0] if x != v_1 { break } v.reset(OpARM64REV16W) v.AddArg(x) return true } // match: (ADDshiftLL [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x)) // cond: uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff // result: (REV16W x) for { if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 24) { break } v_0_0 := v_0.Args[0] if v_0_0.Op != OpARM64ANDconst { break } c1 := auxIntToInt64(v_0_0.AuxInt) x := v_0_0.Args[0] if v_1.Op != OpARM64ANDconst { break } c2 := auxIntToInt64(v_1.AuxInt) if x != v_1.Args[0] || !(uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff) { break } v.reset(OpARM64REV16W) v.AddArg(x) return true } // match: (ADDshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) // cond: (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) // result: (REV16 x) for { if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 { break } v_0_0 := v_0.Args[0] if v_0_0.Op != OpARM64ANDconst { break } c1 := auxIntToInt64(v_0_0.AuxInt) x := v_0_0.Args[0] if v_1.Op != OpARM64ANDconst { break } c2 := auxIntToInt64(v_1.AuxInt) if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) { break } v.reset(OpARM64REV16) v.AddArg(x) return true } // 
match: (ADDshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) // cond: (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) // result: (REV16 (ANDconst <x.Type> [0xffffffff] x)) for { if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 { break } v_0_0 := v_0.Args[0] if v_0_0.Op != OpARM64ANDconst { break } c1 := auxIntToInt64(v_0_0.AuxInt) x := v_0_0.Args[0] if v_1.Op != OpARM64ANDconst { break } c2 := auxIntToInt64(v_1.AuxInt) if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) { break } v.reset(OpARM64REV16) v0 := b.NewValue0(v.Pos, OpARM64ANDconst, x.Type) v0.AuxInt = int64ToAuxInt(0xffffffff) v0.AddArg(x) v.AddArg(v0) return true } // match: (ADDshiftLL [c] (SRLconst x [64-c]) x2) // result: (EXTRconst [64-c] x2 x) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c { break } x := v_0.Args[0] x2 := v_1 v.reset(OpARM64EXTRconst) v.AuxInt = int64ToAuxInt(64 - c) v.AddArg2(x2, x) return true } // match: (ADDshiftLL <t> [c] (UBFX [bfc] x) x2) // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) // result: (EXTRWconst [32-c] x2 x) for { t := v.Type c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64UBFX { break } bfc := auxIntToArm64BitField(v_0.AuxInt) x := v_0.Args[0] x2 := v_1 if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) { break } v.reset(OpARM64EXTRWconst) v.AuxInt = int64ToAuxInt(32 - c) v.AddArg2(x2, x) return true } return false } func rewriteValueARM64_OpARM64ADDshiftRA(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (ADDshiftRA (MOVDconst [c]) x [d]) // result: (ADDconst [c] (SRAconst <x.Type> x [d])) for { d := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64ADDconst) v.AuxInt = int64ToAuxInt(c) v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) v0.AuxInt = int64ToAuxInt(d) v0.AddArg(x) v.AddArg(v0) return true } // match: (ADDshiftRA x (MOVDconst [c]) [d]) // result: (ADDconst x [c>>uint64(d)]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ADDconst) v.AuxInt = int64ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64ADDshiftRL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (ADDshiftRL (MOVDconst [c]) x [d]) // result: (ADDconst [c] (SRLconst <x.Type> x [d])) for { d := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64ADDconst) v.AuxInt = int64ToAuxInt(c) v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) v0.AuxInt = int64ToAuxInt(d) v0.AddArg(x) v.AddArg(v0) return true } // match: (ADDshiftRL x (MOVDconst [c]) [d]) // result: (ADDconst x [int64(uint64(c)>>uint64(d))]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ADDconst) v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d))) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64AND(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (AND x (MOVDconst [c])) // result: (ANDconst [c] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } break } // match: (AND x x) // result: x 
for { x := v_0 if x != v_1 { break } v.copyOf(x) return true } // match: (AND x (MVN y)) // result: (BIC x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MVN { continue } y := v_1.Args[0] v.reset(OpARM64BIC) v.AddArg2(x, y) return true } break } // match: (AND x0 x1:(SLLconst [c] y)) // cond: clobberIfDead(x1) // result: (ANDshiftLL x0 y [c]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x0 := v_0 x1 := v_1 if x1.Op != OpARM64SLLconst { continue } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { continue } v.reset(OpARM64ANDshiftLL) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } break } // match: (AND x0 x1:(SRLconst [c] y)) // cond: clobberIfDead(x1) // result: (ANDshiftRL x0 y [c]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x0 := v_0 x1 := v_1 if x1.Op != OpARM64SRLconst { continue } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { continue } v.reset(OpARM64ANDshiftRL) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } break } // match: (AND x0 x1:(SRAconst [c] y)) // cond: clobberIfDead(x1) // result: (ANDshiftRA x0 y [c]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x0 := v_0 x1 := v_1 if x1.Op != OpARM64SRAconst { continue } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { continue } v.reset(OpARM64ANDshiftRA) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } break } // match: (AND x0 x1:(RORconst [c] y)) // cond: clobberIfDead(x1) // result: (ANDshiftRO x0 y [c]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x0 := v_0 x1 := v_1 if x1.Op != OpARM64RORconst { continue } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { continue } v.reset(OpARM64ANDshiftRO) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } break } return false } func rewriteValueARM64_OpARM64ANDconst(v *Value) bool { v_0 := v.Args[0] // match: (ANDconst [0] _) // result: (MOVDconst [0]) for { if auxIntToInt64(v.AuxInt) != 0 { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } // match: (ANDconst [-1] x) // result: x for { if auxIntToInt64(v.AuxInt) != -1 { break } x := v_0 v.copyOf(x) return true } // match: (ANDconst [c] (MOVDconst [d])) // result: (MOVDconst [c&d]) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } d := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(c & d) return true } // match: (ANDconst [c] (ANDconst [d] x)) // result: (ANDconst [c&d] x) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64ANDconst { break } d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(c & d) v.AddArg(x) return true } // match: (ANDconst [c] (MOVWUreg x)) // result: (ANDconst [c&(1<<32-1)] x) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVWUreg { break } x := v_0.Args[0] v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(c & (1<<32 - 1)) v.AddArg(x) return true } // match: (ANDconst [c] (MOVHUreg x)) // result: (ANDconst [c&(1<<16-1)] x) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVHUreg { break } x := v_0.Args[0] v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(c & (1<<16 - 1)) v.AddArg(x) return true } // match: (ANDconst [c] (MOVBUreg x)) // result: (ANDconst [c&(1<<8-1)] x) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVBUreg { break } x := v_0.Args[0] v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(c & (1<<8 - 
1)) v.AddArg(x) return true } // match: (ANDconst [ac] (SLLconst [sc] x)) // cond: isARM64BFMask(sc, ac, sc) // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x) for { ac := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64SLLconst { break } sc := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(isARM64BFMask(sc, ac, sc)) { break } v.reset(OpARM64UBFIZ) v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc))) v.AddArg(x) return true } // match: (ANDconst [ac] (SRLconst [sc] x)) // cond: isARM64BFMask(sc, ac, 0) // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x) for { ac := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64SRLconst { break } sc := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(isARM64BFMask(sc, ac, 0)) { break } v.reset(OpARM64UBFX) v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0))) v.AddArg(x) return true } // match: (ANDconst [c] (UBFX [bfc] x)) // cond: isARM64BFMask(0, c, 0) // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), arm64BFWidth(c, 0)))] x) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64UBFX { break } bfc := auxIntToArm64BitField(v_0.AuxInt) x := v_0.Args[0] if !(isARM64BFMask(0, c, 0)) { break } v.reset(OpARM64UBFX) v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), arm64BFWidth(c, 0)))) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64ANDshiftLL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (ANDshiftLL (MOVDconst [c]) x [d]) // result: (ANDconst [c] (SLLconst <x.Type> x [d])) for { d := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(c) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) v0.AuxInt = int64ToAuxInt(d) v0.AddArg(x) v.AddArg(v0) return true } // match: (ANDshiftLL x (MOVDconst [c]) [d]) // result: (ANDconst x [int64(uint64(c)<<uint64(d))]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d))) v.AddArg(x) return true } // match: (ANDshiftLL y:(SLLconst x [c]) x [c]) // result: y for { c := auxIntToInt64(v.AuxInt) y := v_0 if y.Op != OpARM64SLLconst || auxIntToInt64(y.AuxInt) != c { break } x := y.Args[0] if x != v_1 { break } v.copyOf(y) return true } return false } func rewriteValueARM64_OpARM64ANDshiftRA(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (ANDshiftRA (MOVDconst [c]) x [d]) // result: (ANDconst [c] (SRAconst <x.Type> x [d])) for { d := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(c) v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) v0.AuxInt = int64ToAuxInt(d) v0.AddArg(x) v.AddArg(v0) return true } // match: (ANDshiftRA x (MOVDconst [c]) [d]) // result: (ANDconst x [c>>uint64(d)]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } // match: (ANDshiftRA y:(SRAconst x [c]) x [c]) // result: y for { c := auxIntToInt64(v.AuxInt) y := v_0 if y.Op != OpARM64SRAconst || auxIntToInt64(y.AuxInt) != c { break } x := y.Args[0] if x != v_1 { break } v.copyOf(y) return true } return false } func rewriteValueARM64_OpARM64ANDshiftRL(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block // match: (ANDshiftRL (MOVDconst [c]) x [d]) // result: (ANDconst [c] (SRLconst <x.Type> x [d])) for { d := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(c) v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) v0.AuxInt = int64ToAuxInt(d) v0.AddArg(x) v.AddArg(v0) return true } // match: (ANDshiftRL x (MOVDconst [c]) [d]) // result: (ANDconst x [int64(uint64(c)>>uint64(d))]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d))) v.AddArg(x) return true } // match: (ANDshiftRL y:(SRLconst x [c]) x [c]) // result: y for { c := auxIntToInt64(v.AuxInt) y := v_0 if y.Op != OpARM64SRLconst || auxIntToInt64(y.AuxInt) != c { break } x := y.Args[0] if x != v_1 { break } v.copyOf(y) return true } return false } func rewriteValueARM64_OpARM64ANDshiftRO(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (ANDshiftRO (MOVDconst [c]) x [d]) // result: (ANDconst [c] (RORconst <x.Type> x [d])) for { d := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(c) v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type) v0.AuxInt = int64ToAuxInt(d) v0.AddArg(x) v.AddArg(v0) return true } // match: (ANDshiftRO x (MOVDconst [c]) [d]) // result: (ANDconst x [rotateRight64(c, d)]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(rotateRight64(c, d)) v.AddArg(x) return true } // match: (ANDshiftRO y:(RORconst x [c]) x [c]) // result: y for { c := auxIntToInt64(v.AuxInt) y := v_0 if y.Op != OpARM64RORconst || auxIntToInt64(y.AuxInt) != c { break } x := y.Args[0] if x != v_1 { break } v.copyOf(y) return true } return false } func rewriteValueARM64_OpARM64BIC(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BIC x (MOVDconst [c])) // result: (ANDconst [^c] x) for { x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(^c) v.AddArg(x) return true } // match: (BIC x x) // result: (MOVDconst [0]) for { x := v_0 if x != v_1 { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } // match: (BIC x0 x1:(SLLconst [c] y)) // cond: clobberIfDead(x1) // result: (BICshiftLL x0 y [c]) for { x0 := v_0 x1 := v_1 if x1.Op != OpARM64SLLconst { break } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { break } v.reset(OpARM64BICshiftLL) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } // match: (BIC x0 x1:(SRLconst [c] y)) // cond: clobberIfDead(x1) // result: (BICshiftRL x0 y [c]) for { x0 := v_0 x1 := v_1 if x1.Op != OpARM64SRLconst { break } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { break } v.reset(OpARM64BICshiftRL) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } // match: (BIC x0 x1:(SRAconst [c] y)) // cond: clobberIfDead(x1) // result: (BICshiftRA x0 y [c]) for { x0 := v_0 x1 := v_1 if x1.Op != OpARM64SRAconst { break } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { break } v.reset(OpARM64BICshiftRA) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } // match: (BIC x0 x1:(RORconst [c] y)) // cond: 
clobberIfDead(x1) // result: (BICshiftRO x0 y [c]) for { x0 := v_0 x1 := v_1 if x1.Op != OpARM64RORconst { break } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { break } v.reset(OpARM64BICshiftRO) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } return false } func rewriteValueARM64_OpARM64BICshiftLL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BICshiftLL x (MOVDconst [c]) [d]) // result: (ANDconst x [^int64(uint64(c)<<uint64(d))]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d))) v.AddArg(x) return true } // match: (BICshiftLL (SLLconst x [c]) x [c]) // result: (MOVDconst [0]) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c { break } x := v_0.Args[0] if x != v_1 { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } return false } func rewriteValueARM64_OpARM64BICshiftRA(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BICshiftRA x (MOVDconst [c]) [d]) // result: (ANDconst x [^(c>>uint64(d))]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(^(c >> uint64(d))) v.AddArg(x) return true } // match: (BICshiftRA (SRAconst x [c]) x [c]) // result: (MOVDconst [0]) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c { break } x := v_0.Args[0] if x != v_1 { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } return false } func rewriteValueARM64_OpARM64BICshiftRL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BICshiftRL x (MOVDconst [c]) [d]) // result: (ANDconst x [^int64(uint64(c)>>uint64(d))]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d))) v.AddArg(x) return true } // match: (BICshiftRL (SRLconst x [c]) x [c]) // result: (MOVDconst [0]) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c { break } x := v_0.Args[0] if x != v_1 { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } return false } func rewriteValueARM64_OpARM64BICshiftRO(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BICshiftRO x (MOVDconst [c]) [d]) // result: (ANDconst x [^rotateRight64(c, d)]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(^rotateRight64(c, d)) v.AddArg(x) return true } // match: (BICshiftRO (RORconst x [c]) x [c]) // result: (MOVDconst [0]) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c { break } x := v_0.Args[0] if x != v_1 { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } return false } func rewriteValueARM64_OpARM64CMN(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMN x (MOVDconst [c])) // result: (CMNconst [c] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64CMNconst) v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } break } // match: (CMN x0 x1:(SLLconst [c] y)) // cond: 
clobberIfDead(x1) // result: (CMNshiftLL x0 y [c]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x0 := v_0 x1 := v_1 if x1.Op != OpARM64SLLconst { continue } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { continue } v.reset(OpARM64CMNshiftLL) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } break } // match: (CMN x0 x1:(SRLconst [c] y)) // cond: clobberIfDead(x1) // result: (CMNshiftRL x0 y [c]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x0 := v_0 x1 := v_1 if x1.Op != OpARM64SRLconst { continue } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { continue } v.reset(OpARM64CMNshiftRL) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } break } // match: (CMN x0 x1:(SRAconst [c] y)) // cond: clobberIfDead(x1) // result: (CMNshiftRA x0 y [c]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x0 := v_0 x1 := v_1 if x1.Op != OpARM64SRAconst { continue } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { continue } v.reset(OpARM64CMNshiftRA) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } break } return false } func rewriteValueARM64_OpARM64CMNW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMNW x (MOVDconst [c])) // result: (CMNWconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64CMNWconst) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } break } return false } func rewriteValueARM64_OpARM64CMNWconst(v *Value) bool { v_0 := v.Args[0] // match: (CMNWconst [c] y) // cond: c < 0 && c != -1<<31 // result: (CMPWconst [-c] y) for { c := auxIntToInt32(v.AuxInt) y := v_0 if !(c < 0 && c != -1<<31) { break } v.reset(OpARM64CMPWconst) v.AuxInt = int32ToAuxInt(-c) v.AddArg(y) return true } // match: (CMNWconst (MOVDconst [x]) [y]) // result: (FlagConstant [addFlags32(int32(x),y)]) for { y := auxIntToInt32(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } x := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64FlagConstant) v.AuxInt = flagConstantToAuxInt(addFlags32(int32(x), y)) return true } return false } func rewriteValueARM64_OpARM64CMNconst(v *Value) bool { v_0 := v.Args[0] // match: (CMNconst [c] y) // cond: c < 0 && c != -1<<63 // result: (CMPconst [-c] y) for { c := auxIntToInt64(v.AuxInt) y := v_0 if !(c < 0 && c != -1<<63) { break } v.reset(OpARM64CMPconst) v.AuxInt = int64ToAuxInt(-c) v.AddArg(y) return true } // match: (CMNconst (MOVDconst [x]) [y]) // result: (FlagConstant [addFlags64(x,y)]) for { y := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } x := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64FlagConstant) v.AuxInt = flagConstantToAuxInt(addFlags64(x, y)) return true } return false } func rewriteValueARM64_OpARM64CMNshiftLL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (CMNshiftLL (MOVDconst [c]) x [d]) // result: (CMNconst [c] (SLLconst <x.Type> x [d])) for { d := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64CMNconst) v.AuxInt = int64ToAuxInt(c) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) v0.AuxInt = int64ToAuxInt(d) v0.AddArg(x) v.AddArg(v0) return true } // match: (CMNshiftLL x (MOVDconst [c]) [d]) // result: (CMNconst x [int64(uint64(c)<<uint64(d))]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) 
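		// The shift is applied to the constant at compile time, so the whole
		// shifted operand folds into a single CMNconst immediate. For example,
		// with c=3 and d=2 the rewrite below produces (CMNconst [12] x).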
v.reset(OpARM64CMNconst) v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d))) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64CMNshiftRA(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (CMNshiftRA (MOVDconst [c]) x [d]) // result: (CMNconst [c] (SRAconst <x.Type> x [d])) for { d := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64CMNconst) v.AuxInt = int64ToAuxInt(c) v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) v0.AuxInt = int64ToAuxInt(d) v0.AddArg(x) v.AddArg(v0) return true } // match: (CMNshiftRA x (MOVDconst [c]) [d]) // result: (CMNconst x [c>>uint64(d)]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64CMNconst) v.AuxInt = int64ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64CMNshiftRL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (CMNshiftRL (MOVDconst [c]) x [d]) // result: (CMNconst [c] (SRLconst <x.Type> x [d])) for { d := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64CMNconst) v.AuxInt = int64ToAuxInt(c) v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) v0.AuxInt = int64ToAuxInt(d) v0.AddArg(x) v.AddArg(v0) return true } // match: (CMNshiftRL x (MOVDconst [c]) [d]) // result: (CMNconst x [int64(uint64(c)>>uint64(d))]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64CMNconst) v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d))) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64CMP(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (CMP x (MOVDconst [c])) // result: (CMPconst [c] x) for { x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64CMPconst) v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } // match: (CMP (MOVDconst [c]) x) // result: (InvertFlags (CMPconst [c] x)) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(x) v.AddArg(v0) return true } // match: (CMP x y) // cond: canonLessThan(x,y) // result: (InvertFlags (CMP y x)) for { x := v_0 y := v_1 if !(canonLessThan(x, y)) { break } v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v0.AddArg2(y, x) v.AddArg(v0) return true } // match: (CMP x0 x1:(SLLconst [c] y)) // cond: clobberIfDead(x1) // result: (CMPshiftLL x0 y [c]) for { x0 := v_0 x1 := v_1 if x1.Op != OpARM64SLLconst { break } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { break } v.reset(OpARM64CMPshiftLL) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } // match: (CMP x0:(SLLconst [c] y) x1) // cond: clobberIfDead(x0) // result: (InvertFlags (CMPshiftLL x1 y [c])) for { x0 := v_0 if x0.Op != OpARM64SLLconst { break } c := auxIntToInt64(x0.AuxInt) y := x0.Args[0] x1 := v_1 if !(clobberIfDead(x0)) { break } v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPshiftLL, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg2(x1, y) v.AddArg(v0) return true } // match: (CMP x0 x1:(SRLconst [c] y)) // cond: clobberIfDead(x1) // result: (CMPshiftRL x0 y [c]) for { x0 := v_0 x1 := v_1 if x1.Op != 
OpARM64SRLconst { break } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { break } v.reset(OpARM64CMPshiftRL) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } // match: (CMP x0:(SRLconst [c] y) x1) // cond: clobberIfDead(x0) // result: (InvertFlags (CMPshiftRL x1 y [c])) for { x0 := v_0 if x0.Op != OpARM64SRLconst { break } c := auxIntToInt64(x0.AuxInt) y := x0.Args[0] x1 := v_1 if !(clobberIfDead(x0)) { break } v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRL, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg2(x1, y) v.AddArg(v0) return true } // match: (CMP x0 x1:(SRAconst [c] y)) // cond: clobberIfDead(x1) // result: (CMPshiftRA x0 y [c]) for { x0 := v_0 x1 := v_1 if x1.Op != OpARM64SRAconst { break } c := auxIntToInt64(x1.AuxInt) y := x1.Args[0] if !(clobberIfDead(x1)) { break } v.reset(OpARM64CMPshiftRA) v.AuxInt = int64ToAuxInt(c) v.AddArg2(x0, y) return true } // match: (CMP x0:(SRAconst [c] y) x1) // cond: clobberIfDead(x0) // result: (InvertFlags (CMPshiftRA x1 y [c])) for { x0 := v_0 if x0.Op != OpARM64SRAconst { break } c := auxIntToInt64(x0.AuxInt) y := x0.Args[0] x1 := v_1 if !(clobberIfDead(x0)) { break } v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRA, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg2(x1, y) v.AddArg(v0) return true } return false } func rewriteValueARM64_OpARM64CMPW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (CMPW x (MOVDconst [c])) // result: (CMPWconst [int32(c)] x) for { x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64CMPWconst) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } // match: (CMPW (MOVDconst [c]) x) // result: (InvertFlags (CMPWconst [int32(c)] x)) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(x) v.AddArg(v0) return true } // match: (CMPW x y) // cond: canonLessThan(x,y) // result: (InvertFlags (CMPW y x)) for { x := v_0 y := v_1 if !(canonLessThan(x, y)) { break } v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v0.AddArg2(y, x) v.AddArg(v0) return true } return false } func rewriteValueARM64_OpARM64CMPWconst(v *Value) bool { v_0 := v.Args[0] // match: (CMPWconst [c] y) // cond: c < 0 && c != -1<<31 // result: (CMNWconst [-c] y) for { c := auxIntToInt32(v.AuxInt) y := v_0 if !(c < 0 && c != -1<<31) { break } v.reset(OpARM64CMNWconst) v.AuxInt = int32ToAuxInt(-c) v.AddArg(y) return true } // match: (CMPWconst (MOVDconst [x]) [y]) // result: (FlagConstant [subFlags32(int32(x),y)]) for { y := auxIntToInt32(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } x := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64FlagConstant) v.AuxInt = flagConstantToAuxInt(subFlags32(int32(x), y)) return true } // match: (CMPWconst (MOVBUreg _) [c]) // cond: 0xff < c // result: (FlagConstant [subFlags64(0,1)]) for { c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARM64MOVBUreg || !(0xff < c) { break } v.reset(OpARM64FlagConstant) v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1)) return true } // match: (CMPWconst (MOVHUreg _) [c]) // cond: 0xffff < c // result: (FlagConstant [subFlags64(0,1)]) for { c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARM64MOVHUreg || !(0xffff < c) { break } v.reset(OpARM64FlagConstant) v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1)) return true } return false } func 
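// rewriteValueARM64_OpARM64CMPconst canonicalizes constant comparisons:
// negative immediates become CMNconst, constant-vs-constant comparisons
// fold to a FlagConstant, and comparing a zero-extended value against a
// constant larger than its maximum (e.g. CMPconst [0x100] of a MOVBUreg,
// whose value is at most 0xff) always yields the "less than" flags,
// encoded as subFlags64(0,1).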
rewriteValueARM64_OpARM64CMPconst(v *Value) bool { v_0 := v.Args[0] // match: (CMPconst [c] y) // cond: c < 0 && c != -1<<63 // result: (CMNconst [-c] y) for { c := auxIntToInt64(v.AuxInt) y := v_0 if !(c < 0 && c != -1<<63) { break } v.reset(OpARM64CMNconst) v.AuxInt = int64ToAuxInt(-c) v.AddArg(y) return true } // match: (CMPconst (MOVDconst [x]) [y]) // result: (FlagConstant [subFlags64(x,y)]) for { y := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } x := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64FlagConstant) v.AuxInt = flagConstantToAuxInt(subFlags64(x, y)) return true } // match: (CMPconst (MOVBUreg _) [c]) // cond: 0xff < c // result: (FlagConstant [subFlags64(0,1)]) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVBUreg || !(0xff < c) { break } v.reset(OpARM64FlagConstant) v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1)) return true } // match: (CMPconst (MOVHUreg _) [c]) // cond: 0xffff < c // result: (FlagConstant [subFlags64(0,1)]) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVHUreg || !(0xffff < c) { break } v.reset(OpARM64FlagConstant) v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1)) return true } // match: (CMPconst (MOVWUreg _) [c]) // cond: 0xffffffff < c // result: (FlagConstant [subFlags64(0,1)]) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVWUreg || !(0xffffffff < c) { break } v.reset(OpARM64FlagConstant) v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1)) return true } // match: (CMPconst (ANDconst _ [m]) [n]) // cond: 0 <= m && m < n // result: (FlagConstant [subFlags64(0,1)]) for { n := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64ANDconst { break } m := auxIntToInt64(v_0.AuxInt) if !(0 <= m && m < n) { break } v.reset(OpARM64FlagConstant) v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1)) return true } // match: (CMPconst (SRLconst _ [c]) [n]) // cond: 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n) // result: (FlagConstant [subFlags64(0,1)]) for { n := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64SRLconst { break } c := auxIntToInt64(v_0.AuxInt) if !(0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n)) { break } v.reset(OpARM64FlagConstant) v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1)) return true } return false } func rewriteValueARM64_OpARM64CMPshiftLL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (CMPshiftLL (MOVDconst [c]) x [d]) // result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d]))) for { d := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) v1.AuxInt = int64ToAuxInt(d) v1.AddArg(x) v0.AddArg(v1) v.AddArg(v0) return true } // match: (CMPshiftLL x (MOVDconst [c]) [d]) // result: (CMPconst x [int64(uint64(c)<<uint64(d))]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64CMPconst) v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d))) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64CMPshiftRA(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (CMPshiftRA (MOVDconst [c]) x [d]) // result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d]))) for { d := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, 
OpARM64CMPconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v1 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) v1.AuxInt = int64ToAuxInt(d) v1.AddArg(x) v0.AddArg(v1) v.AddArg(v0) return true } // match: (CMPshiftRA x (MOVDconst [c]) [d]) // result: (CMPconst x [c>>uint64(d)]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64CMPconst) v.AuxInt = int64ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64CMPshiftRL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (CMPshiftRL (MOVDconst [c]) x [d]) // result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d]))) for { d := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v1 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) v1.AuxInt = int64ToAuxInt(d) v1.AddArg(x) v0.AddArg(v1) v.AddArg(v0) return true } // match: (CMPshiftRL x (MOVDconst [c]) [d]) // result: (CMPconst x [int64(uint64(c)>>uint64(d))]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64CMPconst) v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d))) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64CSEL(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (CSEL [cc] (MOVDconst [-1]) (MOVDconst [0]) flag) // result: (CSETM [cc] flag) for { cc := auxIntToOp(v.AuxInt) if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != -1 || v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { break } flag := v_2 v.reset(OpARM64CSETM) v.AuxInt = opToAuxInt(cc) v.AddArg(flag) return true } // match: (CSEL [cc] (MOVDconst [0]) (MOVDconst [-1]) flag) // result: (CSETM [arm64Negate(cc)] flag) for { cc := auxIntToOp(v.AuxInt) if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 || v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 { break } flag := v_2 v.reset(OpARM64CSETM) v.AuxInt = opToAuxInt(arm64Negate(cc)) v.AddArg(flag) return true } // match: (CSEL [cc] x (MOVDconst [0]) flag) // result: (CSEL0 [cc] x flag) for { cc := auxIntToOp(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { break } flag := v_2 v.reset(OpARM64CSEL0) v.AuxInt = opToAuxInt(cc) v.AddArg2(x, flag) return true } // match: (CSEL [cc] (MOVDconst [0]) y flag) // result: (CSEL0 [arm64Negate(cc)] y flag) for { cc := auxIntToOp(v.AuxInt) if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 { break } y := v_1 flag := v_2 v.reset(OpARM64CSEL0) v.AuxInt = opToAuxInt(arm64Negate(cc)) v.AddArg2(y, flag) return true } // match: (CSEL [cc] x (ADDconst [1] a) flag) // result: (CSINC [cc] x a flag) for { cc := auxIntToOp(v.AuxInt) x := v_0 if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 { break } a := v_1.Args[0] flag := v_2 v.reset(OpARM64CSINC) v.AuxInt = opToAuxInt(cc) v.AddArg3(x, a, flag) return true } // match: (CSEL [cc] (ADDconst [1] a) x flag) // result: (CSINC [arm64Negate(cc)] x a flag) for { cc := auxIntToOp(v.AuxInt) if v_0.Op != OpARM64ADDconst || auxIntToInt64(v_0.AuxInt) != 1 { break } a := v_0.Args[0] x := v_1 flag := v_2 v.reset(OpARM64CSINC) v.AuxInt = opToAuxInt(arm64Negate(cc)) v.AddArg3(x, a, flag) return true } // match: (CSEL [cc] x (MVN a) flag) // result: (CSINV [cc] x a flag) 
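	// CSINV yields x when cc holds and ^a otherwise, so the explicit MVN is
	// folded into the conditional select and one instruction is saved.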
for { cc := auxIntToOp(v.AuxInt) x := v_0 if v_1.Op != OpARM64MVN { break } a := v_1.Args[0] flag := v_2 v.reset(OpARM64CSINV) v.AuxInt = opToAuxInt(cc) v.AddArg3(x, a, flag) return true } // match: (CSEL [cc] (MVN a) x flag) // result: (CSINV [arm64Negate(cc)] x a flag) for { cc := auxIntToOp(v.AuxInt) if v_0.Op != OpARM64MVN { break } a := v_0.Args[0] x := v_1 flag := v_2 v.reset(OpARM64CSINV) v.AuxInt = opToAuxInt(arm64Negate(cc)) v.AddArg3(x, a, flag) return true } // match: (CSEL [cc] x (NEG a) flag) // result: (CSNEG [cc] x a flag) for { cc := auxIntToOp(v.AuxInt) x := v_0 if v_1.Op != OpARM64NEG { break } a := v_1.Args[0] flag := v_2 v.reset(OpARM64CSNEG) v.AuxInt = opToAuxInt(cc) v.AddArg3(x, a, flag) return true } // match: (CSEL [cc] (NEG a) x flag) // result: (CSNEG [arm64Negate(cc)] x a flag) for { cc := auxIntToOp(v.AuxInt) if v_0.Op != OpARM64NEG { break } a := v_0.Args[0] x := v_1 flag := v_2 v.reset(OpARM64CSNEG) v.AuxInt = opToAuxInt(arm64Negate(cc)) v.AddArg3(x, a, flag) return true } // match: (CSEL [cc] x y (InvertFlags cmp)) // result: (CSEL [arm64Invert(cc)] x y cmp) for { cc := auxIntToOp(v.AuxInt) x := v_0 y := v_1 if v_2.Op != OpARM64InvertFlags { break } cmp := v_2.Args[0] v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(arm64Invert(cc)) v.AddArg3(x, y, cmp) return true } // match: (CSEL [cc] x _ flag) // cond: ccARM64Eval(cc, flag) > 0 // result: x for { cc := auxIntToOp(v.AuxInt) x := v_0 flag := v_2 if !(ccARM64Eval(cc, flag) > 0) { break } v.copyOf(x) return true } // match: (CSEL [cc] _ y flag) // cond: ccARM64Eval(cc, flag) < 0 // result: y for { cc := auxIntToOp(v.AuxInt) y := v_1 flag := v_2 if !(ccARM64Eval(cc, flag) < 0) { break } v.copyOf(y) return true } // match: (CSEL [cc] x y (CMPWconst [0] boolval)) // cond: cc == OpARM64NotEqual && flagArg(boolval) != nil // result: (CSEL [boolval.Op] x y flagArg(boolval)) for { cc := auxIntToOp(v.AuxInt) x := v_0 y := v_1 if v_2.Op != OpARM64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 { break } boolval := v_2.Args[0] if !(cc == OpARM64NotEqual && flagArg(boolval) != nil) { break } v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(boolval.Op) v.AddArg3(x, y, flagArg(boolval)) return true } // match: (CSEL [cc] x y (CMPWconst [0] boolval)) // cond: cc == OpARM64Equal && flagArg(boolval) != nil // result: (CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval)) for { cc := auxIntToOp(v.AuxInt) x := v_0 y := v_1 if v_2.Op != OpARM64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 { break } boolval := v_2.Args[0] if !(cc == OpARM64Equal && flagArg(boolval) != nil) { break } v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(arm64Negate(boolval.Op)) v.AddArg3(x, y, flagArg(boolval)) return true } return false } func rewriteValueARM64_OpARM64CSEL0(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CSEL0 [cc] x (InvertFlags cmp)) // result: (CSEL0 [arm64Invert(cc)] x cmp) for { cc := auxIntToOp(v.AuxInt) x := v_0 if v_1.Op != OpARM64InvertFlags { break } cmp := v_1.Args[0] v.reset(OpARM64CSEL0) v.AuxInt = opToAuxInt(arm64Invert(cc)) v.AddArg2(x, cmp) return true } // match: (CSEL0 [cc] x flag) // cond: ccARM64Eval(cc, flag) > 0 // result: x for { cc := auxIntToOp(v.AuxInt) x := v_0 flag := v_1 if !(ccARM64Eval(cc, flag) > 0) { break } v.copyOf(x) return true } // match: (CSEL0 [cc] _ flag) // cond: ccARM64Eval(cc, flag) < 0 // result: (MOVDconst [0]) for { cc := auxIntToOp(v.AuxInt) flag := v_1 if !(ccARM64Eval(cc, flag) < 0) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } // match: (CSEL0 [cc] x (CMPWconst 
[0] boolval)) // cond: cc == OpARM64NotEqual && flagArg(boolval) != nil // result: (CSEL0 [boolval.Op] x flagArg(boolval)) for { cc := auxIntToOp(v.AuxInt) x := v_0 if v_1.Op != OpARM64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 { break } boolval := v_1.Args[0] if !(cc == OpARM64NotEqual && flagArg(boolval) != nil) { break } v.reset(OpARM64CSEL0) v.AuxInt = opToAuxInt(boolval.Op) v.AddArg2(x, flagArg(boolval)) return true } // match: (CSEL0 [cc] x (CMPWconst [0] boolval)) // cond: cc == OpARM64Equal && flagArg(boolval) != nil // result: (CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval)) for { cc := auxIntToOp(v.AuxInt) x := v_0 if v_1.Op != OpARM64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 { break } boolval := v_1.Args[0] if !(cc == OpARM64Equal && flagArg(boolval) != nil) { break } v.reset(OpARM64CSEL0) v.AuxInt = opToAuxInt(arm64Negate(boolval.Op)) v.AddArg2(x, flagArg(boolval)) return true } return false } func rewriteValueARM64_OpARM64CSETM(v *Value) bool { v_0 := v.Args[0] // match: (CSETM [cc] (InvertFlags cmp)) // result: (CSETM [arm64Invert(cc)] cmp) for { cc := auxIntToOp(v.AuxInt) if v_0.Op != OpARM64InvertFlags { break } cmp := v_0.Args[0] v.reset(OpARM64CSETM) v.AuxInt = opToAuxInt(arm64Invert(cc)) v.AddArg(cmp) return true } // match: (CSETM [cc] flag) // cond: ccARM64Eval(cc, flag) > 0 // result: (MOVDconst [-1]) for { cc := auxIntToOp(v.AuxInt) flag := v_0 if !(ccARM64Eval(cc, flag) > 0) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(-1) return true } // match: (CSETM [cc] flag) // cond: ccARM64Eval(cc, flag) < 0 // result: (MOVDconst [0]) for { cc := auxIntToOp(v.AuxInt) flag := v_0 if !(ccARM64Eval(cc, flag) < 0) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } return false } func rewriteValueARM64_OpARM64CSINC(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (CSINC [cc] x y (InvertFlags cmp)) // result: (CSINC [arm64Invert(cc)] x y cmp) for { cc := auxIntToOp(v.AuxInt) x := v_0 y := v_1 if v_2.Op != OpARM64InvertFlags { break } cmp := v_2.Args[0] v.reset(OpARM64CSINC) v.AuxInt = opToAuxInt(arm64Invert(cc)) v.AddArg3(x, y, cmp) return true } // match: (CSINC [cc] x _ flag) // cond: ccARM64Eval(cc, flag) > 0 // result: x for { cc := auxIntToOp(v.AuxInt) x := v_0 flag := v_2 if !(ccARM64Eval(cc, flag) > 0) { break } v.copyOf(x) return true } // match: (CSINC [cc] _ y flag) // cond: ccARM64Eval(cc, flag) < 0 // result: (ADDconst [1] y) for { cc := auxIntToOp(v.AuxInt) y := v_1 flag := v_2 if !(ccARM64Eval(cc, flag) < 0) { break } v.reset(OpARM64ADDconst) v.AuxInt = int64ToAuxInt(1) v.AddArg(y) return true } return false } func rewriteValueARM64_OpARM64CSINV(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (CSINV [cc] x y (InvertFlags cmp)) // result: (CSINV [arm64Invert(cc)] x y cmp) for { cc := auxIntToOp(v.AuxInt) x := v_0 y := v_1 if v_2.Op != OpARM64InvertFlags { break } cmp := v_2.Args[0] v.reset(OpARM64CSINV) v.AuxInt = opToAuxInt(arm64Invert(cc)) v.AddArg3(x, y, cmp) return true } // match: (CSINV [cc] x _ flag) // cond: ccARM64Eval(cc, flag) > 0 // result: x for { cc := auxIntToOp(v.AuxInt) x := v_0 flag := v_2 if !(ccARM64Eval(cc, flag) > 0) { break } v.copyOf(x) return true } // match: (CSINV [cc] _ y flag) // cond: ccARM64Eval(cc, flag) < 0 // result: (Not y) for { cc := auxIntToOp(v.AuxInt) y := v_1 flag := v_2 if !(ccARM64Eval(cc, flag) < 0) { break } v.reset(OpNot) v.AddArg(y) return true } return false } func rewriteValueARM64_OpARM64CSNEG(v *Value) bool { 
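	// CSNEG yields x when cc holds and -y otherwise. The rules below mirror
	// the CSEL/CSINV handling: fold InvertFlags into the condition code, and
	// collapse to x or (NEG y) when the flags are known at compile time.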
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CSNEG [cc] x y (InvertFlags cmp))
	// result: (CSNEG [arm64Invert(cc)] x y cmp)
	for {
		cc := auxIntToOp(v.AuxInt)
		x := v_0
		y := v_1
		if v_2.Op != OpARM64InvertFlags {
			break
		}
		cmp := v_2.Args[0]
		v.reset(OpARM64CSNEG)
		v.AuxInt = opToAuxInt(arm64Invert(cc))
		v.AddArg3(x, y, cmp)
		return true
	}
	// match: (CSNEG [cc] x _ flag)
	// cond: ccARM64Eval(cc, flag) > 0
	// result: x
	for {
		cc := auxIntToOp(v.AuxInt)
		x := v_0
		flag := v_2
		if !(ccARM64Eval(cc, flag) > 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CSNEG [cc] _ y flag)
	// cond: ccARM64Eval(cc, flag) < 0
	// result: (NEG y)
	for {
		cc := auxIntToOp(v.AuxInt)
		y := v_1
		flag := v_2
		if !(ccARM64Eval(cc, flag) < 0) {
			break
		}
		v.reset(OpARM64NEG)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64DIV(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIV (MOVDconst [c]) (MOVDconst [d]))
	// cond: d != 0
	// result: (MOVDconst [c/d])
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		d := auxIntToInt64(v_1.AuxInt)
		if !(d != 0) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(c / d)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64DIVW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVW (MOVDconst [c]) (MOVDconst [d]))
	// cond: d != 0
	// result: (MOVDconst [int64(uint32(int32(c)/int32(d)))])
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		d := auxIntToInt64(v_1.AuxInt)
		if !(d != 0) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(int64(uint32(int32(c) / int32(d))))
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64EON(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (EON x (MOVDconst [c]))
	// result: (XORconst [^c] x)
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64XORconst)
		v.AuxInt = int64ToAuxInt(^c)
		v.AddArg(x)
		return true
	}
	// match: (EON x x)
	// result: (MOVDconst [-1])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	// match: (EON x0 x1:(SLLconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (EONshiftLL x0 y [c])
	for {
		x0 := v_0
		x1 := v_1
		if x1.Op != OpARM64SLLconst {
			break
		}
		c := auxIntToInt64(x1.AuxInt)
		y := x1.Args[0]
		if !(clobberIfDead(x1)) {
			break
		}
		v.reset(OpARM64EONshiftLL)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg2(x0, y)
		return true
	}
	// match: (EON x0 x1:(SRLconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (EONshiftRL x0 y [c])
	for {
		x0 := v_0
		x1 := v_1
		if x1.Op != OpARM64SRLconst {
			break
		}
		c := auxIntToInt64(x1.AuxInt)
		y := x1.Args[0]
		if !(clobberIfDead(x1)) {
			break
		}
		v.reset(OpARM64EONshiftRL)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg2(x0, y)
		return true
	}
	// match: (EON x0 x1:(SRAconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (EONshiftRA x0 y [c])
	for {
		x0 := v_0
		x1 := v_1
		if x1.Op != OpARM64SRAconst {
			break
		}
		c := auxIntToInt64(x1.AuxInt)
		y := x1.Args[0]
		if !(clobberIfDead(x1)) {
			break
		}
		v.reset(OpARM64EONshiftRA)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg2(x0, y)
		return true
	}
	// match: (EON x0 x1:(RORconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (EONshiftRO x0 y [c])
	for {
		x0 := v_0
		x1 := v_1
		if x1.Op != OpARM64RORconst {
			break
		}
		c := auxIntToInt64(x1.AuxInt)
		y := x1.Args[0]
		if !(clobberIfDead(x1)) {
			break
		}
		v.reset(OpARM64EONshiftRO)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg2(x0, y)
		return true
	}
	return false
}
func
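// EON computes x ^ ^y. With a constant, compile-time-shifted second operand
// the complement is known, so EONshiftLL folds to an XORconst immediate
// (for example, c=1 and d=4 give XORconst [^16] x, i.e. [-17]); and when
// both operands end up being the same shifted value, the result is all
// ones, i.e. (MOVDconst [-1]).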
rewriteValueARM64_OpARM64EONshiftLL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (EONshiftLL x (MOVDconst [c]) [d]) // result: (XORconst x [^int64(uint64(c)<<uint64(d))]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64XORconst) v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d))) v.AddArg(x) return true } // match: (EONshiftLL (SLLconst x [c]) x [c]) // result: (MOVDconst [-1]) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c { break } x := v_0.Args[0] if x != v_1 { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(-1) return true } return false } func rewriteValueARM64_OpARM64EONshiftRA(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (EONshiftRA x (MOVDconst [c]) [d]) // result: (XORconst x [^(c>>uint64(d))]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64XORconst) v.AuxInt = int64ToAuxInt(^(c >> uint64(d))) v.AddArg(x) return true } // match: (EONshiftRA (SRAconst x [c]) x [c]) // result: (MOVDconst [-1]) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c { break } x := v_0.Args[0] if x != v_1 { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(-1) return true } return false } func rewriteValueARM64_OpARM64EONshiftRL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (EONshiftRL x (MOVDconst [c]) [d]) // result: (XORconst x [^int64(uint64(c)>>uint64(d))]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64XORconst) v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d))) v.AddArg(x) return true } // match: (EONshiftRL (SRLconst x [c]) x [c]) // result: (MOVDconst [-1]) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c { break } x := v_0.Args[0] if x != v_1 { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(-1) return true } return false } func rewriteValueARM64_OpARM64EONshiftRO(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (EONshiftRO x (MOVDconst [c]) [d]) // result: (XORconst x [^rotateRight64(c, d)]) for { d := auxIntToInt64(v.AuxInt) x := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64XORconst) v.AuxInt = int64ToAuxInt(^rotateRight64(c, d)) v.AddArg(x) return true } // match: (EONshiftRO (RORconst x [c]) x [c]) // result: (MOVDconst [-1]) for { c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c { break } x := v_0.Args[0] if x != v_1 { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(-1) return true } return false } func rewriteValueARM64_OpARM64Equal(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Equal (CMPconst [0] z:(AND x y))) // cond: z.Uses == 1 // result: (Equal (TST x y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (Equal (CMPWconst [0] x:(ANDconst [c] y))) // cond: x.Uses == 1 // result: (Equal (TSTWconst [int32(c)] y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != 
OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) v.AddArg(v0) return true } // match: (Equal (CMPWconst [0] z:(AND x y))) // cond: z.Uses == 1 // result: (Equal (TSTW x y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (Equal (CMPconst [0] x:(ANDconst [c] y))) // cond: x.Uses == 1 // result: (Equal (TSTconst [c] y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) v.AddArg(v0) return true } // match: (Equal (CMP x z:(NEG y))) // cond: z.Uses == 1 // result: (Equal (CMN x y)) for { if v_0.Op != OpARM64CMP { break } _ = v_0.Args[1] x := v_0.Args[0] z := v_0.Args[1] if z.Op != OpARM64NEG { break } y := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (Equal (CMPW x z:(NEG y))) // cond: z.Uses == 1 // result: (Equal (CMNW x y)) for { if v_0.Op != OpARM64CMPW { break } _ = v_0.Args[1] x := v_0.Args[0] z := v_0.Args[1] if z.Op != OpARM64NEG { break } y := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (Equal (CMPconst [0] x:(ADDconst [c] y))) // cond: x.Uses == 1 // result: (Equal (CMNconst [c] y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) v.AddArg(v0) return true } // match: (Equal (CMPWconst [0] x:(ADDconst [c] y))) // cond: x.Uses == 1 // result: (Equal (CMNWconst [int32(c)] y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) v.AddArg(v0) return true } // match: (Equal (CMPconst [0] z:(ADD x y))) // cond: z.Uses == 1 // result: (Equal (CMN x y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (Equal (CMPWconst [0] z:(ADD x y))) // cond: z.Uses == 1 // result: (Equal (CMNW x y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, 
OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (Equal (CMPconst [0] z:(MADD a x y))) // cond: z.Uses == 1 // result: (Equal (CMN a (MUL <x.Type> x y))) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADD { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (Equal (CMPconst [0] z:(MSUB a x y))) // cond: z.Uses == 1 // result: (Equal (CMP a (MUL <x.Type> x y))) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUB { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (Equal (CMPWconst [0] z:(MADDW a x y))) // cond: z.Uses == 1 // result: (Equal (CMNW a (MULW <x.Type> x y))) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADDW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (Equal (CMPWconst [0] z:(MSUBW a x y))) // cond: z.Uses == 1 // result: (Equal (CMPW a (MULW <x.Type> x y))) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUBW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v.reset(OpARM64Equal) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (Equal (FlagConstant [fc])) // result: (MOVDconst [b2i(fc.eq())]) for { if v_0.Op != OpARM64FlagConstant { break } fc := auxIntToFlagConstant(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(b2i(fc.eq())) return true } // match: (Equal (InvertFlags x)) // result: (Equal x) for { if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] v.reset(OpARM64Equal) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64FADDD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (FADDD a (FMULD x y)) // cond: a.Block.Func.useFMA(v) // result: (FMADDD a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { a := v_0 if v_1.Op != OpARM64FMULD { continue } y := v_1.Args[1] x := v_1.Args[0] if !(a.Block.Func.useFMA(v)) { continue } v.reset(OpARM64FMADDD) v.AddArg3(a, x, y) return true } break } // match: (FADDD a (FNMULD x y)) // cond: a.Block.Func.useFMA(v) // result: (FMSUBD a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { a := v_0 if v_1.Op != OpARM64FNMULD { continue } y := v_1.Args[1] x := v_1.Args[0] if !(a.Block.Func.useFMA(v)) { continue } v.reset(OpARM64FMSUBD) v.AddArg3(a, x, y) return true } break } return false } func rewriteValueARM64_OpARM64FADDS(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (FADDS a (FMULS x y)) // cond: a.Block.Func.useFMA(v) // result: (FMADDS a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { a := v_0 
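			// Fuse a + x*y into a single FMADDS when useFMA allows it;
			// the fused form rounds once instead of twice.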
if v_1.Op != OpARM64FMULS { continue } y := v_1.Args[1] x := v_1.Args[0] if !(a.Block.Func.useFMA(v)) { continue } v.reset(OpARM64FMADDS) v.AddArg3(a, x, y) return true } break } // match: (FADDS a (FNMULS x y)) // cond: a.Block.Func.useFMA(v) // result: (FMSUBS a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { a := v_0 if v_1.Op != OpARM64FNMULS { continue } y := v_1.Args[1] x := v_1.Args[0] if !(a.Block.Func.useFMA(v)) { continue } v.reset(OpARM64FMSUBS) v.AddArg3(a, x, y) return true } break } return false } func rewriteValueARM64_OpARM64FCMPD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (FCMPD x (FMOVDconst [0])) // result: (FCMPD0 x) for { x := v_0 if v_1.Op != OpARM64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != 0 { break } v.reset(OpARM64FCMPD0) v.AddArg(x) return true } // match: (FCMPD (FMOVDconst [0]) x) // result: (InvertFlags (FCMPD0 x)) for { if v_0.Op != OpARM64FMOVDconst || auxIntToFloat64(v_0.AuxInt) != 0 { break } x := v_1 v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64FCMPD0, types.TypeFlags) v0.AddArg(x) v.AddArg(v0) return true } return false } func rewriteValueARM64_OpARM64FCMPS(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (FCMPS x (FMOVSconst [0])) // result: (FCMPS0 x) for { x := v_0 if v_1.Op != OpARM64FMOVSconst || auxIntToFloat64(v_1.AuxInt) != 0 { break } v.reset(OpARM64FCMPS0) v.AddArg(x) return true } // match: (FCMPS (FMOVSconst [0]) x) // result: (InvertFlags (FCMPS0 x)) for { if v_0.Op != OpARM64FMOVSconst || auxIntToFloat64(v_0.AuxInt) != 0 { break } x := v_1 v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64FCMPS0, types.TypeFlags) v0.AddArg(x) v.AddArg(v0) return true } return false } func rewriteValueARM64_OpARM64FMOVDfpgp(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (FMOVDfpgp <t> (Arg [off] {sym})) // result: @b.Func.Entry (Arg <t> [off] {sym}) for { t := v.Type if v_0.Op != OpArg { break } off := auxIntToInt32(v_0.AuxInt) sym := auxToSym(v_0.Aux) b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) v.copyOf(v0) v0.AuxInt = int32ToAuxInt(off) v0.Aux = symToAux(sym) return true } return false } func rewriteValueARM64_OpARM64FMOVDgpfp(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (FMOVDgpfp <t> (Arg [off] {sym})) // result: @b.Func.Entry (Arg <t> [off] {sym}) for { t := v.Type if v_0.Op != OpArg { break } off := auxIntToInt32(v_0.AuxInt) sym := auxToSym(v_0.Aux) b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) v.copyOf(v0) v0.AuxInt = int32ToAuxInt(off) v0.Aux = symToAux(sym) return true } return false } func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) // result: (FMOVDgpfp val) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { break } val := v_1.Args[1] if ptr != v_1.Args[0] { break } v.reset(OpARM64FMOVDgpfp) v.AddArg(val) return true } // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (FMOVDload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDconst { break } off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || 
!config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64FMOVDload) v.AuxInt = int32ToAuxInt(off1 + int32(off2)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (FMOVDload [off] {sym} (ADD ptr idx) mem) // cond: off == 0 && sym == nil // result: (FMOVDloadidx ptr idx mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 if !(off == 0 && sym == nil) { break } v.reset(OpARM64FMOVDloadidx) v.AddArg3(ptr, idx, mem) return true } // match: (FMOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) // cond: off == 0 && sym == nil // result: (FMOVDloadidx8 ptr idx mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 { break } idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 if !(off == 0 && sym == nil) { break } v.reset(OpARM64FMOVDloadidx8) v.AddArg3(ptr, idx, mem) return true } // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym1 := auxToSym(v.Aux) if v_0.Op != OpARM64MOVDaddr { break } off2 := auxIntToInt32(v_0.AuxInt) sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64FMOVDload) v.AuxInt = int32ToAuxInt(off1 + off2) v.Aux = symToAux(mergeSym(sym1, sym2)) v.AddArg2(ptr, mem) return true } return false } func rewriteValueARM64_OpARM64FMOVDloadidx(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMOVDloadidx ptr (MOVDconst [c]) mem) // cond: is32Bit(c) // result: (FMOVDload [int32(c)] ptr mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64FMOVDload) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (FMOVDloadidx (MOVDconst [c]) ptr mem) // cond: is32Bit(c) // result: (FMOVDload [int32(c)] ptr mem) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) ptr := v_1 mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64FMOVDload) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (FMOVDloadidx ptr (SLLconst [3] idx) mem) // result: (FMOVDloadidx8 ptr idx mem) for { ptr := v_0 if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 { break } idx := v_1.Args[0] mem := v_2 v.reset(OpARM64FMOVDloadidx8) v.AddArg3(ptr, idx, mem) return true } // match: (FMOVDloadidx (SLLconst [3] idx) ptr mem) // result: (FMOVDloadidx8 ptr idx mem) for { if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 { break } idx := v_0.Args[0] ptr := v_1 mem := v_2 v.reset(OpARM64FMOVDloadidx8) v.AddArg3(ptr, idx, mem) return true } return false } func rewriteValueARM64_OpARM64FMOVDloadidx8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMOVDloadidx8 ptr (MOVDconst [c]) mem) // cond: is32Bit(c<<3) // result: (FMOVDload ptr [int32(c)<<3] mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c << 3)) { break } v.reset(OpARM64FMOVDload) v.AuxInt = int32ToAuxInt(int32(c) << 3) v.AddArg2(ptr, mem) return true } return false } func rewriteValueARM64_OpARM64FMOVDstore(v 
*Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem) // result: (MOVDstore [off] {sym} ptr val mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64FMOVDgpfp { break } val := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVDstore) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (FMOVDstore [off1+int32(off2)] {sym} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDconst { break } off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64FMOVDstore) v.AuxInt = int32ToAuxInt(off1 + int32(off2)) v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (FMOVDstore [off] {sym} (ADD ptr idx) val mem) // cond: off == 0 && sym == nil // result: (FMOVDstoreidx ptr idx val mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] val := v_1 mem := v_2 if !(off == 0 && sym == nil) { break } v.reset(OpARM64FMOVDstoreidx) v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) // cond: off == 0 && sym == nil // result: (FMOVDstoreidx8 ptr idx val mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 { break } idx := v_0.Args[1] ptr := v_0.Args[0] val := v_1 mem := v_2 if !(off == 0 && sym == nil) { break } v.reset(OpARM64FMOVDstoreidx8) v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) sym1 := auxToSym(v.Aux) if v_0.Op != OpARM64MOVDaddr { break } off2 := auxIntToInt32(v_0.AuxInt) sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] val := v_1 mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64FMOVDstore) v.AuxInt = int32ToAuxInt(off1 + off2) v.Aux = symToAux(mergeSym(sym1, sym2)) v.AddArg3(ptr, val, mem) return true } return false } func rewriteValueARM64_OpARM64FMOVDstoreidx(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMOVDstoreidx ptr (MOVDconst [c]) val mem) // cond: is32Bit(c) // result: (FMOVDstore [int32(c)] ptr val mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) val := v_2 mem := v_3 if !(is32Bit(c)) { break } v.reset(OpARM64FMOVDstore) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg3(ptr, val, mem) return true } // match: (FMOVDstoreidx (MOVDconst [c]) idx val mem) // cond: is32Bit(c) // result: (FMOVDstore [int32(c)] idx val mem) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) idx := v_1 val := v_2 mem := v_3 if !(is32Bit(c)) { break } v.reset(OpARM64FMOVDstore) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg3(idx, val, mem) return true } // match: (FMOVDstoreidx ptr 
(SLLconst [3] idx) val mem) // result: (FMOVDstoreidx8 ptr idx val mem) for { ptr := v_0 if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 { break } idx := v_1.Args[0] val := v_2 mem := v_3 v.reset(OpARM64FMOVDstoreidx8) v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVDstoreidx (SLLconst [3] idx) ptr val mem) // result: (FMOVDstoreidx8 ptr idx val mem) for { if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 { break } idx := v_0.Args[0] ptr := v_1 val := v_2 mem := v_3 v.reset(OpARM64FMOVDstoreidx8) v.AddArg4(ptr, idx, val, mem) return true } return false } func rewriteValueARM64_OpARM64FMOVDstoreidx8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMOVDstoreidx8 ptr (MOVDconst [c]) val mem) // cond: is32Bit(c<<3) // result: (FMOVDstore [int32(c)<<3] ptr val mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) val := v_2 mem := v_3 if !(is32Bit(c << 3)) { break } v.reset(OpARM64FMOVDstore) v.AuxInt = int32ToAuxInt(int32(c) << 3) v.AddArg3(ptr, val, mem) return true } return false } func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) // result: (FMOVSgpfp val) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { break } val := v_1.Args[1] if ptr != v_1.Args[0] { break } v.reset(OpARM64FMOVSgpfp) v.AddArg(val) return true } // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (FMOVSload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDconst { break } off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64FMOVSload) v.AuxInt = int32ToAuxInt(off1 + int32(off2)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (FMOVSload [off] {sym} (ADD ptr idx) mem) // cond: off == 0 && sym == nil // result: (FMOVSloadidx ptr idx mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 if !(off == 0 && sym == nil) { break } v.reset(OpARM64FMOVSloadidx) v.AddArg3(ptr, idx, mem) return true } // match: (FMOVSload [off] {sym} (ADDshiftLL [2] ptr idx) mem) // cond: off == 0 && sym == nil // result: (FMOVSloadidx4 ptr idx mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 { break } idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 if !(off == 0 && sym == nil) { break } v.reset(OpARM64FMOVSloadidx4) v.AddArg3(ptr, idx, mem) return true } // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym1 := auxToSym(v.Aux) if v_0.Op != OpARM64MOVDaddr { break } off2 := auxIntToInt32(v_0.AuxInt) sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || 
!config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64FMOVSload) v.AuxInt = int32ToAuxInt(off1 + off2) v.Aux = symToAux(mergeSym(sym1, sym2)) v.AddArg2(ptr, mem) return true } return false } func rewriteValueARM64_OpARM64FMOVSloadidx(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMOVSloadidx ptr (MOVDconst [c]) mem) // cond: is32Bit(c) // result: (FMOVSload [int32(c)] ptr mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64FMOVSload) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (FMOVSloadidx (MOVDconst [c]) ptr mem) // cond: is32Bit(c) // result: (FMOVSload [int32(c)] ptr mem) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) ptr := v_1 mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64FMOVSload) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (FMOVSloadidx ptr (SLLconst [2] idx) mem) // result: (FMOVSloadidx4 ptr idx mem) for { ptr := v_0 if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 { break } idx := v_1.Args[0] mem := v_2 v.reset(OpARM64FMOVSloadidx4) v.AddArg3(ptr, idx, mem) return true } // match: (FMOVSloadidx (SLLconst [2] idx) ptr mem) // result: (FMOVSloadidx4 ptr idx mem) for { if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 { break } idx := v_0.Args[0] ptr := v_1 mem := v_2 v.reset(OpARM64FMOVSloadidx4) v.AddArg3(ptr, idx, mem) return true } return false } func rewriteValueARM64_OpARM64FMOVSloadidx4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMOVSloadidx4 ptr (MOVDconst [c]) mem) // cond: is32Bit(c<<2) // result: (FMOVSload ptr [int32(c)<<2] mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c << 2)) { break } v.reset(OpARM64FMOVSload) v.AuxInt = int32ToAuxInt(int32(c) << 2) v.AddArg2(ptr, mem) return true } return false } func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem) // result: (MOVWstore [off] {sym} ptr val mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64FMOVSgpfp { break } val := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVWstore) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (FMOVSstore [off1+int32(off2)] {sym} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDconst { break } off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64FMOVSstore) v.AuxInt = int32ToAuxInt(off1 + int32(off2)) v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (FMOVSstore [off] {sym} (ADD ptr idx) val mem) // cond: off == 0 && sym == nil // result: (FMOVSstoreidx ptr idx val mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] val := v_1 mem := v_2 if !(off == 0 && sym == nil) { break } v.reset(OpARM64FMOVSstoreidx) v.AddArg4(ptr, idx, val, mem) return true } // match: 
(FMOVSstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) // cond: off == 0 && sym == nil // result: (FMOVSstoreidx4 ptr idx val mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 { break } idx := v_0.Args[1] ptr := v_0.Args[0] val := v_1 mem := v_2 if !(off == 0 && sym == nil) { break } v.reset(OpARM64FMOVSstoreidx4) v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) sym1 := auxToSym(v.Aux) if v_0.Op != OpARM64MOVDaddr { break } off2 := auxIntToInt32(v_0.AuxInt) sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] val := v_1 mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64FMOVSstore) v.AuxInt = int32ToAuxInt(off1 + off2) v.Aux = symToAux(mergeSym(sym1, sym2)) v.AddArg3(ptr, val, mem) return true } return false } func rewriteValueARM64_OpARM64FMOVSstoreidx(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMOVSstoreidx ptr (MOVDconst [c]) val mem) // cond: is32Bit(c) // result: (FMOVSstore [int32(c)] ptr val mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) val := v_2 mem := v_3 if !(is32Bit(c)) { break } v.reset(OpARM64FMOVSstore) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg3(ptr, val, mem) return true } // match: (FMOVSstoreidx (MOVDconst [c]) idx val mem) // cond: is32Bit(c) // result: (FMOVSstore [int32(c)] idx val mem) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) idx := v_1 val := v_2 mem := v_3 if !(is32Bit(c)) { break } v.reset(OpARM64FMOVSstore) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg3(idx, val, mem) return true } // match: (FMOVSstoreidx ptr (SLLconst [2] idx) val mem) // result: (FMOVSstoreidx4 ptr idx val mem) for { ptr := v_0 if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 { break } idx := v_1.Args[0] val := v_2 mem := v_3 v.reset(OpARM64FMOVSstoreidx4) v.AddArg4(ptr, idx, val, mem) return true } // match: (FMOVSstoreidx (SLLconst [2] idx) ptr val mem) // result: (FMOVSstoreidx4 ptr idx val mem) for { if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 { break } idx := v_0.Args[0] ptr := v_1 val := v_2 mem := v_3 v.reset(OpARM64FMOVSstoreidx4) v.AddArg4(ptr, idx, val, mem) return true } return false } func rewriteValueARM64_OpARM64FMOVSstoreidx4(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMOVSstoreidx4 ptr (MOVDconst [c]) val mem) // cond: is32Bit(c<<2) // result: (FMOVSstore [int32(c)<<2] ptr val mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) val := v_2 mem := v_3 if !(is32Bit(c << 2)) { break } v.reset(OpARM64FMOVSstore) v.AuxInt = int32ToAuxInt(int32(c) << 2) v.AddArg3(ptr, val, mem) return true } return false } func rewriteValueARM64_OpARM64FMULD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMULD (FNEGD x) y) // result: (FNMULD x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64FNEGD { continue } x := v_0.Args[0] y := v_1 v.reset(OpARM64FNMULD) v.AddArg2(x, y) return true } break } return false } func 
rewriteValueARM64_OpARM64FMULS(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (FMULS (FNEGS x) y) // result: (FNMULS x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64FNEGS { continue } x := v_0.Args[0] y := v_1 v.reset(OpARM64FNMULS) v.AddArg2(x, y) return true } break } return false } func rewriteValueARM64_OpARM64FNEGD(v *Value) bool { v_0 := v.Args[0] // match: (FNEGD (FMULD x y)) // result: (FNMULD x y) for { if v_0.Op != OpARM64FMULD { break } y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64FNMULD) v.AddArg2(x, y) return true } // match: (FNEGD (FNMULD x y)) // result: (FMULD x y) for { if v_0.Op != OpARM64FNMULD { break } y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64FMULD) v.AddArg2(x, y) return true } return false } func rewriteValueARM64_OpARM64FNEGS(v *Value) bool { v_0 := v.Args[0] // match: (FNEGS (FMULS x y)) // result: (FNMULS x y) for { if v_0.Op != OpARM64FMULS { break } y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64FNMULS) v.AddArg2(x, y) return true } // match: (FNEGS (FNMULS x y)) // result: (FMULS x y) for { if v_0.Op != OpARM64FNMULS { break } y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64FMULS) v.AddArg2(x, y) return true } return false } func rewriteValueARM64_OpARM64FNMULD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (FNMULD (FNEGD x) y) // result: (FMULD x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64FNEGD { continue } x := v_0.Args[0] y := v_1 v.reset(OpARM64FMULD) v.AddArg2(x, y) return true } break } return false } func rewriteValueARM64_OpARM64FNMULS(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (FNMULS (FNEGS x) y) // result: (FMULS x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64FNEGS { continue } x := v_0.Args[0] y := v_1 v.reset(OpARM64FMULS) v.AddArg2(x, y) return true } break } return false } func rewriteValueARM64_OpARM64FSUBD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (FSUBD a (FMULD x y)) // cond: a.Block.Func.useFMA(v) // result: (FMSUBD a x y) for { a := v_0 if v_1.Op != OpARM64FMULD { break } y := v_1.Args[1] x := v_1.Args[0] if !(a.Block.Func.useFMA(v)) { break } v.reset(OpARM64FMSUBD) v.AddArg3(a, x, y) return true } // match: (FSUBD (FMULD x y) a) // cond: a.Block.Func.useFMA(v) // result: (FNMSUBD a x y) for { if v_0.Op != OpARM64FMULD { break } y := v_0.Args[1] x := v_0.Args[0] a := v_1 if !(a.Block.Func.useFMA(v)) { break } v.reset(OpARM64FNMSUBD) v.AddArg3(a, x, y) return true } // match: (FSUBD a (FNMULD x y)) // cond: a.Block.Func.useFMA(v) // result: (FMADDD a x y) for { a := v_0 if v_1.Op != OpARM64FNMULD { break } y := v_1.Args[1] x := v_1.Args[0] if !(a.Block.Func.useFMA(v)) { break } v.reset(OpARM64FMADDD) v.AddArg3(a, x, y) return true } // match: (FSUBD (FNMULD x y) a) // cond: a.Block.Func.useFMA(v) // result: (FNMADDD a x y) for { if v_0.Op != OpARM64FNMULD { break } y := v_0.Args[1] x := v_0.Args[0] a := v_1 if !(a.Block.Func.useFMA(v)) { break } v.reset(OpARM64FNMADDD) v.AddArg3(a, x, y) return true } return false } func rewriteValueARM64_OpARM64FSUBS(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (FSUBS a (FMULS x y)) // cond: a.Block.Func.useFMA(v) // result: (FMSUBS a x y) for { a := v_0 if v_1.Op != OpARM64FMULS { break } y := v_1.Args[1] x := v_1.Args[0] if !(a.Block.Func.useFMA(v)) { break } v.reset(OpARM64FMSUBS) v.AddArg3(a, x, y) return true } // match: (FSUBS (FMULS x y) a) // cond: a.Block.Func.useFMA(v) // 
result: (FNMSUBS a x y) for { if v_0.Op != OpARM64FMULS { break } y := v_0.Args[1] x := v_0.Args[0] a := v_1 if !(a.Block.Func.useFMA(v)) { break } v.reset(OpARM64FNMSUBS) v.AddArg3(a, x, y) return true } // match: (FSUBS a (FNMULS x y)) // cond: a.Block.Func.useFMA(v) // result: (FMADDS a x y) for { a := v_0 if v_1.Op != OpARM64FNMULS { break } y := v_1.Args[1] x := v_1.Args[0] if !(a.Block.Func.useFMA(v)) { break } v.reset(OpARM64FMADDS) v.AddArg3(a, x, y) return true } // match: (FSUBS (FNMULS x y) a) // cond: a.Block.Func.useFMA(v) // result: (FNMADDS a x y) for { if v_0.Op != OpARM64FNMULS { break } y := v_0.Args[1] x := v_0.Args[0] a := v_1 if !(a.Block.Func.useFMA(v)) { break } v.reset(OpARM64FNMADDS) v.AddArg3(a, x, y) return true } return false } func rewriteValueARM64_OpARM64GreaterEqual(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (GreaterEqual (CMPconst [0] z:(AND x y))) // cond: z.Uses == 1 // result: (GreaterEqual (TST x y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64GreaterEqual) v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (GreaterEqual (CMPWconst [0] x:(ANDconst [c] y))) // cond: x.Uses == 1 // result: (GreaterEqual (TSTWconst [int32(c)] y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v.reset(OpARM64GreaterEqual) v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) v.AddArg(v0) return true } // match: (GreaterEqual (CMPWconst [0] z:(AND x y))) // cond: z.Uses == 1 // result: (GreaterEqual (TSTW x y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64GreaterEqual) v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (GreaterEqual (CMPconst [0] x:(ANDconst [c] y))) // cond: x.Uses == 1 // result: (GreaterEqual (TSTconst [c] y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v.reset(OpARM64GreaterEqual) v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) v.AddArg(v0) return true } // match: (GreaterEqual (CMPconst [0] x:(ADDconst [c] y))) // cond: x.Uses == 1 // result: (GreaterEqualNoov (CMNconst [c] y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v.reset(OpARM64GreaterEqualNoov) v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) v.AddArg(v0) return true } // match: (GreaterEqual (CMPWconst [0] x:(ADDconst [c] y))) // cond: x.Uses == 1 // result: (GreaterEqualNoov (CMNWconst [int32(c)] y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } 
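// Note: CMNWconst computes the flags of y+c directly, and that addition can
// set the overflow flag, so the ordinary signed GE predicate (N == V) would
// no longer mean "y+c >= 0" under Go's wraparound semantics. GreaterEqualNoov
// evaluates GE as if V were clear, i.e. it effectively tests the sign of the
// wrapped sum (a sketch of the rationale; the *Noov ops are documented in
// ARM64Ops.go).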
v.reset(OpARM64GreaterEqualNoov) v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) v.AddArg(v0) return true } // match: (GreaterEqual (CMPconst [0] z:(ADD x y))) // cond: z.Uses == 1 // result: (GreaterEqualNoov (CMN x y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64GreaterEqualNoov) v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (GreaterEqual (CMPWconst [0] z:(ADD x y))) // cond: z.Uses == 1 // result: (GreaterEqualNoov (CMNW x y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64GreaterEqualNoov) v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (GreaterEqual (CMPconst [0] z:(MADD a x y))) // cond: z.Uses == 1 // result: (GreaterEqualNoov (CMN a (MUL <x.Type> x y))) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADD { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v.reset(OpARM64GreaterEqualNoov) v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (GreaterEqual (CMPconst [0] z:(MSUB a x y))) // cond: z.Uses == 1 // result: (GreaterEqualNoov (CMP a (MUL <x.Type> x y))) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUB { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v.reset(OpARM64GreaterEqualNoov) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (GreaterEqual (CMPWconst [0] z:(MADDW a x y))) // cond: z.Uses == 1 // result: (GreaterEqualNoov (CMNW a (MULW <x.Type> x y))) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADDW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v.reset(OpARM64GreaterEqualNoov) v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (GreaterEqual (CMPWconst [0] z:(MSUBW a x y))) // cond: z.Uses == 1 // result: (GreaterEqualNoov (CMPW a (MULW <x.Type> x y))) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUBW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v.reset(OpARM64GreaterEqualNoov) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (GreaterEqual (FlagConstant [fc])) // result: (MOVDconst [b2i(fc.ge())]) for { if v_0.Op != OpARM64FlagConstant { break } fc := auxIntToFlagConstant(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(b2i(fc.ge())) return true } // match: (GreaterEqual (InvertFlags x)) // result: (LessEqual x) for { if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] 
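// InvertFlags represents the flags of the same comparison with its operands
// swapped, so the condition flips here: x >= y iff y <= x.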
v.reset(OpARM64LessEqual) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64GreaterEqualF(v *Value) bool { v_0 := v.Args[0] // match: (GreaterEqualF (InvertFlags x)) // result: (LessEqualF x) for { if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] v.reset(OpARM64LessEqualF) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64GreaterEqualNoov(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (GreaterEqualNoov (InvertFlags x)) // result: (CSINC [OpARM64NotEqual] (LessThanNoov <typ.Bool> x) (MOVDconst [0]) x) for { if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] v.reset(OpARM64CSINC) v.AuxInt = opToAuxInt(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64LessThanNoov, typ.Bool) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v1.AuxInt = int64ToAuxInt(0) v.AddArg3(v0, v1, x) return true } return false } func rewriteValueARM64_OpARM64GreaterEqualU(v *Value) bool { v_0 := v.Args[0] // match: (GreaterEqualU (FlagConstant [fc])) // result: (MOVDconst [b2i(fc.uge())]) for { if v_0.Op != OpARM64FlagConstant { break } fc := auxIntToFlagConstant(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(b2i(fc.uge())) return true } // match: (GreaterEqualU (InvertFlags x)) // result: (LessEqualU x) for { if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] v.reset(OpARM64LessEqualU) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64GreaterThan(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (GreaterThan (CMPconst [0] z:(AND x y))) // cond: z.Uses == 1 // result: (GreaterThan (TST x y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64GreaterThan) v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (GreaterThan (CMPWconst [0] x:(ANDconst [c] y))) // cond: x.Uses == 1 // result: (GreaterThan (TSTWconst [int32(c)] y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v.reset(OpARM64GreaterThan) v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) v.AddArg(v0) return true } // match: (GreaterThan (CMPWconst [0] z:(AND x y))) // cond: z.Uses == 1 // result: (GreaterThan (TSTW x y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64GreaterThan) v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (GreaterThan (CMPconst [0] x:(ANDconst [c] y))) // cond: x.Uses == 1 // result: (GreaterThan (TSTconst [c] y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v.reset(OpARM64GreaterThan) v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) v.AddArg(v0) return true } // match: (GreaterThan (FlagConstant [fc])) // result: (MOVDconst [b2i(fc.gt())]) for { if v_0.Op != OpARM64FlagConstant { break } fc := auxIntToFlagConstant(v_0.AuxInt) 
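// The flags are a compile-time constant, so the predicate is decidable now:
// b2i(fc.gt()) folds the boolean result into a MOVDconst of 0 or 1.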
v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(b2i(fc.gt())) return true } // match: (GreaterThan (InvertFlags x)) // result: (LessThan x) for { if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] v.reset(OpARM64LessThan) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64GreaterThanF(v *Value) bool { v_0 := v.Args[0] // match: (GreaterThanF (InvertFlags x)) // result: (LessThanF x) for { if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] v.reset(OpARM64LessThanF) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64GreaterThanU(v *Value) bool { v_0 := v.Args[0] // match: (GreaterThanU (FlagConstant [fc])) // result: (MOVDconst [b2i(fc.ugt())]) for { if v_0.Op != OpARM64FlagConstant { break } fc := auxIntToFlagConstant(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(b2i(fc.ugt())) return true } // match: (GreaterThanU (InvertFlags x)) // result: (LessThanU x) for { if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] v.reset(OpARM64LessThanU) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64LDP(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (LDP [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (LDP [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDconst { break } off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64LDP) v.AuxInt = int32ToAuxInt(off1 + int32(off2)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (LDP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (LDP [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym1 := auxToSym(v.Aux) if v_0.Op != OpARM64MOVDaddr { break } off2 := auxIntToInt32(v_0.AuxInt) sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64LDP) v.AuxInt = int32ToAuxInt(off1 + off2) v.Aux = symToAux(mergeSym(sym1, sym2)) v.AddArg2(ptr, mem) return true } return false } func rewriteValueARM64_OpARM64LessEqual(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LessEqual (CMPconst [0] z:(AND x y))) // cond: z.Uses == 1 // result: (LessEqual (TST x y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64LessEqual) v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (LessEqual (CMPWconst [0] x:(ANDconst [c] y))) // cond: x.Uses == 1 // result: (LessEqual (TSTWconst [int32(c)] y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v.reset(OpARM64LessEqual) v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) v.AddArg(v0) return true } // match: (LessEqual (CMPWconst [0] z:(AND x y))) // cond: z.Uses == 1 // result: (LessEqual 
(TSTW x y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64LessEqual) v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (LessEqual (CMPconst [0] x:(ANDconst [c] y))) // cond: x.Uses == 1 // result: (LessEqual (TSTconst [c] y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v.reset(OpARM64LessEqual) v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) v.AddArg(v0) return true } // match: (LessEqual (FlagConstant [fc])) // result: (MOVDconst [b2i(fc.le())]) for { if v_0.Op != OpARM64FlagConstant { break } fc := auxIntToFlagConstant(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(b2i(fc.le())) return true } // match: (LessEqual (InvertFlags x)) // result: (GreaterEqual x) for { if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] v.reset(OpARM64GreaterEqual) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64LessEqualF(v *Value) bool { v_0 := v.Args[0] // match: (LessEqualF (InvertFlags x)) // result: (GreaterEqualF x) for { if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] v.reset(OpARM64GreaterEqualF) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64LessEqualU(v *Value) bool { v_0 := v.Args[0] // match: (LessEqualU (FlagConstant [fc])) // result: (MOVDconst [b2i(fc.ule())]) for { if v_0.Op != OpARM64FlagConstant { break } fc := auxIntToFlagConstant(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(b2i(fc.ule())) return true } // match: (LessEqualU (InvertFlags x)) // result: (GreaterEqualU x) for { if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] v.reset(OpARM64GreaterEqualU) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64LessThan(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (LessThan (CMPconst [0] z:(AND x y))) // cond: z.Uses == 1 // result: (LessThan (TST x y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64LessThan) v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (LessThan (CMPWconst [0] x:(ANDconst [c] y))) // cond: x.Uses == 1 // result: (LessThan (TSTWconst [int32(c)] y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v.reset(OpARM64LessThan) v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) v.AddArg(v0) return true } // match: (LessThan (CMPWconst [0] z:(AND x y))) // cond: z.Uses == 1 // result: (LessThan (TSTW x y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64LessThan) v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (LessThan (CMPconst [0] x:(ANDconst [c] y))) // cond: x.Uses == 1 // result: (LessThan 
(TSTconst [c] y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v.reset(OpARM64LessThan) v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) v.AddArg(v0) return true } // match: (LessThan (CMPconst [0] x:(ADDconst [c] y))) // cond: x.Uses == 1 // result: (LessThanNoov (CMNconst [c] y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v.reset(OpARM64LessThanNoov) v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) v.AddArg(v0) return true } // match: (LessThan (CMPWconst [0] x:(ADDconst [c] y))) // cond: x.Uses == 1 // result: (LessThanNoov (CMNWconst [int32(c)] y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v.reset(OpARM64LessThanNoov) v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) v.AddArg(v0) return true } // match: (LessThan (CMPconst [0] z:(ADD x y))) // cond: z.Uses == 1 // result: (LessThanNoov (CMN x y)) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64LessThanNoov) v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (LessThan (CMPWconst [0] z:(ADD x y))) // cond: z.Uses == 1 // result: (LessThanNoov (CMNW x y)) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } y := z.Args[1] x := z.Args[0] if !(z.Uses == 1) { break } v.reset(OpARM64LessThanNoov) v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (LessThan (CMPconst [0] z:(MADD a x y))) // cond: z.Uses == 1 // result: (LessThanNoov (CMN a (MUL <x.Type> x y))) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADD { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v.reset(OpARM64LessThanNoov) v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (LessThan (CMPconst [0] z:(MSUB a x y))) // cond: z.Uses == 1 // result: (LessThanNoov (CMP a (MUL <x.Type> x y))) for { if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUB { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v.reset(OpARM64LessThanNoov) v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (LessThan (CMPWconst [0] z:(MADDW a x y))) // cond: z.Uses == 1 // result: (LessThanNoov (CMNW a (MULW <x.Type> x y))) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADDW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { 
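// z is also used elsewhere; folding the multiply-add into the compare would
// duplicate the MULW, so only single-use MADDWs are rewritten.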
break } v.reset(OpARM64LessThanNoov) v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (LessThan (CMPWconst [0] z:(MSUBW a x y))) // cond: z.Uses == 1 // result: (LessThanNoov (CMPW a (MULW <x.Type> x y))) for { if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUBW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v.reset(OpARM64LessThanNoov) v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (LessThan (FlagConstant [fc])) // result: (MOVDconst [b2i(fc.lt())]) for { if v_0.Op != OpARM64FlagConstant { break } fc := auxIntToFlagConstant(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(b2i(fc.lt())) return true } // match: (LessThan (InvertFlags x)) // result: (GreaterThan x) for { if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] v.reset(OpARM64GreaterThan) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64LessThanF(v *Value) bool { v_0 := v.Args[0] // match: (LessThanF (InvertFlags x)) // result: (GreaterThanF x) for { if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] v.reset(OpARM64GreaterThanF) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64LessThanNoov(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (LessThanNoov (InvertFlags x)) // result: (CSEL0 [OpARM64NotEqual] (GreaterEqualNoov <typ.Bool> x) x) for { if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] v.reset(OpARM64CSEL0) v.AuxInt = opToAuxInt(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64GreaterEqualNoov, typ.Bool) v0.AddArg(x) v.AddArg2(v0, x) return true } return false } func rewriteValueARM64_OpARM64LessThanU(v *Value) bool { v_0 := v.Args[0] // match: (LessThanU (FlagConstant [fc])) // result: (MOVDconst [b2i(fc.ult())]) for { if v_0.Op != OpARM64FlagConstant { break } fc := auxIntToFlagConstant(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(b2i(fc.ult())) return true } // match: (LessThanU (InvertFlags x)) // result: (GreaterThanU x) for { if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] v.reset(OpARM64GreaterThanU) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64MADD(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (MADD a x (MOVDconst [-1])) // result: (SUB a x) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != -1 { break } v.reset(OpARM64SUB) v.AddArg2(a, x) return true } // match: (MADD a _ (MOVDconst [0])) // result: a for { a := v_0 if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 { break } v.copyOf(a) return true } // match: (MADD a x (MOVDconst [1])) // result: (ADD a x) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 1 { break } v.reset(OpARM64ADD) v.AddArg2(a, x) return true } // match: (MADD a x (MOVDconst [c])) // cond: isPowerOfTwo64(c) // result: (ADDshiftLL a x [log64(c)]) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo64(c)) { break } v.reset(OpARM64ADDshiftLL) v.AuxInt = int64ToAuxInt(log64(c)) v.AddArg2(a, x) return true } // match: (MADD a x (MOVDconst [c])) // cond: isPowerOfTwo64(c-1) && c>=3 // result: (ADD a 
(ADDshiftLL <x.Type> x x [log64(c-1)])) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo64(c-1) && c >= 3) { break } v.reset(OpARM64ADD) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(log64(c - 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true } // match: (MADD a x (MOVDconst [c])) // cond: isPowerOfTwo64(c+1) && c>=7 // result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)])) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo64(c+1) && c >= 7) { break } v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(log64(c + 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true } // match: (MADD a x (MOVDconst [c])) // cond: c%3 == 0 && isPowerOfTwo64(c/3) // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(c%3 == 0 && isPowerOfTwo64(c/3)) { break } v.reset(OpARM64SUBshiftLL) v.AuxInt = int64ToAuxInt(log64(c / 3)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true } // match: (MADD a x (MOVDconst [c])) // cond: c%5 == 0 && isPowerOfTwo64(c/5) // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(c%5 == 0 && isPowerOfTwo64(c/5)) { break } v.reset(OpARM64ADDshiftLL) v.AuxInt = int64ToAuxInt(log64(c / 5)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true } // match: (MADD a x (MOVDconst [c])) // cond: c%7 == 0 && isPowerOfTwo64(c/7) // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(c%7 == 0 && isPowerOfTwo64(c/7)) { break } v.reset(OpARM64SUBshiftLL) v.AuxInt = int64ToAuxInt(log64(c / 7)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true } // match: (MADD a x (MOVDconst [c])) // cond: c%9 == 0 && isPowerOfTwo64(c/9) // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(c%9 == 0 && isPowerOfTwo64(c/9)) { break } v.reset(OpARM64ADDshiftLL) v.AuxInt = int64ToAuxInt(log64(c / 9)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [-1]) x) // result: (SUB a x) for { a := v_0 if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 { break } x := v_2 v.reset(OpARM64SUB) v.AddArg2(a, x) return true } // match: (MADD a (MOVDconst [0]) _) // result: a for { a := v_0 if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { break } v.copyOf(a) return true } // match: (MADD a (MOVDconst [1]) x) // result: (ADD a x) for { a := v_0 if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { break } x := v_2 v.reset(OpARM64ADD) v.AddArg2(a, x) return true } // match: (MADD a (MOVDconst [c]) x) // cond: isPowerOfTwo64(c) // result: (ADDshiftLL a x [log64(c)]) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo64(c)) { break } v.reset(OpARM64ADDshiftLL) 
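// Strength reduction for a power-of-two multiplier: a + x*c == a + (x << log2(c)).
// For example, c == 8 rewrites (MADD a (MOVDconst [8]) x) to (ADDshiftLL a x [3]).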
v.AuxInt = int64ToAuxInt(log64(c)) v.AddArg2(a, x) return true } // match: (MADD a (MOVDconst [c]) x) // cond: isPowerOfTwo64(c-1) && c>=3 // result: (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)])) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo64(c-1) && c >= 3) { break } v.reset(OpARM64ADD) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(log64(c - 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [c]) x) // cond: isPowerOfTwo64(c+1) && c>=7 // result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)])) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo64(c+1) && c >= 7) { break } v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(log64(c + 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [c]) x) // cond: c%3 == 0 && isPowerOfTwo64(c/3) // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%3 == 0 && isPowerOfTwo64(c/3)) { break } v.reset(OpARM64SUBshiftLL) v.AuxInt = int64ToAuxInt(log64(c / 3)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [c]) x) // cond: c%5 == 0 && isPowerOfTwo64(c/5) // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%5 == 0 && isPowerOfTwo64(c/5)) { break } v.reset(OpARM64ADDshiftLL) v.AuxInt = int64ToAuxInt(log64(c / 5)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [c]) x) // cond: c%7 == 0 && isPowerOfTwo64(c/7) // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%7 == 0 && isPowerOfTwo64(c/7)) { break } v.reset(OpARM64SUBshiftLL) v.AuxInt = int64ToAuxInt(log64(c / 7)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true } // match: (MADD a (MOVDconst [c]) x) // cond: c%9 == 0 && isPowerOfTwo64(c/9) // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%9 == 0 && isPowerOfTwo64(c/9)) { break } v.reset(OpARM64ADDshiftLL) v.AuxInt = int64ToAuxInt(log64(c / 9)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true } // match: (MADD (MOVDconst [c]) x y) // result: (ADDconst [c] (MUL <x.Type> x y)) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 y := v_2 v.reset(OpARM64ADDconst) v.AuxInt = int64ToAuxInt(c) v0 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) v0.AddArg2(x, y) v.AddArg(v0) return true } // match: (MADD a (MOVDconst [c]) (MOVDconst [d])) // result: (ADDconst [c*d] a) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) if v_2.Op != OpARM64MOVDconst { break } d := auxIntToInt64(v_2.AuxInt) v.reset(OpARM64ADDconst) v.AuxInt = int64ToAuxInt(c * d) v.AddArg(a) return true } return false } func 
rewriteValueARM64_OpARM64MADDW(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (MADDW a x (MOVDconst [c])) // cond: int32(c)==-1 // result: (MOVWUreg (SUB <a.Type> a x)) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(int32(c) == -1) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type) v0.AddArg2(a, x) v.AddArg(v0) return true } // match: (MADDW a _ (MOVDconst [c])) // cond: int32(c)==0 // result: (MOVWUreg a) for { a := v_0 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(int32(c) == 0) { break } v.reset(OpARM64MOVWUreg) v.AddArg(a) return true } // match: (MADDW a x (MOVDconst [c])) // cond: int32(c)==1 // result: (MOVWUreg (ADD <a.Type> a x)) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(int32(c) == 1) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type) v0.AddArg2(a, x) v.AddArg(v0) return true } // match: (MADDW a x (MOVDconst [c])) // cond: isPowerOfTwo64(c) // result: (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)])) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo64(c)) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) v0.AuxInt = int64ToAuxInt(log64(c)) v0.AddArg2(a, x) v.AddArg(v0) return true } // match: (MADDW a x (MOVDconst [c])) // cond: isPowerOfTwo64(c-1) && int32(c)>=3 // result: (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)]))) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo64(c-1) && int32(c) >= 3) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(log64(c - 1)) v1.AddArg2(x, x) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (MADDW a x (MOVDconst [c])) // cond: isPowerOfTwo64(c+1) && int32(c)>=7 // result: (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)]))) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo64(c+1) && int32(c) >= 7) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type) v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(log64(c + 1)) v1.AddArg2(x, x) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (MADDW a x (MOVDconst [c])) // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) // result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type) v0.AuxInt = int64ToAuxInt(log64(c / 3)) v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(2) v1.AddArg2(x, x) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (MADDW a x (MOVDconst [c])) // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) // result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) v0.AuxInt = 
int64ToAuxInt(log64(c / 5)) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(2) v1.AddArg2(x, x) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (MADDW a x (MOVDconst [c])) // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) // result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type) v0.AuxInt = int64ToAuxInt(log64(c / 7)) v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(3) v1.AddArg2(x, x) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (MADDW a x (MOVDconst [c])) // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) // result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])) for { a := v_0 x := v_1 if v_2.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_2.AuxInt) if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) v0.AuxInt = int64ToAuxInt(log64(c / 9)) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(3) v1.AddArg2(x, x) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (MADDW a (MOVDconst [c]) x) // cond: int32(c)==-1 // result: (MOVWUreg (SUB <a.Type> a x)) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(int32(c) == -1) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type) v0.AddArg2(a, x) v.AddArg(v0) return true } // match: (MADDW a (MOVDconst [c]) _) // cond: int32(c)==0 // result: (MOVWUreg a) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) if !(int32(c) == 0) { break } v.reset(OpARM64MOVWUreg) v.AddArg(a) return true } // match: (MADDW a (MOVDconst [c]) x) // cond: int32(c)==1 // result: (MOVWUreg (ADD <a.Type> a x)) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(int32(c) == 1) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type) v0.AddArg2(a, x) v.AddArg(v0) return true } // match: (MADDW a (MOVDconst [c]) x) // cond: isPowerOfTwo64(c) // result: (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)])) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo64(c)) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) v0.AuxInt = int64ToAuxInt(log64(c)) v0.AddArg2(a, x) v.AddArg(v0) return true } // match: (MADDW a (MOVDconst [c]) x) // cond: isPowerOfTwo64(c-1) && int32(c)>=3 // result: (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)]))) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo64(c-1) && int32(c) >= 3) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(log64(c - 1)) v1.AddArg2(x, x) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (MADDW a (MOVDconst [c]) x) // cond: isPowerOfTwo64(c+1) && int32(c)>=7 // result: (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)]))) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo64(c+1) && int32(c) >= 7) { break } 
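// Here c == 2^k - 1 with k = log64(c+1): SUBshiftLL x x [k] computes
// x - (x<<k) == -x*c, so SUB a (...) yields a + x*c. E.g. c == 7, k == 3:
// a - (x - 8*x) == a + 7*x. The MOVWUreg wrapper preserves the zero-extended
// 32-bit result, matching the other MADDW rewrites here.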
v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type) v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(log64(c + 1)) v1.AddArg2(x, x) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (MADDW a (MOVDconst [c]) x) // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) // result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type) v0.AuxInt = int64ToAuxInt(log64(c / 3)) v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(2) v1.AddArg2(x, x) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (MADDW a (MOVDconst [c]) x) // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) // result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) v0.AuxInt = int64ToAuxInt(log64(c / 5)) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(2) v1.AddArg2(x, x) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (MADDW a (MOVDconst [c]) x) // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) // result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type) v0.AuxInt = int64ToAuxInt(log64(c / 7)) v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(3) v1.AddArg2(x, x) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (MADDW a (MOVDconst [c]) x) // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) // result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) { break } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type) v0.AuxInt = int64ToAuxInt(log64(c / 9)) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(3) v1.AddArg2(x, x) v0.AddArg2(a, v1) v.AddArg(v0) return true } // match: (MADDW (MOVDconst [c]) x y) // result: (MOVWUreg (ADDconst <x.Type> [c] (MULW <x.Type> x y))) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_1 y := v_2 v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64ADDconst, x.Type) v0.AuxInt = int64ToAuxInt(c) v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true } // match: (MADDW a (MOVDconst [c]) (MOVDconst [d])) // result: (MOVWUreg (ADDconst <a.Type> [c*d] a)) for { a := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) if v_2.Op != OpARM64MOVDconst { break } d := auxIntToInt64(v_2.AuxInt) v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64ADDconst, a.Type) v0.AuxInt = int64ToAuxInt(c * d) v0.AddArg(a) v.AddArg(v0) return true } return false } func rewriteValueARM64_OpARM64MNEG(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // 
match: (MNEG x (MOVDconst [-1])) // result: x for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 { continue } v.copyOf(x) return true } break } // match: (MNEG _ (MOVDconst [0])) // result: (MOVDconst [0]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { continue } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } break } // match: (MNEG x (MOVDconst [1])) // result: (NEG x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { continue } v.reset(OpARM64NEG) v.AddArg(x) return true } break } // match: (MNEG x (MOVDconst [c])) // cond: isPowerOfTwo64(c) // result: (NEG (SLLconst <x.Type> [log64(c)] x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo64(c)) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) v0.AuxInt = int64ToAuxInt(log64(c)) v0.AddArg(x) v.AddArg(v0) return true } break } // match: (MNEG x (MOVDconst [c])) // cond: isPowerOfTwo64(c-1) && c >= 3 // result: (NEG (ADDshiftLL <x.Type> x x [log64(c-1)])) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo64(c-1) && c >= 3) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(log64(c - 1)) v0.AddArg2(x, x) v.AddArg(v0) return true } break } // match: (MNEG x (MOVDconst [c])) // cond: isPowerOfTwo64(c+1) && c >= 7 // result: (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)])) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo64(c+1) && c >= 7) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(log64(c + 1)) v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1.AddArg(x) v0.AddArg2(v1, x) v.AddArg(v0) return true } break } // match: (MNEG x (MOVDconst [c])) // cond: c%3 == 0 && isPowerOfTwo64(c/3) // result: (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2])) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(c%3 == 0 && isPowerOfTwo64(c/3)) { continue } v.reset(OpARM64SLLconst) v.Type = x.Type v.AuxInt = int64ToAuxInt(log64(c / 3)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg(v0) return true } break } // match: (MNEG x (MOVDconst [c])) // cond: c%5 == 0 && isPowerOfTwo64(c/5) // result: (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(c%5 == 0 && isPowerOfTwo64(c/5)) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) v0.AuxInt = int64ToAuxInt(log64(c / 5)) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(2) v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) return true } break } // match: (MNEG x (MOVDconst [c])) // cond: c%7 == 0 && isPowerOfTwo64(c/7) // result: (SLLconst <x.Type> 
[log64(c/7)] (SUBshiftLL <x.Type> x x [3])) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(c%7 == 0 && isPowerOfTwo64(c/7)) { continue } v.reset(OpARM64SLLconst) v.Type = x.Type v.AuxInt = int64ToAuxInt(log64(c / 7)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg(v0) return true } break } // match: (MNEG x (MOVDconst [c])) // cond: c%9 == 0 && isPowerOfTwo64(c/9) // result: (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(c%9 == 0 && isPowerOfTwo64(c/9)) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) v0.AuxInt = int64ToAuxInt(log64(c / 9)) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(3) v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) return true } break } // match: (MNEG (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [-c*d]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpARM64MOVDconst { continue } d := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(-c * d) return true } break } return false } func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (MNEGW x (MOVDconst [c])) // cond: int32(c)==-1 // result: (MOVWUreg x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(int32(c) == -1) { continue } v.reset(OpARM64MOVWUreg) v.AddArg(x) return true } break } // match: (MNEGW _ (MOVDconst [c])) // cond: int32(c)==0 // result: (MOVDconst [0]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(int32(c) == 0) { continue } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } break } // match: (MNEGW x (MOVDconst [c])) // cond: int32(c)==1 // result: (MOVWUreg (NEG <x.Type> x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(int32(c) == 1) { continue } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v0.AddArg(x) v.AddArg(v0) return true } break } // match: (MNEGW x (MOVDconst [c])) // cond: isPowerOfTwo64(c) // result: (NEG (SLLconst <x.Type> [log64(c)] x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo64(c)) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) v0.AuxInt = int64ToAuxInt(log64(c)) v0.AddArg(x) v.AddArg(v0) return true } break } // match: (MNEGW x (MOVDconst [c])) // cond: isPowerOfTwo64(c-1) && int32(c) >= 3 // result: (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> x x [log64(c-1)]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo64(c-1) && int32(c) >= 3) { continue } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = 
int64ToAuxInt(log64(c - 1)) v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) return true } break } // match: (MNEGW x (MOVDconst [c])) // cond: isPowerOfTwo64(c+1) && int32(c) >= 7 // result: (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo64(c+1) && int32(c) >= 7) { continue } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(log64(c + 1)) v2 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v2.AddArg(x) v1.AddArg2(v2, x) v0.AddArg(v1) v.AddArg(v0) return true } break } // match: (MNEGW x (MOVDconst [c])) // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) // result: (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) { continue } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) v0.AuxInt = int64ToAuxInt(log64(c / 3)) v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(2) v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) return true } break } // match: (MNEGW x (MOVDconst [c])) // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) // result: (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) { continue } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) v1.AuxInt = int64ToAuxInt(log64(c / 5)) v2 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v2.AuxInt = int64ToAuxInt(2) v2.AddArg2(x, x) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) return true } break } // match: (MNEGW x (MOVDconst [c])) // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) // result: (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) { continue } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) v0.AuxInt = int64ToAuxInt(log64(c / 7)) v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) v1.AuxInt = int64ToAuxInt(3) v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) return true } break } // match: (MNEGW x (MOVDconst [c])) // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) // result: (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_1.AuxInt) if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) { continue } v.reset(OpARM64MOVWUreg) v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) v1.AuxInt = int64ToAuxInt(log64(c / 9)) v2 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v2.AuxInt = int64ToAuxInt(3) v2.AddArg2(x, x) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) return true } break } // match: (MNEGW (MOVDconst [c]) (MOVDconst [d])) 
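// A sketch of the identities the MNEG/MNEGW strength-reduction rules
// above rely on, using the ARM64 shifted-operand forms
// (ADDshiftLL a b [k] = a + b<<k, SUBshiftLL a b [k] = a - b<<k):
//
//	c == 1<<k:        -(x*c) == NEG(x<<k)
//	c == (1<<k) + 1:  -(x*c) == NEG(x + x<<k)
//	c == (1<<k) - 1:  -(x*c) == NEG(x<<k - x)
//	c == 3 * (1<<k):  -(x*c) == (x - x<<2) << k      // x - 4x == -3x
//	c == 5 * (1<<k):  -(x*c) == NEG((x + x<<2) << k) // x + 4x == 5x
//	c == 7 * (1<<k):  -(x*c) == (x - x<<3) << k      // x - 8x == -7x
//	c == 9 * (1<<k):  -(x*c) == NEG((x + x<<3) << k) // x + 8x == 9x
//
// Most MNEGW variants additionally wrap the result in MOVWUreg, since
// only the low 32 bits are significant; the constant fold here likewise
// truncates through uint32 to match what MNEGW computes modulo 2^32.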
// result: (MOVDconst [int64(uint32(-c*d))]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARM64MOVDconst { continue } c := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpARM64MOVDconst { continue } d := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(int64(uint32(-c * d))) return true } break } return false } func rewriteValueARM64_OpARM64MOD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOD (MOVDconst [c]) (MOVDconst [d])) // cond: d != 0 // result: (MOVDconst [c%d]) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpARM64MOVDconst { break } d := auxIntToInt64(v_1.AuxInt) if !(d != 0) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(c % d) return true } return false } func rewriteValueARM64_OpARM64MODW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MODW (MOVDconst [c]) (MOVDconst [d])) // cond: d != 0 // result: (MOVDconst [int64(uint32(int32(c)%int32(d)))]) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpARM64MOVDconst { break } d := auxIntToInt64(v_1.AuxInt) if !(d != 0) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(int64(uint32(int32(c) % int32(d)))) return true } return false } func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDconst { break } off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVBUload) v.AuxInt = int32ToAuxInt(off1 + int32(off2)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off] {sym} (ADD ptr idx) mem) // cond: off == 0 && sym == nil // result: (MOVBUloadidx ptr idx mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVBUloadidx) v.AddArg3(ptr, idx, mem) return true } // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym1 := auxToSym(v.Aux) if v_0.Op != OpARM64MOVDaddr { break } off2 := auxIntToInt32(v_0.AuxInt) sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVBUload) v.AuxInt = int32ToAuxInt(off1 + off2) v.Aux = symToAux(mergeSym(sym1, sym2)) v.AddArg2(ptr, mem) return true } // match: (MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVDconst [0]) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64MOVBstorezero { break } off2 := auxIntToInt32(v_1.AuxInt) sym2 := auxToSym(v_1.Aux) ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } v.reset(OpARM64MOVDconst) v.AuxInt = 
int64ToAuxInt(0) return true } // match: (MOVBUload [off] {sym} (SB) _) // cond: symIsRO(sym) // result: (MOVDconst [int64(read8(sym, int64(off)))]) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpSB || !(symIsRO(sym)) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off)))) return true } return false } func rewriteValueARM64_OpARM64MOVBUloadidx(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVBUloadidx ptr (MOVDconst [c]) mem) // cond: is32Bit(c) // result: (MOVBUload [int32(c)] ptr mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64MOVBUload) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (MOVBUloadidx (MOVDconst [c]) ptr mem) // cond: is32Bit(c) // result: (MOVBUload [int32(c)] ptr mem) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) ptr := v_1 mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64MOVBUload) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (MOVBUloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _)) // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) // result: (MOVDconst [0]) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVBstorezeroidx { break } idx2 := v_2.Args[1] ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } return false } func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool { v_0 := v.Args[0] // match: (MOVBUreg (ANDconst [c] x)) // result: (ANDconst [c&(1<<8-1)] x) for { if v_0.Op != OpARM64ANDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(c & (1<<8 - 1)) v.AddArg(x) return true } // match: (MOVBUreg (MOVDconst [c])) // result: (MOVDconst [int64(uint8(c))]) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(int64(uint8(c))) return true } // match: (MOVBUreg x) // cond: v.Type.Size() <= 1 // result: x for { x := v_0 if !(v.Type.Size() <= 1) { break } v.copyOf(x) return true } // match: (MOVBUreg (SLLconst [lc] x)) // cond: lc >= 8 // result: (MOVDconst [0]) for { if v_0.Op != OpARM64SLLconst { break } lc := auxIntToInt64(v_0.AuxInt) if !(lc >= 8) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } // match: (MOVBUreg (SLLconst [lc] x)) // cond: lc < 8 // result: (UBFIZ [armBFAuxInt(lc, 8-lc)] x) for { if v_0.Op != OpARM64SLLconst { break } lc := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(lc < 8) { break } v.reset(OpARM64UBFIZ) v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 8-lc)) v.AddArg(x) return true } // match: (MOVBUreg (SRLconst [rc] x)) // cond: rc < 8 // result: (UBFX [armBFAuxInt(rc, 8)] x) for { if v_0.Op != OpARM64SRLconst { break } rc := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(rc < 8) { break } v.reset(OpARM64UBFX) v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8)) v.AddArg(x) return true } // match: (MOVBUreg (UBFX [bfc] x)) // cond: bfc.getARM64BFwidth() <= 8 // result: (UBFX [bfc] x) for { if v_0.Op != OpARM64UBFX { break } bfc := auxIntToArm64BitField(v_0.AuxInt) x := v_0.Args[0] if !(bfc.getARM64BFwidth() <= 8) { break } v.reset(OpARM64UBFX) v.AuxInt = arm64BitFieldToAuxInt(bfc) v.AddArg(x) 
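// The UBFX above extracts at most 8 bits, so its result already has all
// upper bits clear and the surrounding byte zero-extension is redundant;
// the bitfield extract is re-emitted unchanged.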
return true } return false } func rewriteValueARM64_OpARM64MOVBload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDconst { break } off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVBload) v.AuxInt = int32ToAuxInt(off1 + int32(off2)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVBload [off] {sym} (ADD ptr idx) mem) // cond: off == 0 && sym == nil // result: (MOVBloadidx ptr idx mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVBloadidx) v.AddArg3(ptr, idx, mem) return true } // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym1 := auxToSym(v.Aux) if v_0.Op != OpARM64MOVDaddr { break } off2 := auxIntToInt32(v_0.AuxInt) sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVBload) v.AuxInt = int32ToAuxInt(off1 + off2) v.Aux = symToAux(mergeSym(sym1, sym2)) v.AddArg2(ptr, mem) return true } // match: (MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVDconst [0]) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64MOVBstorezero { break } off2 := auxIntToInt32(v_1.AuxInt) sym2 := auxToSym(v_1.Aux) ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } return false } func rewriteValueARM64_OpARM64MOVBloadidx(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVBloadidx ptr (MOVDconst [c]) mem) // cond: is32Bit(c) // result: (MOVBload [int32(c)] ptr mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64MOVBload) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (MOVBloadidx (MOVDconst [c]) ptr mem) // cond: is32Bit(c) // result: (MOVBload [int32(c)] ptr mem) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) ptr := v_1 mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64MOVBload) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (MOVBloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _)) // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) // result: (MOVDconst [0]) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVBstorezeroidx { break } idx2 := v_2.Args[1] ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) 
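// The load aliases the byte just cleared by MOVBstorezeroidx (the address
// is ptr+idx, which is commutative, so either operand order matches),
// hence the loaded value must be zero.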
return true } return false } func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool { v_0 := v.Args[0] // match: (MOVBreg (MOVDconst [c])) // result: (MOVDconst [int64(int8(c))]) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(int64(int8(c))) return true } // match: (MOVBreg x) // cond: v.Type.Size() <= 1 // result: x for { x := v_0 if !(v.Type.Size() <= 1) { break } v.copyOf(x) return true } // match: (MOVBreg <t> (ANDconst x [c])) // cond: uint64(c) & uint64(0xffffffffffffff80) == 0 // result: (ANDconst <t> x [c]) for { t := v.Type if v_0.Op != OpARM64ANDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(uint64(c)&uint64(0xffffffffffffff80) == 0) { break } v.reset(OpARM64ANDconst) v.Type = t v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } // match: (MOVBreg (SLLconst [lc] x)) // cond: lc < 8 // result: (SBFIZ [armBFAuxInt(lc, 8-lc)] x) for { if v_0.Op != OpARM64SLLconst { break } lc := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(lc < 8) { break } v.reset(OpARM64SBFIZ) v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 8-lc)) v.AddArg(x) return true } // match: (MOVBreg (SBFX [bfc] x)) // cond: bfc.getARM64BFwidth() <= 8 // result: (SBFX [bfc] x) for { if v_0.Op != OpARM64SBFX { break } bfc := auxIntToArm64BitField(v_0.AuxInt) x := v_0.Args[0] if !(bfc.getARM64BFwidth() <= 8) { break } v.reset(OpARM64SBFX) v.AuxInt = arm64BitFieldToAuxInt(bfc) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDconst { break } off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVBstore) v.AuxInt = int32ToAuxInt(off1 + int32(off2)) v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} (ADD ptr idx) val mem) // cond: off == 0 && sym == nil // result: (MOVBstoreidx ptr idx val mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] val := v_1 mem := v_2 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVBstoreidx) v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) sym1 := auxToSym(v.Aux) if v_0.Op != OpARM64MOVDaddr { break } off2 := auxIntToInt32(v_0.AuxInt) sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] val := v_1 mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVBstore) v.AuxInt = int32ToAuxInt(off1 + off2) v.Aux = symToAux(mergeSym(sym1, sym2)) v.AddArg3(ptr, val, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) // result: (MOVBstorezero [off] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) 
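// Storing a literal zero prefers the storezero form, which encodes the
// zero register as the source instead of materializing the constant in a
// general-purpose register.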
ptr := v_0 if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { break } mem := v_2 v.reset(OpARM64MOVBstorezero) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64MOVBreg { break } x := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64MOVBUreg { break } x := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64MOVHreg { break } x := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64MOVHUreg { break } x := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64MOVWreg { break } x := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) // result: (MOVBstore [off] {sym} ptr x mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64MOVWUreg { break } x := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVBstore) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } return false } func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem) // cond: is32Bit(c) // result: (MOVBstore [int32(c)] ptr val mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) val := v_2 mem := v_3 if !(is32Bit(c)) { break } v.reset(OpARM64MOVBstore) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg3(ptr, val, mem) return true } // match: (MOVBstoreidx (MOVDconst [c]) idx val mem) // cond: is32Bit(c) // result: (MOVBstore [int32(c)] idx val mem) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) idx := v_1 val := v_2 mem := v_3 if !(is32Bit(c)) { break } v.reset(OpARM64MOVBstore) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg3(idx, val, mem) return true } // match: (MOVBstoreidx ptr idx (MOVDconst [0]) mem) // result: (MOVBstorezeroidx ptr idx mem) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 { break } mem := v_3 v.reset(OpARM64MOVBstorezeroidx) v.AddArg3(ptr, idx, mem) return true } // match: (MOVBstoreidx ptr idx (MOVBreg x) mem) // result: (MOVBstoreidx ptr idx x mem) for { 
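// A byte store writes only the low 8 bits, so a sign- or zero-extension
// feeding the stored value (MOVBreg, MOVBUreg, MOVHreg, MOVHUreg,
// MOVWreg, MOVWUreg) is dead; this rule and the ones following peel each
// extension off in turn.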
ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVBreg { break } x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr idx (MOVBUreg x) mem) // result: (MOVBstoreidx ptr idx x mem) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVBUreg { break } x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr idx (MOVHreg x) mem) // result: (MOVBstoreidx ptr idx x mem) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVHreg { break } x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr idx (MOVHUreg x) mem) // result: (MOVBstoreidx ptr idx x mem) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVHUreg { break } x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr idx (MOVWreg x) mem) // result: (MOVBstoreidx ptr idx x mem) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVWreg { break } x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) v.AddArg4(ptr, idx, x, mem) return true } // match: (MOVBstoreidx ptr idx (MOVWUreg x) mem) // result: (MOVBstoreidx ptr idx x mem) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVWUreg { break } x := v_2.Args[0] mem := v_3 v.reset(OpARM64MOVBstoreidx) v.AddArg4(ptr, idx, x, mem) return true } return false } func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDconst { break } off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVBstorezero) v.AuxInt = int32ToAuxInt(off1 + int32(off2)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym1 := auxToSym(v.Aux) if v_0.Op != OpARM64MOVDaddr { break } off2 := auxIntToInt32(v_0.AuxInt) sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVBstorezero) v.AuxInt = int32ToAuxInt(off1 + off2) v.Aux = symToAux(mergeSym(sym1, sym2)) v.AddArg2(ptr, mem) return true } // match: (MOVBstorezero [off] {sym} (ADD ptr idx) mem) // cond: off == 0 && sym == nil // result: (MOVBstorezeroidx ptr idx mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVBstorezeroidx) v.AddArg3(ptr, idx, mem) return true } return false } func rewriteValueARM64_OpARM64MOVBstorezeroidx(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVBstorezeroidx ptr (MOVDconst [c]) mem) // cond: is32Bit(c) // result: (MOVBstorezero [int32(c)] ptr mem) for { ptr := v_0 if v_1.Op 
!= OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64MOVBstorezero) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (MOVBstorezeroidx (MOVDconst [c]) idx mem) // cond: is32Bit(c) // result: (MOVBstorezero [int32(c)] idx mem) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) idx := v_1 mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64MOVBstorezero) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(idx, mem) return true } return false } func rewriteValueARM64_OpARM64MOVDload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _)) // result: (FMOVDfpgp val) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64FMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { break } val := v_1.Args[1] if ptr != v_1.Args[0] { break } v.reset(OpARM64FMOVDfpgp) v.AddArg(val) return true } // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVDload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDconst { break } off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVDload) v.AuxInt = int32ToAuxInt(off1 + int32(off2)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVDload [off] {sym} (ADD ptr idx) mem) // cond: off == 0 && sym == nil // result: (MOVDloadidx ptr idx mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVDloadidx) v.AddArg3(ptr, idx, mem) return true } // match: (MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) // cond: off == 0 && sym == nil // result: (MOVDloadidx8 ptr idx mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 { break } idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVDloadidx8) v.AddArg3(ptr, idx, mem) return true } // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym1 := auxToSym(v.Aux) if v_0.Op != OpARM64MOVDaddr { break } off2 := auxIntToInt32(v_0.AuxInt) sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVDload) v.AuxInt = int32ToAuxInt(off1 + off2) v.Aux = symToAux(mergeSym(sym1, sym2)) v.AddArg2(ptr, mem) return true } // match: (MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVDconst [0]) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64MOVDstorezero { break } off2 := auxIntToInt32(v_1.AuxInt) sym2 := auxToSym(v_1.Aux) ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { 
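// No provable match: the zeroing store must have the same sym, offset,
// and base pointer for the loaded value to be known zero; otherwise the
// load may observe different memory and is kept.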
break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } // match: (MOVDload [off] {sym} (SB) _) // cond: symIsRO(sym) // result: (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))]) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpSB || !(symIsRO(sym)) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))) return true } return false } func rewriteValueARM64_OpARM64MOVDloadidx(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVDloadidx ptr (MOVDconst [c]) mem) // cond: is32Bit(c) // result: (MOVDload [int32(c)] ptr mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64MOVDload) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (MOVDloadidx (MOVDconst [c]) ptr mem) // cond: is32Bit(c) // result: (MOVDload [int32(c)] ptr mem) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) ptr := v_1 mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64MOVDload) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (MOVDloadidx ptr (SLLconst [3] idx) mem) // result: (MOVDloadidx8 ptr idx mem) for { ptr := v_0 if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 { break } idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVDloadidx8) v.AddArg3(ptr, idx, mem) return true } // match: (MOVDloadidx (SLLconst [3] idx) ptr mem) // result: (MOVDloadidx8 ptr idx mem) for { if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 { break } idx := v_0.Args[0] ptr := v_1 mem := v_2 v.reset(OpARM64MOVDloadidx8) v.AddArg3(ptr, idx, mem) return true } // match: (MOVDloadidx ptr idx (MOVDstorezeroidx ptr2 idx2 _)) // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) // result: (MOVDconst [0]) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVDstorezeroidx { break } idx2 := v_2.Args[1] ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } return false } func rewriteValueARM64_OpARM64MOVDloadidx8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVDloadidx8 ptr (MOVDconst [c]) mem) // cond: is32Bit(c<<3) // result: (MOVDload [int32(c)<<3] ptr mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c << 3)) { break } v.reset(OpARM64MOVDload) v.AuxInt = int32ToAuxInt(int32(c) << 3) v.AddArg2(ptr, mem) return true } // match: (MOVDloadidx8 ptr idx (MOVDstorezeroidx8 ptr2 idx2 _)) // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) // result: (MOVDconst [0]) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVDstorezeroidx8 { break } idx2 := v_2.Args[1] ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } return false } func rewriteValueARM64_OpARM64MOVDnop(v *Value) bool { v_0 := v.Args[0] // match: (MOVDnop (MOVDconst [c])) // result: (MOVDconst [c]) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(c) return true } return false } func rewriteValueARM64_OpARM64MOVDreg(v *Value) bool { v_0 := v.Args[0] // match: (MOVDreg x) // cond: 
x.Uses == 1 // result: (MOVDnop x) for { x := v_0 if !(x.Uses == 1) { break } v.reset(OpARM64MOVDnop) v.AddArg(x) return true } // match: (MOVDreg (MOVDconst [c])) // result: (MOVDconst [c]) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(c) return true } return false } func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem) // result: (FMOVDstore [off] {sym} ptr val mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64FMOVDfpgp { break } val := v_1.Args[0] mem := v_2 v.reset(OpARM64FMOVDstore) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDconst { break } off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVDstore) v.AuxInt = int32ToAuxInt(off1 + int32(off2)) v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem) // cond: off == 0 && sym == nil // result: (MOVDstoreidx ptr idx val mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] val := v_1 mem := v_2 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVDstoreidx) v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) // cond: off == 0 && sym == nil // result: (MOVDstoreidx8 ptr idx val mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 { break } idx := v_0.Args[1] ptr := v_0.Args[0] val := v_1 mem := v_2 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVDstoreidx8) v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) sym1 := auxToSym(v.Aux) if v_0.Op != OpARM64MOVDaddr { break } off2 := auxIntToInt32(v_0.AuxInt) sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] val := v_1 mem := v_2 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVDstore) v.AuxInt = int32ToAuxInt(off1 + off2) v.Aux = symToAux(mergeSym(sym1, sym2)) v.AddArg3(ptr, val, mem) return true } // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) // result: (MOVDstorezero [off] {sym} ptr mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { break } mem := v_2 v.reset(OpARM64MOVDstorezero) v.AuxInt = int32ToAuxInt(off) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } return false } func rewriteValueARM64_OpARM64MOVDstoreidx(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := 
v.Args[0] // match: (MOVDstoreidx ptr (MOVDconst [c]) val mem) // cond: is32Bit(c) // result: (MOVDstore [int32(c)] ptr val mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) val := v_2 mem := v_3 if !(is32Bit(c)) { break } v.reset(OpARM64MOVDstore) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg3(ptr, val, mem) return true } // match: (MOVDstoreidx (MOVDconst [c]) idx val mem) // cond: is32Bit(c) // result: (MOVDstore [int32(c)] idx val mem) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) idx := v_1 val := v_2 mem := v_3 if !(is32Bit(c)) { break } v.reset(OpARM64MOVDstore) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg3(idx, val, mem) return true } // match: (MOVDstoreidx ptr (SLLconst [3] idx) val mem) // result: (MOVDstoreidx8 ptr idx val mem) for { ptr := v_0 if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 { break } idx := v_1.Args[0] val := v_2 mem := v_3 v.reset(OpARM64MOVDstoreidx8) v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVDstoreidx (SLLconst [3] idx) ptr val mem) // result: (MOVDstoreidx8 ptr idx val mem) for { if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 { break } idx := v_0.Args[0] ptr := v_1 val := v_2 mem := v_3 v.reset(OpARM64MOVDstoreidx8) v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVDstoreidx ptr idx (MOVDconst [0]) mem) // result: (MOVDstorezeroidx ptr idx mem) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 { break } mem := v_3 v.reset(OpARM64MOVDstorezeroidx) v.AddArg3(ptr, idx, mem) return true } return false } func rewriteValueARM64_OpARM64MOVDstoreidx8(v *Value) bool { v_3 := v.Args[3] v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVDstoreidx8 ptr (MOVDconst [c]) val mem) // cond: is32Bit(c<<3) // result: (MOVDstore [int32(c)<<3] ptr val mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) val := v_2 mem := v_3 if !(is32Bit(c << 3)) { break } v.reset(OpARM64MOVDstore) v.AuxInt = int32ToAuxInt(int32(c) << 3) v.AddArg3(ptr, val, mem) return true } // match: (MOVDstoreidx8 ptr idx (MOVDconst [0]) mem) // result: (MOVDstorezeroidx8 ptr idx mem) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 { break } mem := v_3 v.reset(OpARM64MOVDstorezeroidx8) v.AddArg3(ptr, idx, mem) return true } return false } func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i+8] ptr mem)) // cond: x.Uses == 1 && setPos(v, x.Pos) && clobber(x) // result: (MOVQstorezero {s} [i] ptr mem) for { i := auxIntToInt32(v.AuxInt) s := auxToSym(v.Aux) ptr := v_0 x := v_1 if x.Op != OpARM64MOVDstorezero || auxIntToInt32(x.AuxInt) != i+8 || auxToSym(x.Aux) != s { break } mem := x.Args[1] if ptr != x.Args[0] || !(x.Uses == 1 && setPos(v, x.Pos) && clobber(x)) { break } v.reset(OpARM64MOVQstorezero) v.AuxInt = int32ToAuxInt(i) v.Aux = symToAux(s) v.AddArg2(ptr, mem) return true } // match: (MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i-8] ptr mem)) // cond: x.Uses == 1 && setPos(v, x.Pos) && clobber(x) // result: (MOVQstorezero {s} [i-8] ptr mem) for { i := auxIntToInt32(v.AuxInt) s := auxToSym(v.Aux) ptr := v_0 x := v_1 if x.Op != OpARM64MOVDstorezero || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s { break } mem := x.Args[1] if ptr != x.Args[0] || !(x.Uses == 1 && setPos(v, x.Pos) && clobber(x)) 
{ break } v.reset(OpARM64MOVQstorezero) v.AuxInt = int32ToAuxInt(i - 8) v.Aux = symToAux(s) v.AddArg2(ptr, mem) return true } // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVDstorezero [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDconst { break } off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVDstorezero) v.AuxInt = int32ToAuxInt(off1 + int32(off2)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym1 := auxToSym(v.Aux) if v_0.Op != OpARM64MOVDaddr { break } off2 := auxIntToInt32(v_0.AuxInt) sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVDstorezero) v.AuxInt = int32ToAuxInt(off1 + off2) v.Aux = symToAux(mergeSym(sym1, sym2)) v.AddArg2(ptr, mem) return true } // match: (MOVDstorezero [off] {sym} (ADD ptr idx) mem) // cond: off == 0 && sym == nil // result: (MOVDstorezeroidx ptr idx mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVDstorezeroidx) v.AddArg3(ptr, idx, mem) return true } // match: (MOVDstorezero [off] {sym} (ADDshiftLL [3] ptr idx) mem) // cond: off == 0 && sym == nil // result: (MOVDstorezeroidx8 ptr idx mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 { break } idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVDstorezeroidx8) v.AddArg3(ptr, idx, mem) return true } return false } func rewriteValueARM64_OpARM64MOVDstorezeroidx(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVDstorezeroidx ptr (MOVDconst [c]) mem) // cond: is32Bit(c) // result: (MOVDstorezero [int32(c)] ptr mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64MOVDstorezero) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (MOVDstorezeroidx (MOVDconst [c]) idx mem) // cond: is32Bit(c) // result: (MOVDstorezero [int32(c)] idx mem) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) idx := v_1 mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64MOVDstorezero) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(idx, mem) return true } // match: (MOVDstorezeroidx ptr (SLLconst [3] idx) mem) // result: (MOVDstorezeroidx8 ptr idx mem) for { ptr := v_0 if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 { break } idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVDstorezeroidx8) v.AddArg3(ptr, idx, mem) return true } // match: (MOVDstorezeroidx (SLLconst [3] idx) ptr mem) // result: (MOVDstorezeroidx8 ptr idx mem) for { if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 { break } idx := 
v_0.Args[0] ptr := v_1 mem := v_2 v.reset(OpARM64MOVDstorezeroidx8) v.AddArg3(ptr, idx, mem) return true } return false } func rewriteValueARM64_OpARM64MOVDstorezeroidx8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVDstorezeroidx8 ptr (MOVDconst [c]) mem) // cond: is32Bit(c<<3) // result: (MOVDstorezero [int32(c<<3)] ptr mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c << 3)) { break } v.reset(OpARM64MOVDstorezero) v.AuxInt = int32ToAuxInt(int32(c << 3)) v.AddArg2(ptr, mem) return true } return false } func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDconst { break } off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVHUload) v.AuxInt = int32ToAuxInt(off1 + int32(off2)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off] {sym} (ADD ptr idx) mem) // cond: off == 0 && sym == nil // result: (MOVHUloadidx ptr idx mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVHUloadidx) v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem) // cond: off == 0 && sym == nil // result: (MOVHUloadidx2 ptr idx mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 { break } idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVHUloadidx2) v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym1 := auxToSym(v.Aux) if v_0.Op != OpARM64MOVDaddr { break } off2 := auxIntToInt32(v_0.AuxInt) sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVHUload) v.AuxInt = int32ToAuxInt(off1 + off2) v.Aux = symToAux(mergeSym(sym1, sym2)) v.AddArg2(ptr, mem) return true } // match: (MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVDconst [0]) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64MOVHstorezero { break } off2 := auxIntToInt32(v_1.AuxInt) sym2 := auxToSym(v_1.Aux) ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } // match: (MOVHUload [off] {sym} (SB) _) // cond: symIsRO(sym) // result: (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpSB || 
!(symIsRO(sym)) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))) return true } return false } func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVHUloadidx ptr (MOVDconst [c]) mem) // cond: is32Bit(c) // result: (MOVHUload [int32(c)] ptr mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64MOVHUload) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (MOVHUloadidx (MOVDconst [c]) ptr mem) // cond: is32Bit(c) // result: (MOVHUload [int32(c)] ptr mem) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) ptr := v_1 mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64MOVHUload) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (MOVHUloadidx ptr (SLLconst [1] idx) mem) // result: (MOVHUloadidx2 ptr idx mem) for { ptr := v_0 if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 { break } idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVHUloadidx2) v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUloadidx ptr (ADD idx idx) mem) // result: (MOVHUloadidx2 ptr idx mem) for { ptr := v_0 if v_1.Op != OpARM64ADD { break } idx := v_1.Args[1] if idx != v_1.Args[0] { break } mem := v_2 v.reset(OpARM64MOVHUloadidx2) v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUloadidx (ADD idx idx) ptr mem) // result: (MOVHUloadidx2 ptr idx mem) for { if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] if idx != v_0.Args[0] { break } ptr := v_1 mem := v_2 v.reset(OpARM64MOVHUloadidx2) v.AddArg3(ptr, idx, mem) return true } // match: (MOVHUloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _)) // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) // result: (MOVDconst [0]) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVHstorezeroidx { break } idx2 := v_2.Args[1] ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } return false } func rewriteValueARM64_OpARM64MOVHUloadidx2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVHUloadidx2 ptr (MOVDconst [c]) mem) // cond: is32Bit(c<<1) // result: (MOVHUload [int32(c)<<1] ptr mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c << 1)) { break } v.reset(OpARM64MOVHUload) v.AuxInt = int32ToAuxInt(int32(c) << 1) v.AddArg2(ptr, mem) return true } // match: (MOVHUloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) // result: (MOVDconst [0]) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVHstorezeroidx2 { break } idx2 := v_2.Args[1] ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } return false } func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool { v_0 := v.Args[0] // match: (MOVHUreg (ANDconst [c] x)) // result: (ANDconst [c&(1<<16-1)] x) for { if v_0.Op != OpARM64ANDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARM64ANDconst) v.AuxInt = int64ToAuxInt(c & (1<<16 - 1)) v.AddArg(x) return true } // match: (MOVHUreg (MOVDconst [c])) // result: (MOVDconst [int64(uint16(c))]) for { if 
v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(int64(uint16(c))) return true } // match: (MOVHUreg x) // cond: v.Type.Size() <= 2 // result: x for { x := v_0 if !(v.Type.Size() <= 2) { break } v.copyOf(x) return true } // match: (MOVHUreg (SLLconst [lc] x)) // cond: lc >= 16 // result: (MOVDconst [0]) for { if v_0.Op != OpARM64SLLconst { break } lc := auxIntToInt64(v_0.AuxInt) if !(lc >= 16) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } // match: (MOVHUreg (SLLconst [lc] x)) // cond: lc < 16 // result: (UBFIZ [armBFAuxInt(lc, 16-lc)] x) for { if v_0.Op != OpARM64SLLconst { break } lc := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(lc < 16) { break } v.reset(OpARM64UBFIZ) v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 16-lc)) v.AddArg(x) return true } // match: (MOVHUreg (SRLconst [rc] x)) // cond: rc < 16 // result: (UBFX [armBFAuxInt(rc, 16)] x) for { if v_0.Op != OpARM64SRLconst { break } rc := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(rc < 16) { break } v.reset(OpARM64UBFX) v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16)) v.AddArg(x) return true } // match: (MOVHUreg (UBFX [bfc] x)) // cond: bfc.getARM64BFwidth() <= 16 // result: (UBFX [bfc] x) for { if v_0.Op != OpARM64UBFX { break } bfc := auxIntToArm64BitField(v_0.AuxInt) x := v_0.Args[0] if !(bfc.getARM64BFwidth() <= 16) { break } v.reset(OpARM64UBFX) v.AuxInt = arm64BitFieldToAuxInt(bfc) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64MOVHload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVHload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDconst { break } off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVHload) v.AuxInt = int32ToAuxInt(off1 + int32(off2)) v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVHload [off] {sym} (ADD ptr idx) mem) // cond: off == 0 && sym == nil // result: (MOVHloadidx ptr idx mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVHloadidx) v.AddArg3(ptr, idx, mem) return true } // match: (MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem) // cond: off == 0 && sym == nil // result: (MOVHloadidx2 ptr idx mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 { break } idx := v_0.Args[1] ptr := v_0.Args[0] mem := v_1 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVHloadidx2) v.AddArg3(ptr, idx, mem) return true } // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) sym1 := auxToSym(v.Aux) if v_0.Op != OpARM64MOVDaddr { break } off2 := auxIntToInt32(v_0.AuxInt) sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || 
!config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVHload) v.AuxInt = int32ToAuxInt(off1 + off2) v.Aux = symToAux(mergeSym(sym1, sym2)) v.AddArg2(ptr, mem) return true } // match: (MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVDconst [0]) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpARM64MOVHstorezero { break } off2 := auxIntToInt32(v_1.AuxInt) sym2 := auxToSym(v_1.Aux) ptr2 := v_1.Args[0] if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } return false } func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVHloadidx ptr (MOVDconst [c]) mem) // cond: is32Bit(c) // result: (MOVHload [int32(c)] ptr mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64MOVHload) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (MOVHloadidx (MOVDconst [c]) ptr mem) // cond: is32Bit(c) // result: (MOVHload [int32(c)] ptr mem) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) ptr := v_1 mem := v_2 if !(is32Bit(c)) { break } v.reset(OpARM64MOVHload) v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg2(ptr, mem) return true } // match: (MOVHloadidx ptr (SLLconst [1] idx) mem) // result: (MOVHloadidx2 ptr idx mem) for { ptr := v_0 if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 { break } idx := v_1.Args[0] mem := v_2 v.reset(OpARM64MOVHloadidx2) v.AddArg3(ptr, idx, mem) return true } // match: (MOVHloadidx ptr (ADD idx idx) mem) // result: (MOVHloadidx2 ptr idx mem) for { ptr := v_0 if v_1.Op != OpARM64ADD { break } idx := v_1.Args[1] if idx != v_1.Args[0] { break } mem := v_2 v.reset(OpARM64MOVHloadidx2) v.AddArg3(ptr, idx, mem) return true } // match: (MOVHloadidx (ADD idx idx) ptr mem) // result: (MOVHloadidx2 ptr idx mem) for { if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] if idx != v_0.Args[0] { break } ptr := v_1 mem := v_2 v.reset(OpARM64MOVHloadidx2) v.AddArg3(ptr, idx, mem) return true } // match: (MOVHloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _)) // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) // result: (MOVDconst [0]) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVHstorezeroidx { break } idx2 := v_2.Args[1] ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } return false } func rewriteValueARM64_OpARM64MOVHloadidx2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVHloadidx2 ptr (MOVDconst [c]) mem) // cond: is32Bit(c<<1) // result: (MOVHload [int32(c)<<1] ptr mem) for { ptr := v_0 if v_1.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_1.AuxInt) mem := v_2 if !(is32Bit(c << 1)) { break } v.reset(OpARM64MOVHload) v.AuxInt = int32ToAuxInt(int32(c) << 1) v.AddArg2(ptr, mem) return true } // match: (MOVHloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) // result: (MOVDconst [0]) for { ptr := v_0 idx := v_1 if v_2.Op != OpARM64MOVHstorezeroidx2 { break } idx2 := v_2.Args[1] ptr2 := v_2.Args[0] if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } 
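// Same base and same scaled index means the 2-byte cell read here is
// exactly the one MOVHstorezeroidx2 cleared, so the load folds to a
// constant zero.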
v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(0) return true } return false } func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { v_0 := v.Args[0] // match: (MOVHreg (MOVDconst [c])) // result: (MOVDconst [int64(int16(c))]) for { if v_0.Op != OpARM64MOVDconst { break } c := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) v.AuxInt = int64ToAuxInt(int64(int16(c))) return true } // match: (MOVHreg x) // cond: v.Type.Size() <= 2 // result: x for { x := v_0 if !(v.Type.Size() <= 2) { break } v.copyOf(x) return true } // match: (MOVHreg <t> (ANDconst x [c])) // cond: uint64(c) & uint64(0xffffffffffff8000) == 0 // result: (ANDconst <t> x [c]) for { t := v.Type if v_0.Op != OpARM64ANDconst { break } c := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(uint64(c)&uint64(0xffffffffffff8000) == 0) { break } v.reset(OpARM64ANDconst) v.Type = t v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } // match: (MOVHreg (SLLconst [lc] x)) // cond: lc < 16 // result: (SBFIZ [armBFAuxInt(lc, 16-lc)] x) for { if v_0.Op != OpARM64SLLconst { break } lc := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(lc < 16) { break } v.reset(OpARM64SBFIZ) v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 16-lc)) v.AddArg(x) return true } // match: (MOVHreg (SBFX [bfc] x)) // cond: bfc.getARM64BFwidth() <= 16 // result: (SBFX [bfc] x) for { if v_0.Op != OpARM64SBFX { break } bfc := auxIntToArm64BitField(v_0.AuxInt) x := v_0.Args[0] if !(bfc.getARM64BFwidth() <= 16) { break } v.reset(OpARM64SBFX) v.AuxInt = arm64BitFieldToAuxInt(bfc) v.AddArg(x) return true } return false } func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDconst { break } off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpARM64MOVHstore) v.AuxInt = int32ToAuxInt(off1 + int32(off2)) v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem) // cond: off == 0 && sym == nil // result: (MOVHstoreidx ptr idx val mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADD { break } idx := v_0.Args[1] ptr := v_0.Args[0] val := v_1 mem := v_2 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVHstoreidx) v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem) // cond: off == 0 && sym == nil // result: (MOVHstoreidx2 ptr idx val mem) for { off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 { break } idx := v_0.Args[1] ptr := v_0.Args[0] val := v_1 mem := v_2 if !(off == 0 && sym == nil) { break } v.reset(OpARM64MOVHstoreidx2) v.AddArg4(ptr, idx, val, mem) return true } // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) sym1 := auxToSym(v.Aux) if v_0.Op != OpARM64MOVDaddr { break } off2 := 
	// match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpARM64MOVDaddr {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64MOVHstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem)
	// result: (MOVHstorezero [off] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
			break
		}
		mem := v_2
		v.reset(OpARM64MOVHstorezero)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
	// result: (MOVHstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpARM64MOVHreg {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpARM64MOVHstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
	// result: (MOVHstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpARM64MOVHUreg {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpARM64MOVHstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
	// result: (MOVHstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpARM64MOVWreg {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpARM64MOVHstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
	// result: (MOVHstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpARM64MOVWUreg {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpARM64MOVHstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVHstoreidx ptr (MOVDconst [c]) val mem)
	// cond: is32Bit(c)
	// result: (MOVHstore [int32(c)] ptr val mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		val := v_2
		mem := v_3
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVHstore)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVHstoreidx (MOVDconst [c]) idx val mem)
	// cond: is32Bit(c)
	// result: (MOVHstore [int32(c)] idx val mem)
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		idx := v_1
		val := v_2
		mem := v_3
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVHstore)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg3(idx, val, mem)
		return true
	}
	// match: (MOVHstoreidx ptr (SLLconst [1] idx) val mem)
	// result: (MOVHstoreidx2 ptr idx val mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 {
			break
		}
		idx := v_1.Args[0]
		val := v_2
		mem := v_3
		v.reset(OpARM64MOVHstoreidx2)
		v.AddArg4(ptr, idx, val, mem)
		return true
	}
	// match: (MOVHstoreidx ptr (ADD idx idx) val mem)
	// result: (MOVHstoreidx2 ptr idx val mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64ADD {
			break
		}
		idx := v_1.Args[1]
		if idx != v_1.Args[0] {
			break
		}
		val := v_2
		mem := v_3
		v.reset(OpARM64MOVHstoreidx2)
		v.AddArg4(ptr, idx, val, mem)
		return true
	}
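	// ADD is commutative, so the scaled-index patterns are also matched
	// with the operands in the opposite order; the mirrored rules below
	// repeat the same rewrite with the shift expression in the first
	// argument position. (Informal note; the rule generator emits each
	// argument order explicitly.)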
	// match: (MOVHstoreidx (SLLconst [1] idx) ptr val mem)
	// result: (MOVHstoreidx2 ptr idx val mem)
	for {
		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 1 {
			break
		}
		idx := v_0.Args[0]
		ptr := v_1
		val := v_2
		mem := v_3
		v.reset(OpARM64MOVHstoreidx2)
		v.AddArg4(ptr, idx, val, mem)
		return true
	}
	// match: (MOVHstoreidx (ADD idx idx) ptr val mem)
	// result: (MOVHstoreidx2 ptr idx val mem)
	for {
		if v_0.Op != OpARM64ADD {
			break
		}
		idx := v_0.Args[1]
		if idx != v_0.Args[0] {
			break
		}
		ptr := v_1
		val := v_2
		mem := v_3
		v.reset(OpARM64MOVHstoreidx2)
		v.AddArg4(ptr, idx, val, mem)
		return true
	}
	// match: (MOVHstoreidx ptr idx (MOVDconst [0]) mem)
	// result: (MOVHstorezeroidx ptr idx mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
			break
		}
		mem := v_3
		v.reset(OpARM64MOVHstorezeroidx)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVHstoreidx ptr idx (MOVHreg x) mem)
	// result: (MOVHstoreidx ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVHreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVHstoreidx)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	// match: (MOVHstoreidx ptr idx (MOVHUreg x) mem)
	// result: (MOVHstoreidx ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVHUreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVHstoreidx)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	// match: (MOVHstoreidx ptr idx (MOVWreg x) mem)
	// result: (MOVHstoreidx ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVWreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVHstoreidx)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	// match: (MOVHstoreidx ptr idx (MOVWUreg x) mem)
	// result: (MOVHstoreidx ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVWUreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVHstoreidx)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVHstoreidx2 ptr (MOVDconst [c]) val mem)
	// cond: is32Bit(c<<1)
	// result: (MOVHstore [int32(c)<<1] ptr val mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		val := v_2
		mem := v_3
		if !(is32Bit(c << 1)) {
			break
		}
		v.reset(OpARM64MOVHstore)
		v.AuxInt = int32ToAuxInt(int32(c) << 1)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVHstoreidx2 ptr idx (MOVDconst [0]) mem)
	// result: (MOVHstorezeroidx2 ptr idx mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
			break
		}
		mem := v_3
		v.reset(OpARM64MOVHstorezeroidx2)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVHstoreidx2 ptr idx (MOVHreg x) mem)
	// result: (MOVHstoreidx2 ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVHreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVHstoreidx2)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	// match: (MOVHstoreidx2 ptr idx (MOVHUreg x) mem)
	// result: (MOVHstoreidx2 ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVHUreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVHstoreidx2)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	// match: (MOVHstoreidx2 ptr idx (MOVWreg x) mem)
	// result: (MOVHstoreidx2 ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVWreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVHstoreidx2)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	// match: (MOVHstoreidx2 ptr idx (MOVWUreg x) mem)
	// result: (MOVHstoreidx2 ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVWUreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVHstoreidx2)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADDconst {
			break
		}
		off2 := auxIntToInt64(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64MOVHstorezero)
		v.AuxInt = int32ToAuxInt(off1 + int32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpARM64MOVDaddr {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64MOVHstorezero)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVHstorezero [off] {sym} (ADD ptr idx) mem)
	// cond: off == 0 && sym == nil
	// result: (MOVHstorezeroidx ptr idx mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADD {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		mem := v_1
		if !(off == 0 && sym == nil) {
			break
		}
		v.reset(OpARM64MOVHstorezeroidx)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVHstorezero [off] {sym} (ADDshiftLL [1] ptr idx) mem)
	// cond: off == 0 && sym == nil
	// result: (MOVHstorezeroidx2 ptr idx mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		mem := v_1
		if !(off == 0 && sym == nil) {
			break
		}
		v.reset(OpARM64MOVHstorezeroidx2)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVHstorezeroidx ptr (MOVDconst [c]) mem)
	// cond: is32Bit(c)
	// result: (MOVHstorezero [int32(c)] ptr mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVHstorezero)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVHstorezeroidx (MOVDconst [c]) idx mem)
	// cond: is32Bit(c)
	// result: (MOVHstorezero [int32(c)] idx mem)
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		idx := v_1
		mem := v_2
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVHstorezero)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg2(idx, mem)
		return true
	}
	// match: (MOVHstorezeroidx ptr (SLLconst [1] idx) mem)
	// result: (MOVHstorezeroidx2 ptr idx mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 {
			break
		}
		idx := v_1.Args[0]
		mem := v_2
		v.reset(OpARM64MOVHstorezeroidx2)
		v.AddArg3(ptr, idx, mem)
		return true
	}
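	// The storezero variants mirror the MOVHstoreidx rules with the zero
	// value elided, e.g. (MOVHstorezeroidx ptr (SLLconst [1] idx) mem)
	// becomes (MOVHstorezeroidx2 ptr idx mem) just as the plain store form
	// does. Informal summary of the surrounding generated rules.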
	// match: (MOVHstorezeroidx ptr (ADD idx idx) mem)
	// result: (MOVHstorezeroidx2 ptr idx mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64ADD {
			break
		}
		idx := v_1.Args[1]
		if idx != v_1.Args[0] {
			break
		}
		mem := v_2
		v.reset(OpARM64MOVHstorezeroidx2)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVHstorezeroidx (SLLconst [1] idx) ptr mem)
	// result: (MOVHstorezeroidx2 ptr idx mem)
	for {
		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 1 {
			break
		}
		idx := v_0.Args[0]
		ptr := v_1
		mem := v_2
		v.reset(OpARM64MOVHstorezeroidx2)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVHstorezeroidx (ADD idx idx) ptr mem)
	// result: (MOVHstorezeroidx2 ptr idx mem)
	for {
		if v_0.Op != OpARM64ADD {
			break
		}
		idx := v_0.Args[1]
		if idx != v_0.Args[0] {
			break
		}
		ptr := v_1
		mem := v_2
		v.reset(OpARM64MOVHstorezeroidx2)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVHstorezeroidx2(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVHstorezeroidx2 ptr (MOVDconst [c]) mem)
	// cond: is32Bit(c<<1)
	// result: (MOVHstorezero [int32(c<<1)] ptr mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(is32Bit(c << 1)) {
			break
		}
		v.reset(OpARM64MOVHstorezero)
		v.AuxInt = int32ToAuxInt(int32(c << 1))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVQstorezero(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVQstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (MOVQstorezero [off1+int32(off2)] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADDconst {
			break
		}
		off2 := auxIntToInt64(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64MOVQstorezero)
		v.AuxInt = int32ToAuxInt(off1 + int32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (MOVQstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpARM64MOVDaddr {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64MOVQstorezero)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _))
	// result: (FMOVSfpgp val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpARM64FMOVSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpARM64FMOVSfpgp)
		v.AddArg(val)
		return true
	}
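	// The rule above turns a 32-bit integer load of a value that was just
	// stored from a floating-point register into a direct FPR->GPR move
	// (FMOVSfpgp), skipping the round trip through memory. Informally:
	//
	//	FMOVSstore ptr val; MOVWUload ptr  ==>  FMOVSfpgp val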
	// match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (MOVWUload [off1+int32(off2)] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADDconst {
			break
		}
		off2 := auxIntToInt64(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64MOVWUload)
		v.AuxInt = int32ToAuxInt(off1 + int32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWUload [off] {sym} (ADD ptr idx) mem)
	// cond: off == 0 && sym == nil
	// result: (MOVWUloadidx ptr idx mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADD {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		mem := v_1
		if !(off == 0 && sym == nil) {
			break
		}
		v.reset(OpARM64MOVWUloadidx)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem)
	// cond: off == 0 && sym == nil
	// result: (MOVWUloadidx4 ptr idx mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		mem := v_1
		if !(off == 0 && sym == nil) {
			break
		}
		v.reset(OpARM64MOVWUloadidx4)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpARM64MOVDaddr {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64MOVWUload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVDconst [0])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpARM64MOVWstorezero {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (MOVWUload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVWUloadidx(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWUloadidx ptr (MOVDconst [c]) mem)
	// cond: is32Bit(c)
	// result: (MOVWUload [int32(c)] ptr mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWUload)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWUloadidx (MOVDconst [c]) ptr mem)
	// cond: is32Bit(c)
	// result: (MOVWUload [int32(c)] ptr mem)
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		ptr := v_1
		mem := v_2
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWUload)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg2(ptr, mem)
		return true
	}
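	// Word loads scale by 4, so the recognized index shape below is a
	// shift by 2: (SLLconst [2] idx) denotes idx*4 and selects the
	// loadidx4 form, analogous to the shift-by-1 patterns used for
	// halfword accesses above.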
	// match: (MOVWUloadidx ptr (SLLconst [2] idx) mem)
	// result: (MOVWUloadidx4 ptr idx mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v_2
		v.reset(OpARM64MOVWUloadidx4)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWUloadidx (SLLconst [2] idx) ptr mem)
	// result: (MOVWUloadidx4 ptr idx mem)
	for {
		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
			break
		}
		idx := v_0.Args[0]
		ptr := v_1
		mem := v_2
		v.reset(OpARM64MOVWUloadidx4)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWUloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
	// cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
	// result: (MOVDconst [0])
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVWstorezeroidx {
			break
		}
		idx2 := v_2.Args[1]
		ptr2 := v_2.Args[0]
		if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVWUloadidx4(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWUloadidx4 ptr (MOVDconst [c]) mem)
	// cond: is32Bit(c<<2)
	// result: (MOVWUload [int32(c)<<2] ptr mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(is32Bit(c << 2)) {
			break
		}
		v.reset(OpARM64MOVWUload)
		v.AuxInt = int32ToAuxInt(int32(c) << 2)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWUloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _))
	// cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
	// result: (MOVDconst [0])
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVWstorezeroidx4 {
			break
		}
		idx2 := v_2.Args[1]
		ptr2 := v_2.Args[0]
		if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
	v_0 := v.Args[0]
	// match: (MOVWUreg (ANDconst [c] x))
	// result: (ANDconst [c&(1<<32-1)] x)
	for {
		if v_0.Op != OpARM64ANDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpARM64ANDconst)
		v.AuxInt = int64ToAuxInt(c & (1<<32 - 1))
		v.AddArg(x)
		return true
	}
	// match: (MOVWUreg (MOVDconst [c]))
	// result: (MOVDconst [int64(uint32(c))])
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(int64(uint32(c)))
		return true
	}
	// match: (MOVWUreg x)
	// cond: v.Type.Size() <= 4
	// result: x
	for {
		x := v_0
		if !(v.Type.Size() <= 4) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVWUreg (SLLconst [lc] x))
	// cond: lc >= 32
	// result: (MOVDconst [0])
	for {
		if v_0.Op != OpARM64SLLconst {
			break
		}
		lc := auxIntToInt64(v_0.AuxInt)
		if !(lc >= 32) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (MOVWUreg (SLLconst [lc] x))
	// cond: lc < 32
	// result: (UBFIZ [armBFAuxInt(lc, 32-lc)] x)
	for {
		if v_0.Op != OpARM64SLLconst {
			break
		}
		lc := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(lc < 32) {
			break
		}
		v.reset(OpARM64UBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 32-lc))
		v.AddArg(x)
		return true
	}
	// match: (MOVWUreg (SRLconst [rc] x))
	// cond: rc < 32
	// result: (UBFX [armBFAuxInt(rc, 32)] x)
	for {
		if v_0.Op != OpARM64SRLconst {
			break
		}
		rc := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(rc < 32) {
			break
		}
		v.reset(OpARM64UBFX)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32))
		v.AddArg(x)
		return true
	}
	// match: (MOVWUreg (UBFX [bfc] x))
	// cond: bfc.getARM64BFwidth() <= 32
	// result: (UBFX [bfc] x)
	for {
		if v_0.Op != OpARM64UBFX {
			break
		}
		bfc := auxIntToArm64BitField(v_0.AuxInt)
		x := v_0.Args[0]
		if !(bfc.getARM64BFwidth() <= 32) {
			break
		}
		v.reset(OpARM64UBFX)
		v.AuxInt = arm64BitFieldToAuxInt(bfc)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVWload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (MOVWload [off1+int32(off2)] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADDconst {
			break
		}
		off2 := auxIntToInt64(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + int32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWload [off] {sym} (ADD ptr idx) mem)
	// cond: off == 0 && sym == nil
	// result: (MOVWloadidx ptr idx mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADD {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		mem := v_1
		if !(off == 0 && sym == nil) {
			break
		}
		v.reset(OpARM64MOVWloadidx)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem)
	// cond: off == 0 && sym == nil
	// result: (MOVWloadidx4 ptr idx mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		mem := v_1
		if !(off == 0 && sym == nil) {
			break
		}
		v.reset(OpARM64MOVWloadidx4)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpARM64MOVDaddr {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVDconst [0])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpARM64MOVWstorezero {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWloadidx ptr (MOVDconst [c]) mem)
	// cond: is32Bit(c)
	// result: (MOVWload [int32(c)] ptr mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWload)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg2(ptr, mem)
		return true
	}
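	// A constant index folds into the load's AuxInt offset, as above and in
	// the mirrored rule below. The is32Bit(c) guard is needed because the
	// AuxInt offset on these ops is 32-bit; a constant index that does not
	// fit is left alone.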
	// match: (MOVWloadidx (MOVDconst [c]) ptr mem)
	// cond: is32Bit(c)
	// result: (MOVWload [int32(c)] ptr mem)
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		ptr := v_1
		mem := v_2
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWload)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWloadidx ptr (SLLconst [2] idx) mem)
	// result: (MOVWloadidx4 ptr idx mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v_2
		v.reset(OpARM64MOVWloadidx4)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWloadidx (SLLconst [2] idx) ptr mem)
	// result: (MOVWloadidx4 ptr idx mem)
	for {
		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
			break
		}
		idx := v_0.Args[0]
		ptr := v_1
		mem := v_2
		v.reset(OpARM64MOVWloadidx4)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
	// cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
	// result: (MOVDconst [0])
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVWstorezeroidx {
			break
		}
		idx2 := v_2.Args[1]
		ptr2 := v_2.Args[0]
		if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVWloadidx4(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWloadidx4 ptr (MOVDconst [c]) mem)
	// cond: is32Bit(c<<2)
	// result: (MOVWload [int32(c)<<2] ptr mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(is32Bit(c << 2)) {
			break
		}
		v.reset(OpARM64MOVWload)
		v.AuxInt = int32ToAuxInt(int32(c) << 2)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _))
	// cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
	// result: (MOVDconst [0])
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVWstorezeroidx4 {
			break
		}
		idx2 := v_2.Args[1]
		ptr2 := v_2.Args[0]
		if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool {
	v_0 := v.Args[0]
	// match: (MOVWreg (MOVDconst [c]))
	// result: (MOVDconst [int64(int32(c))])
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(int64(int32(c)))
		return true
	}
	// match: (MOVWreg x)
	// cond: v.Type.Size() <= 4
	// result: x
	for {
		x := v_0
		if !(v.Type.Size() <= 4) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVWreg <t> (ANDconst x [c]))
	// cond: uint64(c) & uint64(0xffffffff80000000) == 0
	// result: (ANDconst <t> x [c])
	for {
		t := v.Type
		if v_0.Op != OpARM64ANDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(uint64(c)&uint64(0xffffffff80000000) == 0) {
			break
		}
		v.reset(OpARM64ANDconst)
		v.Type = t
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (MOVWreg (SLLconst [lc] x))
	// cond: lc < 32
	// result: (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
	for {
		if v_0.Op != OpARM64SLLconst {
			break
		}
		lc := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(lc < 32) {
			break
		}
		v.reset(OpARM64SBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 32-lc))
		v.AddArg(x)
		return true
	}
	// match: (MOVWreg (SBFX [bfc] x))
	// cond: bfc.getARM64BFwidth() <= 32
	// result: (SBFX [bfc] x)
	for {
		if v_0.Op != OpARM64SBFX {
			break
		}
		bfc := auxIntToArm64BitField(v_0.AuxInt)
		x := v_0.Args[0]
		if !(bfc.getARM64BFwidth() <= 32) {
			break
		}
		v.reset(OpARM64SBFX)
		v.AuxInt = arm64BitFieldToAuxInt(bfc)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem)
	// result: (FMOVSstore [off] {sym} ptr val mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpARM64FMOVSfpgp {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpARM64FMOVSstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADDconst {
			break
		}
		off2 := auxIntToInt64(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64MOVWstore)
		v.AuxInt = int32ToAuxInt(off1 + int32(off2))
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVWstore [off] {sym} (ADD ptr idx) val mem)
	// cond: off == 0 && sym == nil
	// result: (MOVWstoreidx ptr idx val mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADD {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(off == 0 && sym == nil) {
			break
		}
		v.reset(OpARM64MOVWstoreidx)
		v.AddArg4(ptr, idx, val, mem)
		return true
	}
	// match: (MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem)
	// cond: off == 0 && sym == nil
	// result: (MOVWstoreidx4 ptr idx val mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(off == 0 && sym == nil) {
			break
		}
		v.reset(OpARM64MOVWstoreidx4)
		v.AddArg4(ptr, idx, val, mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpARM64MOVDaddr {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64MOVWstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem)
	// result: (MOVWstorezero [off] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
			break
		}
		mem := v_2
		v.reset(OpARM64MOVWstorezero)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpARM64MOVWreg {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpARM64MOVWstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
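	// A word store writes only the low 32 bits of its source, so a zero-
	// or sign-extension of the value (MOVWreg/MOVWUreg) is redundant and
	// is peeled off, as in the rule above and the one below.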
	// match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpARM64MOVWUreg {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpARM64MOVWstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWstoreidx ptr (MOVDconst [c]) val mem)
	// cond: is32Bit(c)
	// result: (MOVWstore [int32(c)] ptr val mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		val := v_2
		mem := v_3
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWstore)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVWstoreidx (MOVDconst [c]) idx val mem)
	// cond: is32Bit(c)
	// result: (MOVWstore [int32(c)] idx val mem)
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		idx := v_1
		val := v_2
		mem := v_3
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWstore)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg3(idx, val, mem)
		return true
	}
	// match: (MOVWstoreidx ptr (SLLconst [2] idx) val mem)
	// result: (MOVWstoreidx4 ptr idx val mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
			break
		}
		idx := v_1.Args[0]
		val := v_2
		mem := v_3
		v.reset(OpARM64MOVWstoreidx4)
		v.AddArg4(ptr, idx, val, mem)
		return true
	}
	// match: (MOVWstoreidx (SLLconst [2] idx) ptr val mem)
	// result: (MOVWstoreidx4 ptr idx val mem)
	for {
		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
			break
		}
		idx := v_0.Args[0]
		ptr := v_1
		val := v_2
		mem := v_3
		v.reset(OpARM64MOVWstoreidx4)
		v.AddArg4(ptr, idx, val, mem)
		return true
	}
	// match: (MOVWstoreidx ptr idx (MOVDconst [0]) mem)
	// result: (MOVWstorezeroidx ptr idx mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
			break
		}
		mem := v_3
		v.reset(OpARM64MOVWstorezeroidx)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWstoreidx ptr idx (MOVWreg x) mem)
	// result: (MOVWstoreidx ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVWreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVWstoreidx)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	// match: (MOVWstoreidx ptr idx (MOVWUreg x) mem)
	// result: (MOVWstoreidx ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVWUreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVWstoreidx)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVWstoreidx4(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWstoreidx4 ptr (MOVDconst [c]) val mem)
	// cond: is32Bit(c<<2)
	// result: (MOVWstore [int32(c)<<2] ptr val mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		val := v_2
		mem := v_3
		if !(is32Bit(c << 2)) {
			break
		}
		v.reset(OpARM64MOVWstore)
		v.AuxInt = int32ToAuxInt(int32(c) << 2)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVWstoreidx4 ptr idx (MOVDconst [0]) mem)
	// result: (MOVWstorezeroidx4 ptr idx mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
			break
		}
		mem := v_3
		v.reset(OpARM64MOVWstorezeroidx4)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWstoreidx4 ptr idx (MOVWreg x) mem)
	// result: (MOVWstoreidx4 ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVWreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVWstoreidx4)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	// match: (MOVWstoreidx4 ptr idx (MOVWUreg x) mem)
	// result: (MOVWstoreidx4 ptr idx x mem)
	for {
		ptr := v_0
		idx := v_1
		if v_2.Op != OpARM64MOVWUreg {
			break
		}
		x := v_2.Args[0]
		mem := v_3
		v.reset(OpARM64MOVWstoreidx4)
		v.AddArg4(ptr, idx, x, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADDconst {
			break
		}
		off2 := auxIntToInt64(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64MOVWstorezero)
		v.AuxInt = int32ToAuxInt(off1 + int32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpARM64MOVDaddr {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64MOVWstorezero)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstorezero [off] {sym} (ADD ptr idx) mem)
	// cond: off == 0 && sym == nil
	// result: (MOVWstorezeroidx ptr idx mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADD {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		mem := v_1
		if !(off == 0 && sym == nil) {
			break
		}
		v.reset(OpARM64MOVWstorezeroidx)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWstorezero [off] {sym} (ADDshiftLL [2] ptr idx) mem)
	// cond: off == 0 && sym == nil
	// result: (MOVWstorezeroidx4 ptr idx mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
			break
		}
		idx := v_0.Args[1]
		ptr := v_0.Args[0]
		mem := v_1
		if !(off == 0 && sym == nil) {
			break
		}
		v.reset(OpARM64MOVWstorezeroidx4)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWstorezeroidx ptr (MOVDconst [c]) mem)
	// cond: is32Bit(c)
	// result: (MOVWstorezero [int32(c)] ptr mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWstorezero)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstorezeroidx (MOVDconst [c]) idx mem)
	// cond: is32Bit(c)
	// result: (MOVWstorezero [int32(c)] idx mem)
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		idx := v_1
		mem := v_2
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWstorezero)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg2(idx, mem)
		return true
	}
	// match: (MOVWstorezeroidx ptr (SLLconst [2] idx) mem)
	// result: (MOVWstorezeroidx4 ptr idx mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
			break
		}
		idx := v_1.Args[0]
		mem := v_2
		v.reset(OpARM64MOVWstorezeroidx4)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	// match: (MOVWstorezeroidx (SLLconst [2] idx) ptr mem)
	// result: (MOVWstorezeroidx4 ptr idx mem)
	for {
		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
			break
		}
		idx := v_0.Args[0]
		ptr := v_1
		mem := v_2
		v.reset(OpARM64MOVWstorezeroidx4)
		v.AddArg3(ptr, idx, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MOVWstorezeroidx4(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWstorezeroidx4 ptr (MOVDconst [c]) mem)
	// cond: is32Bit(c<<2)
	// result: (MOVWstorezero [int32(c<<2)] ptr mem)
	for {
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(is32Bit(c << 2)) {
			break
		}
		v.reset(OpARM64MOVWstorezero)
		v.AuxInt = int32ToAuxInt(int32(c << 2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (MSUB a x (MOVDconst [-1]))
	// result: (ADD a x)
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != -1 {
			break
		}
		v.reset(OpARM64ADD)
		v.AddArg2(a, x)
		return true
	}
	// match: (MSUB a _ (MOVDconst [0]))
	// result: a
	for {
		a := v_0
		if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
			break
		}
		v.copyOf(a)
		return true
	}
	// match: (MSUB a x (MOVDconst [1]))
	// result: (SUB a x)
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 1 {
			break
		}
		v.reset(OpARM64SUB)
		v.AddArg2(a, x)
		return true
	}
	// match: (MSUB a x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c)
	// result: (SUBshiftLL a x [log64(c)])
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(isPowerOfTwo64(c)) {
			break
		}
		v.reset(OpARM64SUBshiftLL)
		v.AuxInt = int64ToAuxInt(log64(c))
		v.AddArg2(a, x)
		return true
	}
	// match: (MSUB a x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c-1) && c>=3
	// result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(isPowerOfTwo64(c-1) && c >= 3) {
			break
		}
		v.reset(OpARM64SUB)
		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
		v0.AuxInt = int64ToAuxInt(log64(c - 1))
		v0.AddArg2(x, x)
		v.AddArg2(a, v0)
		return true
	}
	// match: (MSUB a x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c+1) && c>=7
	// result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(isPowerOfTwo64(c+1) && c >= 7) {
			break
		}
		v.reset(OpARM64ADD)
		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
		v0.AuxInt = int64ToAuxInt(log64(c + 1))
		v0.AddArg2(x, x)
		v.AddArg2(a, v0)
		return true
	}
	// match: (MSUB a x (MOVDconst [c]))
	// cond: c%3 == 0 && isPowerOfTwo64(c/3)
	// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
			break
		}
		v.reset(OpARM64ADDshiftLL)
		v.AuxInt = int64ToAuxInt(log64(c / 3))
		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
		v0.AuxInt = int64ToAuxInt(2)
		v0.AddArg2(x, x)
		v.AddArg2(a, v0)
		return true
	}
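	// Worked example for the c%3 rule above (illustrative): MSUB computes
	// a - x*c. For c == 12, SUBshiftLL x x [2] is x - (x<<2) = -3x, and
	// ADDshiftLL a t [log64(12/3)] = a + (t<<2) = a - 12x, matching a - x*c
	// without a multiply. The c%5, c%7, and c%9 rules below follow the same
	// scheme with different shift amounts.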
	// match: (MSUB a x (MOVDconst [c]))
	// cond: c%5 == 0 && isPowerOfTwo64(c/5)
	// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
			break
		}
		v.reset(OpARM64SUBshiftLL)
		v.AuxInt = int64ToAuxInt(log64(c / 5))
		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
		v0.AuxInt = int64ToAuxInt(2)
		v0.AddArg2(x, x)
		v.AddArg2(a, v0)
		return true
	}
	// match: (MSUB a x (MOVDconst [c]))
	// cond: c%7 == 0 && isPowerOfTwo64(c/7)
	// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
			break
		}
		v.reset(OpARM64ADDshiftLL)
		v.AuxInt = int64ToAuxInt(log64(c / 7))
		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
		v0.AuxInt = int64ToAuxInt(3)
		v0.AddArg2(x, x)
		v.AddArg2(a, v0)
		return true
	}
	// match: (MSUB a x (MOVDconst [c]))
	// cond: c%9 == 0 && isPowerOfTwo64(c/9)
	// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
			break
		}
		v.reset(OpARM64SUBshiftLL)
		v.AuxInt = int64ToAuxInt(log64(c / 9))
		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
		v0.AuxInt = int64ToAuxInt(3)
		v0.AddArg2(x, x)
		v.AddArg2(a, v0)
		return true
	}
	// match: (MSUB a (MOVDconst [-1]) x)
	// result: (ADD a x)
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
			break
		}
		x := v_2
		v.reset(OpARM64ADD)
		v.AddArg2(a, x)
		return true
	}
	// match: (MSUB a (MOVDconst [0]) _)
	// result: a
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
			break
		}
		v.copyOf(a)
		return true
	}
	// match: (MSUB a (MOVDconst [1]) x)
	// result: (SUB a x)
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
			break
		}
		x := v_2
		v.reset(OpARM64SUB)
		v.AddArg2(a, x)
		return true
	}
	// match: (MSUB a (MOVDconst [c]) x)
	// cond: isPowerOfTwo64(c)
	// result: (SUBshiftLL a x [log64(c)])
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(isPowerOfTwo64(c)) {
			break
		}
		v.reset(OpARM64SUBshiftLL)
		v.AuxInt = int64ToAuxInt(log64(c))
		v.AddArg2(a, x)
		return true
	}
	// match: (MSUB a (MOVDconst [c]) x)
	// cond: isPowerOfTwo64(c-1) && c>=3
	// result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(isPowerOfTwo64(c-1) && c >= 3) {
			break
		}
		v.reset(OpARM64SUB)
		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
		v0.AuxInt = int64ToAuxInt(log64(c - 1))
		v0.AddArg2(x, x)
		v.AddArg2(a, v0)
		return true
	}
	// match: (MSUB a (MOVDconst [c]) x)
	// cond: isPowerOfTwo64(c+1) && c>=7
	// result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(isPowerOfTwo64(c+1) && c >= 7) {
			break
		}
		v.reset(OpARM64ADD)
		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
		v0.AuxInt = int64ToAuxInt(log64(c + 1))
		v0.AddArg2(x, x)
		v.AddArg2(a, v0)
		return true
	}
	// match: (MSUB a (MOVDconst [c]) x)
	// cond: c%3 == 0 && isPowerOfTwo64(c/3)
	// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
			break
		}
		v.reset(OpARM64ADDshiftLL)
		v.AuxInt = int64ToAuxInt(log64(c / 3))
		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
		v0.AuxInt = int64ToAuxInt(2)
		v0.AddArg2(x, x)
		v.AddArg2(a, v0)
		return true
	}
	// match: (MSUB a (MOVDconst [c]) x)
	// cond: c%5 == 0 && isPowerOfTwo64(c/5)
	// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
			break
		}
		v.reset(OpARM64SUBshiftLL)
		v.AuxInt = int64ToAuxInt(log64(c / 5))
		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
		v0.AuxInt = int64ToAuxInt(2)
		v0.AddArg2(x, x)
		v.AddArg2(a, v0)
		return true
	}
	// match: (MSUB a (MOVDconst [c]) x)
	// cond: c%7 == 0 && isPowerOfTwo64(c/7)
	// result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
			break
		}
		v.reset(OpARM64ADDshiftLL)
		v.AuxInt = int64ToAuxInt(log64(c / 7))
		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
		v0.AuxInt = int64ToAuxInt(3)
		v0.AddArg2(x, x)
		v.AddArg2(a, v0)
		return true
	}
	// match: (MSUB a (MOVDconst [c]) x)
	// cond: c%9 == 0 && isPowerOfTwo64(c/9)
	// result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
			break
		}
		v.reset(OpARM64SUBshiftLL)
		v.AuxInt = int64ToAuxInt(log64(c / 9))
		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
		v0.AuxInt = int64ToAuxInt(3)
		v0.AddArg2(x, x)
		v.AddArg2(a, v0)
		return true
	}
	// match: (MSUB (MOVDconst [c]) x y)
	// result: (ADDconst [c] (MNEG <x.Type> x y))
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		y := v_2
		v.reset(OpARM64ADDconst)
		v.AuxInt = int64ToAuxInt(c)
		v0 := b.NewValue0(v.Pos, OpARM64MNEG, x.Type)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (MSUB a (MOVDconst [c]) (MOVDconst [d]))
	// result: (SUBconst [c*d] a)
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		d := auxIntToInt64(v_2.AuxInt)
		v.reset(OpARM64SUBconst)
		v.AuxInt = int64ToAuxInt(c * d)
		v.AddArg(a)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (MSUBW a x (MOVDconst [c]))
	// cond: int32(c)==-1
	// result: (MOVWUreg (ADD <a.Type> a x))
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
		v0.AddArg2(a, x)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a _ (MOVDconst [c]))
	// cond: int32(c)==0
	// result: (MOVWUreg a)
	for {
		a := v_0
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v.AddArg(a)
		return true
	}
	// match: (MSUBW a x (MOVDconst [c]))
	// cond: int32(c)==1
	// result: (MOVWUreg (SUB <a.Type> a x))
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(int32(c) == 1) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
		v0.AddArg2(a, x)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c)
	// result: (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(isPowerOfTwo64(c)) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
		v0.AuxInt = int64ToAuxInt(log64(c))
		v0.AddArg2(a, x)
		v.AddArg(v0)
		return true
	}
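	// MSUBW is the 32-bit multiply-subtract: only the low 32 bits of the
	// result are meaningful, so these rewrites wrap their replacement in
	// MOVWUreg and test the constant as int32(c), unlike the 64-bit MSUB
	// rules above.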
	// match: (MSUBW a x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c-1) && int32(c)>=3
	// result: (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
		v1.AuxInt = int64ToAuxInt(log64(c - 1))
		v1.AddArg2(x, x)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c+1) && int32(c)>=7
	// result: (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
		v1.AuxInt = int64ToAuxInt(log64(c + 1))
		v1.AddArg2(x, x)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a x (MOVDconst [c]))
	// cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
	// result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
		v0.AuxInt = int64ToAuxInt(log64(c / 3))
		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
		v1.AuxInt = int64ToAuxInt(2)
		v1.AddArg2(x, x)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a x (MOVDconst [c]))
	// cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
	// result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
		v0.AuxInt = int64ToAuxInt(log64(c / 5))
		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
		v1.AuxInt = int64ToAuxInt(2)
		v1.AddArg2(x, x)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a x (MOVDconst [c]))
	// cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
	// result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
		v0.AuxInt = int64ToAuxInt(log64(c / 7))
		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
		v1.AuxInt = int64ToAuxInt(3)
		v1.AddArg2(x, x)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a x (MOVDconst [c]))
	// cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
	// result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
	for {
		a := v_0
		x := v_1
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_2.AuxInt)
		if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
		v0.AuxInt = int64ToAuxInt(log64(c / 9))
		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
		v1.AuxInt = int64ToAuxInt(3)
		v1.AddArg2(x, x)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a (MOVDconst [c]) x)
	// cond: int32(c)==-1
	// result: (MOVWUreg (ADD <a.Type> a x))
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
		v0.AddArg2(a, x)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a (MOVDconst [c]) _)
	// cond: int32(c)==0
	// result: (MOVWUreg a)
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v.AddArg(a)
		return true
	}
	// match: (MSUBW a (MOVDconst [c]) x)
	// cond: int32(c)==1
	// result: (MOVWUreg (SUB <a.Type> a x))
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(int32(c) == 1) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
		v0.AddArg2(a, x)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a (MOVDconst [c]) x)
	// cond: isPowerOfTwo64(c)
	// result: (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(isPowerOfTwo64(c)) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
		v0.AuxInt = int64ToAuxInt(log64(c))
		v0.AddArg2(a, x)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a (MOVDconst [c]) x)
	// cond: isPowerOfTwo64(c-1) && int32(c)>=3
	// result: (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
		v1.AuxInt = int64ToAuxInt(log64(c - 1))
		v1.AddArg2(x, x)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a (MOVDconst [c]) x)
	// cond: isPowerOfTwo64(c+1) && int32(c)>=7
	// result: (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
		v1.AuxInt = int64ToAuxInt(log64(c + 1))
		v1.AddArg2(x, x)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a (MOVDconst [c]) x)
	// cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
	// result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
		v0.AuxInt = int64ToAuxInt(log64(c / 3))
		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
		v1.AuxInt = int64ToAuxInt(2)
		v1.AddArg2(x, x)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a (MOVDconst [c]) x)
	// cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
	// result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
		v0.AuxInt = int64ToAuxInt(log64(c / 5))
		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
		v1.AuxInt = int64ToAuxInt(2)
		v1.AddArg2(x, x)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a (MOVDconst [c]) x)
	// cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
	// result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
		v0.AuxInt = int64ToAuxInt(log64(c / 7))
		v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
		v1.AuxInt = int64ToAuxInt(3)
		v1.AddArg2(x, x)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a (MOVDconst [c]) x)
	// cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
	// result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		x := v_2
		if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
		v0.AuxInt = int64ToAuxInt(log64(c / 9))
		v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
		v1.AuxInt = int64ToAuxInt(3)
		v1.AddArg2(x, x)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW (MOVDconst [c]) x y)
	// result: (MOVWUreg (ADDconst <x.Type> [c] (MNEGW <x.Type> x y)))
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		y := v_2
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64ADDconst, x.Type)
		v0.AuxInt = int64ToAuxInt(c)
		v1 := b.NewValue0(v.Pos, OpARM64MNEGW, x.Type)
		v1.AddArg2(x, y)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (MSUBW a (MOVDconst [c]) (MOVDconst [d]))
	// result: (MOVWUreg (SUBconst <a.Type> [c*d] a))
	for {
		a := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if v_2.Op != OpARM64MOVDconst {
			break
		}
		d := auxIntToInt64(v_2.AuxInt)
		v.reset(OpARM64MOVWUreg)
		v0 := b.NewValue0(v.Pos, OpARM64SUBconst, a.Type)
		v0.AuxInt = int64ToAuxInt(c * d)
		v0.AddArg(a)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MUL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (MUL (NEG x) y)
	// result: (MNEG x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpARM64NEG {
				continue
			}
			x := v_0.Args[0]
			y := v_1
			v.reset(OpARM64MNEG)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (MUL x (MOVDconst [-1]))
	// result: (NEG x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
				continue
			}
			v.reset(OpARM64NEG)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (MUL _ (MOVDconst [0]))
	// result: (MOVDconst [0])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
				continue
			}
			v.reset(OpARM64MOVDconst)
			v.AuxInt = int64ToAuxInt(0)
			return true
		}
		break
	}
	// match: (MUL x (MOVDconst [1]))
	// result: x
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
				continue
			}
			v.copyOf(x)
			return true
		}
		break
	}
	// match: (MUL x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c)
	// result: (SLLconst [log64(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(isPowerOfTwo64(c)) {
				continue
			}
			v.reset(OpARM64SLLconst)
			v.AuxInt = int64ToAuxInt(log64(c))
			v.AddArg(x)
			return true
		}
		break
	}
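	// MUL is commutative, so each pattern is tried with the operands in
	// both orders: the enclosing loop swaps v_0 and v_1 on its second
	// iteration (the _i0 idiom), letting one rule body match either
	// argument arrangement.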
func rewriteValueARM64_OpARM64MULW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (MULW (NEG x) y)
	// result: (MNEGW x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpARM64NEG {
				continue
			}
			x := v_0.Args[0]
			y := v_1
			v.reset(OpARM64MNEGW)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (MULW x (MOVDconst [c]))
	// cond: int32(c)==-1
	// result: (MOVWUreg (NEG <x.Type> x))
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(int32(c) == -1) {
				continue
			}
			v.reset(OpARM64MOVWUreg)
			v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (MULW _ (MOVDconst [c]))
	// cond: int32(c)==0
	// result: (MOVDconst [0])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(int32(c) == 0) {
				continue
			}
			v.reset(OpARM64MOVDconst)
			v.AuxInt = int64ToAuxInt(0)
			return true
		}
		break
	}
	// match: (MULW x (MOVDconst [c]))
	// cond: int32(c)==1
	// result: (MOVWUreg x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(int32(c) == 1) {
				continue
			}
			v.reset(OpARM64MOVWUreg)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (MULW x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c)
	// result: (MOVWUreg (SLLconst <x.Type> [log64(c)] x))
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(isPowerOfTwo64(c)) {
				continue
			}
			v.reset(OpARM64MOVWUreg)
			v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
			v0.AuxInt = int64ToAuxInt(log64(c))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (MULW x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c-1) && int32(c) >= 3
	// result: (MOVWUreg (ADDshiftLL <x.Type> x x [log64(c-1)]))
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
				continue
			}
			v.reset(OpARM64MOVWUreg)
			v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
			v0.AuxInt = int64ToAuxInt(log64(c - 1))
			v0.AddArg2(x, x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (MULW x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c+1) && int32(c) >= 7
	// result: (MOVWUreg (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
				continue
			}
			v.reset(OpARM64MOVWUreg)
			v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
			v0.AuxInt = int64ToAuxInt(log64(c + 1))
			v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
			v1.AddArg(x)
			v0.AddArg2(v1, x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (MULW x (MOVDconst [c]))
	// cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
	// result: (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (ADDshiftLL <x.Type> x x [1])))
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
				continue
			}
			v.reset(OpARM64MOVWUreg)
			v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
			v0.AuxInt = int64ToAuxInt(log64(c / 3))
			v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
			v1.AuxInt = int64ToAuxInt(1)
			v1.AddArg2(x, x)
			v0.AddArg(v1)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (MULW x (MOVDconst [c]))
	// cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
	// result: (MOVWUreg (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
				continue
			}
			v.reset(OpARM64MOVWUreg)
			v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
			v0.AuxInt = int64ToAuxInt(log64(c / 5))
			v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
			v1.AuxInt = int64ToAuxInt(2)
			v1.AddArg2(x, x)
			v0.AddArg(v1)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (MULW x (MOVDconst [c]))
	// cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
	// result: (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3])))
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
				continue
			}
			v.reset(OpARM64MOVWUreg)
			v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
			v0.AuxInt = int64ToAuxInt(log64(c / 7))
			v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
			v1.AuxInt = int64ToAuxInt(3)
			v2 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
			v2.AddArg(x)
			v1.AddArg2(v2, x)
			v0.AddArg(v1)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (MULW x (MOVDconst [c]))
	// cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
	// result: (MOVWUreg (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
				continue
			}
			v.reset(OpARM64MOVWUreg)
			v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
			v0.AuxInt = int64ToAuxInt(log64(c / 9))
			v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
			v1.AuxInt = int64ToAuxInt(3)
			v1.AddArg2(x, x)
			v0.AddArg(v1)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (MULW (MOVDconst [c]) (MOVDconst [d]))
	// result: (MOVDconst [int64(uint32(c*d))])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			d := auxIntToInt64(v_1.AuxInt)
			v.reset(OpARM64MOVDconst)
			v.AuxInt = int64ToAuxInt(int64(uint32(c * d)))
			return true
		}
		break
	}
	return false
}
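// As with MSUBW, the MULW rules keep only the low 32 bits of the result
// (hence the MOVWUreg wrappers) and test the constant through int32(c);
// the additional is32Bit(c) guard restricts the factored cases to
// constants that fit in 32 bits. The constant fold likewise truncates:
// int64(uint32(c*d)) zero-extends the low word of the product.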
func rewriteValueARM64_OpARM64MVN(v *Value) bool {
	v_0 := v.Args[0]
	// match: (MVN (XOR x y))
	// result: (EON x y)
	for {
		if v_0.Op != OpARM64XOR {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpARM64EON)
		v.AddArg2(x, y)
		return true
	}
	// match: (MVN (MOVDconst [c]))
	// result: (MOVDconst [^c])
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(^c)
		return true
	}
	// match: (MVN x:(SLLconst [c] y))
	// cond: clobberIfDead(x)
	// result: (MVNshiftLL [c] y)
	for {
		x := v_0
		if x.Op != OpARM64SLLconst {
			break
		}
		c := auxIntToInt64(x.AuxInt)
		y := x.Args[0]
		if !(clobberIfDead(x)) {
			break
		}
		v.reset(OpARM64MVNshiftLL)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg(y)
		return true
	}
	// match: (MVN x:(SRLconst [c] y))
	// cond: clobberIfDead(x)
	// result: (MVNshiftRL [c] y)
	for {
		x := v_0
		if x.Op != OpARM64SRLconst {
			break
		}
		c := auxIntToInt64(x.AuxInt)
		y := x.Args[0]
		if !(clobberIfDead(x)) {
			break
		}
		v.reset(OpARM64MVNshiftRL)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg(y)
		return true
	}
	// match: (MVN x:(SRAconst [c] y))
	// cond: clobberIfDead(x)
	// result: (MVNshiftRA [c] y)
	for {
		x := v_0
		if x.Op != OpARM64SRAconst {
			break
		}
		c := auxIntToInt64(x.AuxInt)
		y := x.Args[0]
		if !(clobberIfDead(x)) {
			break
		}
		v.reset(OpARM64MVNshiftRA)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg(y)
		return true
	}
	// match: (MVN x:(RORconst [c] y))
	// cond: clobberIfDead(x)
	// result: (MVNshiftRO [c] y)
	for {
		x := v_0
		if x.Op != OpARM64RORconst {
			break
		}
		c := auxIntToInt64(x.AuxInt)
		y := x.Args[0]
		if !(clobberIfDead(x)) {
			break
		}
		v.reset(OpARM64MVNshiftRO)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MVNshiftLL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (MVNshiftLL (MOVDconst [c]) [d])
	// result: (MOVDconst [^int64(uint64(c)<<uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MVNshiftRA(v *Value) bool {
	v_0 := v.Args[0]
	// match: (MVNshiftRA (MOVDconst [c]) [d])
	// result: (MOVDconst [^(c>>uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MVNshiftRL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (MVNshiftRL (MOVDconst [c]) [d])
	// result: (MOVDconst [^int64(uint64(c)>>uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64MVNshiftRO(v *Value) bool {
	v_0 := v.Args[0]
	// match: (MVNshiftRO (MOVDconst [c]) [d])
	// result: (MOVDconst [^rotateRight64(c, d)])
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(^rotateRight64(c, d))
		return true
	}
	return false
}
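// The (MVN (XOR x y)) => (EON x y) rule above uses the identity
// ^(x^y) == x ^ ^y: NOT of XOR is XOR-with-NOT, which is exactly what
// the EON instruction computes.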
func rewriteValueARM64_OpARM64NEG(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NEG (MUL x y))
	// result: (MNEG x y)
	for {
		if v_0.Op != OpARM64MUL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpARM64MNEG)
		v.AddArg2(x, y)
		return true
	}
	// match: (NEG (MULW x y))
	// cond: v.Type.Size() <= 4
	// result: (MNEGW x y)
	for {
		if v_0.Op != OpARM64MULW {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(v.Type.Size() <= 4) {
			break
		}
		v.reset(OpARM64MNEGW)
		v.AddArg2(x, y)
		return true
	}
	// match: (NEG (NEG x))
	// result: x
	for {
		if v_0.Op != OpARM64NEG {
			break
		}
		x := v_0.Args[0]
		v.copyOf(x)
		return true
	}
	// match: (NEG (MOVDconst [c]))
	// result: (MOVDconst [-c])
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(-c)
		return true
	}
	// match: (NEG x:(SLLconst [c] y))
	// cond: clobberIfDead(x)
	// result: (NEGshiftLL [c] y)
	for {
		x := v_0
		if x.Op != OpARM64SLLconst {
			break
		}
		c := auxIntToInt64(x.AuxInt)
		y := x.Args[0]
		if !(clobberIfDead(x)) {
			break
		}
		v.reset(OpARM64NEGshiftLL)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg(y)
		return true
	}
	// match: (NEG x:(SRLconst [c] y))
	// cond: clobberIfDead(x)
	// result: (NEGshiftRL [c] y)
	for {
		x := v_0
		if x.Op != OpARM64SRLconst {
			break
		}
		c := auxIntToInt64(x.AuxInt)
		y := x.Args[0]
		if !(clobberIfDead(x)) {
			break
		}
		v.reset(OpARM64NEGshiftRL)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg(y)
		return true
	}
	// match: (NEG x:(SRAconst [c] y))
	// cond: clobberIfDead(x)
	// result: (NEGshiftRA [c] y)
	for {
		x := v_0
		if x.Op != OpARM64SRAconst {
			break
		}
		c := auxIntToInt64(x.AuxInt)
		y := x.Args[0]
		if !(clobberIfDead(x)) {
			break
		}
		v.reset(OpARM64NEGshiftRA)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64NEGshiftLL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NEGshiftLL (MOVDconst [c]) [d])
	// result: (MOVDconst [-int64(uint64(c)<<uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(-int64(uint64(c) << uint64(d)))
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64NEGshiftRA(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NEGshiftRA (MOVDconst [c]) [d])
	// result: (MOVDconst [-(c>>uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(-(c >> uint64(d)))
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64NEGshiftRL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NEGshiftRL (MOVDconst [c]) [d])
	// result: (MOVDconst [-int64(uint64(c)>>uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(-int64(uint64(c) >> uint64(d)))
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64NotEqual(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (NotEqual (CMPconst [0] z:(AND x y)))
	// cond: z.Uses == 1
	// result: (NotEqual (TST x y))
	for {
		if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
			break
		}
		z := v_0.Args[0]
		if z.Op != OpARM64AND {
			break
		}
		y := z.Args[1]
		x := z.Args[0]
		if !(z.Uses == 1) {
			break
		}
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (NotEqual (CMPWconst [0] x:(ANDconst [c] y)))
	// cond: x.Uses == 1
	// result: (NotEqual (TSTWconst [int32(c)] y))
	for {
		if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		if x.Op != OpARM64ANDconst {
			break
		}
		c := auxIntToInt64(x.AuxInt)
		y := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(int32(c))
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (NotEqual (CMPWconst [0] z:(AND x y)))
	// cond: z.Uses == 1
	// result: (NotEqual (TSTW x y))
	for {
		if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
			break
		}
		z := v_0.Args[0]
		if z.Op != OpARM64AND {
			break
		}
		y := z.Args[1]
		x := z.Args[0]
		if !(z.Uses == 1) {
			break
		}
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (NotEqual (CMPconst [0] x:(ANDconst [c] y)))
	// cond: x.Uses == 1
	// result: (NotEqual (TSTconst [c] y))
	for {
		if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		if x.Op != OpARM64ANDconst {
			break
		}
		c := auxIntToInt64(x.AuxInt)
		y := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags)
		v0.AuxInt = int64ToAuxInt(c)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (NotEqual (CMP x z:(NEG y)))
	// cond: z.Uses == 1
	// result: (NotEqual (CMN x y))
	for {
		if v_0.Op != OpARM64CMP {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		z := v_0.Args[1]
		if z.Op != OpARM64NEG {
			break
		}
		y := z.Args[0]
		if !(z.Uses == 1) {
			break
		}
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (NotEqual (CMPW x z:(NEG y)))
	// cond: z.Uses == 1
	// result: (NotEqual (CMNW x y))
	for {
		if v_0.Op != OpARM64CMPW {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		z := v_0.Args[1]
		if z.Op != OpARM64NEG {
			break
		}
		y := z.Args[0]
		if !(z.Uses == 1) {
			break
		}
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (NotEqual (CMPconst [0] x:(ADDconst [c] y)))
	// cond: x.Uses == 1
	// result: (NotEqual (CMNconst [c] y))
	for {
		if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		if x.Op != OpARM64ADDconst {
			break
		}
		c := auxIntToInt64(x.AuxInt)
		y := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags)
		v0.AuxInt = int64ToAuxInt(c)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (NotEqual (CMPWconst [0] x:(ADDconst [c] y)))
	// cond: x.Uses == 1
	// result: (NotEqual (CMNWconst [int32(c)] y))
	for {
		if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		if x.Op != OpARM64ADDconst {
			break
		}
		c := auxIntToInt64(x.AuxInt)
		y := x.Args[0]
		if !(x.Uses == 1) {
			break
		}
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(int32(c))
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (NotEqual (CMPconst [0] z:(ADD x y)))
	// cond: z.Uses == 1
	// result: (NotEqual (CMN x y))
	for {
		if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
			break
		}
		z := v_0.Args[0]
		if z.Op != OpARM64ADD {
			break
		}
		y := z.Args[1]
		x := z.Args[0]
		if !(z.Uses == 1) {
			break
		}
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (NotEqual (CMPWconst [0] z:(ADD x y)))
	// cond: z.Uses == 1
	// result: (NotEqual (CMNW x y))
	for {
		if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
			break
		}
		z := v_0.Args[0]
		if z.Op != OpARM64ADD {
			break
		}
		y := z.Args[1]
		x := z.Args[0]
		if !(z.Uses == 1) {
			break
		}
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (NotEqual (CMPconst [0] z:(MADD a x y)))
	// cond: z.Uses == 1
	// result: (NotEqual (CMN a (MUL <x.Type> x y)))
	for {
		if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
			break
		}
		z := v_0.Args[0]
		if z.Op != OpARM64MADD {
			break
		}
		y := z.Args[2]
		a := z.Args[0]
		x := z.Args[1]
		if !(z.Uses == 1) {
			break
		}
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type)
		v1.AddArg2(x, y)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (NotEqual (CMPconst [0] z:(MSUB a x y)))
	// cond: z.Uses == 1
	// result: (NotEqual (CMP a (MUL <x.Type> x y)))
	for {
		if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
			break
		}
		z := v_0.Args[0]
		if z.Op != OpARM64MSUB {
			break
		}
		y := z.Args[2]
		a := z.Args[0]
		x := z.Args[1]
		if !(z.Uses == 1) {
			break
		}
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type)
		v1.AddArg2(x, y)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (NotEqual (CMPWconst [0] z:(MADDW a x y)))
	// cond: z.Uses == 1
	// result: (NotEqual (CMNW a (MULW <x.Type> x y)))
	for {
		if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
			break
		}
		z := v_0.Args[0]
		if z.Op != OpARM64MADDW {
			break
		}
		y := z.Args[2]
		a := z.Args[0]
		x := z.Args[1]
		if !(z.Uses == 1) {
			break
		}
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
		v1.AddArg2(x, y)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (NotEqual (CMPWconst [0] z:(MSUBW a x y)))
	// cond: z.Uses == 1
	// result: (NotEqual (CMPW a (MULW <x.Type> x y)))
	for {
		if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
			break
		}
		z := v_0.Args[0]
		if z.Op != OpARM64MSUBW {
			break
		}
		y := z.Args[2]
		a := z.Args[0]
		x := z.Args[1]
		if !(z.Uses == 1) {
			break
		}
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
		v1.AddArg2(x, y)
		v0.AddArg2(a, v1)
		v.AddArg(v0)
		return true
	}
	// match: (NotEqual (FlagConstant [fc]))
	// result: (MOVDconst [b2i(fc.ne())])
	for {
		if v_0.Op != OpARM64FlagConstant {
			break
		}
		fc := auxIntToFlagConstant(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(b2i(fc.ne()))
		return true
	}
	// match: (NotEqual (InvertFlags x))
	// result: (NotEqual x)
	for {
		if v_0.Op != OpARM64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpARM64NotEqual)
		v.AddArg(x)
		return true
	}
	return false
}
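// The NotEqual rules above fold a comparison against zero into the
// flag-setting form of the arithmetic itself: TST for AND, CMN for ADD
// (and for CMP against a negation, since x - (-y) sets the same flags
// as x + y), and CMN/CMP against a MUL for MADD/MSUB. The single-use
// conditions (z.Uses == 1, x.Uses == 1) matter because the rewrite
// discards the integer result and keeps only the flags, so it is only
// safe when nothing else consumes that result.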
func rewriteValueARM64_OpARM64OR(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (OR x (MOVDconst [c]))
	// result: (ORconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			v.reset(OpARM64ORconst)
			v.AuxInt = int64ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (OR x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (OR x (MVN y))
	// result: (ORN x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MVN {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpARM64ORN)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (OR x0 x1:(SLLconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (ORshiftLL x0 y [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			x1 := v_1
			if x1.Op != OpARM64SLLconst {
				continue
			}
			c := auxIntToInt64(x1.AuxInt)
			y := x1.Args[0]
			if !(clobberIfDead(x1)) {
				continue
			}
			v.reset(OpARM64ORshiftLL)
			v.AuxInt = int64ToAuxInt(c)
			v.AddArg2(x0, y)
			return true
		}
		break
	}
	// match: (OR x0 x1:(SRLconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (ORshiftRL x0 y [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			x1 := v_1
			if x1.Op != OpARM64SRLconst {
				continue
			}
			c := auxIntToInt64(x1.AuxInt)
			y := x1.Args[0]
			if !(clobberIfDead(x1)) {
				continue
			}
			v.reset(OpARM64ORshiftRL)
			v.AuxInt = int64ToAuxInt(c)
			v.AddArg2(x0, y)
			return true
		}
		break
	}
	// match: (OR x0 x1:(SRAconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (ORshiftRA x0 y [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			x1 := v_1
			if x1.Op != OpARM64SRAconst {
				continue
			}
			c := auxIntToInt64(x1.AuxInt)
			y := x1.Args[0]
			if !(clobberIfDead(x1)) {
				continue
			}
			v.reset(OpARM64ORshiftRA)
			v.AuxInt = int64ToAuxInt(c)
			v.AddArg2(x0, y)
			return true
		}
		break
	}
	// match: (OR x0 x1:(RORconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (ORshiftRO x0 y [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			x1 := v_1
			if x1.Op != OpARM64RORconst {
				continue
			}
			c := auxIntToInt64(x1.AuxInt)
			y := x1.Args[0]
			if !(clobberIfDead(x1)) {
				continue
			}
			v.reset(OpARM64ORshiftRO)
			v.AuxInt = int64ToAuxInt(c)
			v.AddArg2(x0, y)
			return true
		}
		break
	}
	// match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y))
	// cond: ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
	// result: (BFI [bfc] y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpARM64UBFIZ {
				continue
			}
			bfc := auxIntToArm64BitField(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpARM64ANDconst {
				continue
			}
			ac := auxIntToInt64(v_1.AuxInt)
			y := v_1.Args[0]
			if !(ac == ^((1<<uint(bfc.getARM64BFwidth()) - 1) << uint(bfc.getARM64BFlsb()))) {
				continue
			}
			v.reset(OpARM64BFI)
			v.AuxInt = arm64BitFieldToAuxInt(bfc)
			v.AddArg2(y, x)
			return true
		}
		break
	}
	// match: (OR (UBFX [bfc] x) (ANDconst [ac] y))
	// cond: ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
	// result: (BFXIL [bfc] y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpARM64UBFX {
				continue
			}
			bfc := auxIntToArm64BitField(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpARM64ANDconst {
				continue
			}
			ac := auxIntToInt64(v_1.AuxInt)
			y := v_1.Args[0]
			if !(ac == ^(1<<uint(bfc.getARM64BFwidth()) - 1)) {
				continue
			}
			v.reset(OpARM64BFXIL)
			v.AuxInt = arm64BitFieldToAuxInt(bfc)
			v.AddArg2(y, x)
			return true
		}
		break
	}
	return false
}
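// The two bitfield rules above turn an OR of masked values into BFI
// (bitfield insert) and BFXIL (bitfield extract and insert low). In
// both, the AND mask must be exactly the complement of the bits the
// UBFIZ/UBFX operand occupies, so the OR merges two disjoint bit
// ranges and a single bitfield instruction can express it.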
func rewriteValueARM64_OpARM64ORN(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORN x (MOVDconst [c]))
	// result: (ORconst [^c] x)
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64ORconst)
		v.AuxInt = int64ToAuxInt(^c)
		v.AddArg(x)
		return true
	}
	// match: (ORN x x)
	// result: (MOVDconst [-1])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	// match: (ORN x0 x1:(SLLconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (ORNshiftLL x0 y [c])
	for {
		x0 := v_0
		x1 := v_1
		if x1.Op != OpARM64SLLconst {
			break
		}
		c := auxIntToInt64(x1.AuxInt)
		y := x1.Args[0]
		if !(clobberIfDead(x1)) {
			break
		}
		v.reset(OpARM64ORNshiftLL)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg2(x0, y)
		return true
	}
	// match: (ORN x0 x1:(SRLconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (ORNshiftRL x0 y [c])
	for {
		x0 := v_0
		x1 := v_1
		if x1.Op != OpARM64SRLconst {
			break
		}
		c := auxIntToInt64(x1.AuxInt)
		y := x1.Args[0]
		if !(clobberIfDead(x1)) {
			break
		}
		v.reset(OpARM64ORNshiftRL)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg2(x0, y)
		return true
	}
	// match: (ORN x0 x1:(SRAconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (ORNshiftRA x0 y [c])
	for {
		x0 := v_0
		x1 := v_1
		if x1.Op != OpARM64SRAconst {
			break
		}
		c := auxIntToInt64(x1.AuxInt)
		y := x1.Args[0]
		if !(clobberIfDead(x1)) {
			break
		}
		v.reset(OpARM64ORNshiftRA)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg2(x0, y)
		return true
	}
	// match: (ORN x0 x1:(RORconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (ORNshiftRO x0 y [c])
	for {
		x0 := v_0
		x1 := v_1
		if x1.Op != OpARM64RORconst {
			break
		}
		c := auxIntToInt64(x1.AuxInt)
		y := x1.Args[0]
		if !(clobberIfDead(x1)) {
			break
		}
		v.reset(OpARM64ORNshiftRO)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg2(x0, y)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64ORNshiftLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORNshiftLL x (MOVDconst [c]) [d])
	// result: (ORconst x [^int64(uint64(c)<<uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64ORconst)
		v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
		v.AddArg(x)
		return true
	}
	// match: (ORNshiftLL (SLLconst x [c]) x [c])
	// result: (MOVDconst [-1])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64ORNshiftRA(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORNshiftRA x (MOVDconst [c]) [d])
	// result: (ORconst x [^(c>>uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64ORconst)
		v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
		v.AddArg(x)
		return true
	}
	// match: (ORNshiftRA (SRAconst x [c]) x [c])
	// result: (MOVDconst [-1])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64ORNshiftRL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORNshiftRL x (MOVDconst [c]) [d])
	// result: (ORconst x [^int64(uint64(c)>>uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64ORconst)
		v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
		v.AddArg(x)
		return true
	}
	// match: (ORNshiftRL (SRLconst x [c]) x [c])
	// result: (MOVDconst [-1])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64ORNshiftRO(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORNshiftRO x (MOVDconst [c]) [d])
	// result: (ORconst x [^rotateRight64(c, d)])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64ORconst)
		v.AuxInt = int64ToAuxInt(^rotateRight64(c, d))
		v.AddArg(x)
		return true
	}
	// match: (ORNshiftRO (RORconst x [c]) x [c])
	// result: (MOVDconst [-1])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64ORconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ORconst [0] x)
	// result: x
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ORconst [-1] _)
	// result: (MOVDconst [-1])
	for {
		if auxIntToInt64(v.AuxInt) != -1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	// match: (ORconst [c] (MOVDconst [d]))
	// result: (MOVDconst [c|d])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(c | d)
		return true
	}
	// match: (ORconst [c] (ORconst [d] x))
	// result: (ORconst [c|d] x)
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64ORconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpARM64ORconst)
		v.AuxInt = int64ToAuxInt(c | d)
		v.AddArg(x)
		return true
	}
	// match: (ORconst [c1] (ANDconst [c2] x))
	// cond: c2|c1 == ^0
	// result: (ORconst [c1] x)
	for {
		c1 := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64ANDconst {
			break
		}
		c2 := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c2|c1 == ^0) {
			break
		}
		v.reset(OpARM64ORconst)
		v.AuxInt = int64ToAuxInt(c1)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORshiftLL (MOVDconst [c]) x [d])
	// result: (ORconst [c] (SLLconst <x.Type> x [d]))
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		v.reset(OpARM64ORconst)
		v.AuxInt = int64ToAuxInt(c)
		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
		v0.AuxInt = int64ToAuxInt(d)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (ORshiftLL x (MOVDconst [c]) [d])
	// result: (ORconst x [int64(uint64(c)<<uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64ORconst)
		v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
		v.AddArg(x)
		return true
	}
	// match: (ORshiftLL y:(SLLconst x [c]) x [c])
	// result: y
	for {
		c := auxIntToInt64(v.AuxInt)
		y := v_0
		if y.Op != OpARM64SLLconst || auxIntToInt64(y.AuxInt) != c {
			break
		}
		x := y.Args[0]
		if x != v_1 {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (ORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
	// result: (REV16W x)
	for {
		if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
			break
		}
		x := v_0.Args[0]
		if x != v_1 {
			break
		}
		v.reset(OpARM64REV16W)
		v.AddArg(x)
		return true
	}
	// match: (ORshiftLL [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x))
	// cond: uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
	// result: (REV16W x)
	for {
		if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 24) {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpARM64ANDconst {
			break
		}
		c1 := auxIntToInt64(v_0_0.AuxInt)
		x := v_0_0.Args[0]
		if v_1.Op != OpARM64ANDconst {
			break
		}
		c2 := auxIntToInt64(v_1.AuxInt)
		if x != v_1.Args[0] || !(uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff) {
			break
		}
		v.reset(OpARM64REV16W)
		v.AddArg(x)
		return true
	}
	// match: (ORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
	// cond: (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff)
	// result: (REV16 x)
	for {
		if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpARM64ANDconst {
			break
		}
		c1 := auxIntToInt64(v_0_0.AuxInt)
		x := v_0_0.Args[0]
		if v_1.Op != OpARM64ANDconst {
			break
		}
		c2 := auxIntToInt64(v_1.AuxInt)
		if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) {
			break
		}
		v.reset(OpARM64REV16)
		v.AddArg(x)
		return true
	}
	// match: (ORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
	// cond: (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff)
	// result: (REV16 (ANDconst <x.Type> [0xffffffff] x))
	for {
		if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpARM64ANDconst {
			break
		}
		c1 := auxIntToInt64(v_0_0.AuxInt)
		x := v_0_0.Args[0]
		if v_1.Op != OpARM64ANDconst {
			break
		}
		c2 := auxIntToInt64(v_1.AuxInt)
		if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) {
			break
		}
		v.reset(OpARM64REV16)
		v0 := b.NewValue0(v.Pos, OpARM64ANDconst, x.Type)
		v0.AuxInt = int64ToAuxInt(0xffffffff)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: ( ORshiftLL [c] (SRLconst x [64-c]) x2)
	// result: (EXTRconst [64-c] x2 x)
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
			break
		}
		x := v_0.Args[0]
		x2 := v_1
		v.reset(OpARM64EXTRconst)
		v.AuxInt = int64ToAuxInt(64 - c)
		v.AddArg2(x2, x)
		return true
	}
	// match: ( ORshiftLL <t> [c] (UBFX [bfc] x) x2)
	// cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
	// result: (EXTRWconst [32-c] x2 x)
	for {
		t := v.Type
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64UBFX {
			break
		}
		bfc := auxIntToArm64BitField(v_0.AuxInt)
		x := v_0.Args[0]
		x2 := v_1
		if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
			break
		}
		v.reset(OpARM64EXTRWconst)
		v.AuxInt = int64ToAuxInt(32 - c)
		v.AddArg2(x2, x)
		return true
	}
	// match: (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y))
	// cond: sc == bfc.getARM64BFwidth()
	// result: (BFXIL [bfc] y x)
	for {
		sc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64UBFX {
			break
		}
		bfc := auxIntToArm64BitField(v_0.AuxInt)
		x := v_0.Args[0]
		if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != sc {
			break
		}
		y := v_1.Args[0]
		if !(sc == bfc.getARM64BFwidth()) {
			break
		}
		v.reset(OpARM64BFXIL)
		v.AuxInt = arm64BitFieldToAuxInt(bfc)
		v.AddArg2(y, x)
		return true
	}
	return false
}
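// Several ORshiftLL rules above recognize byte-swap idioms. For REV16W
// the pattern is "swap the bytes within each 16-bit half":
// ((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8). The sketch below
// is illustrative only (it is not part of the generated rewriter):
func exampleRev16W(x uint32) uint32 {
	// The high byte of each halfword moves down, the low byte moves up;
	// a single REV16W instruction performs both moves at once.
	return (x&0xff00ff00)>>8 | (x&0x00ff00ff)<<8
}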
func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (ORshiftRA (MOVDconst [c]) x [d])
	// result: (ORconst [c] (SRAconst <x.Type> x [d]))
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		v.reset(OpARM64ORconst)
		v.AuxInt = int64ToAuxInt(c)
		v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
		v0.AuxInt = int64ToAuxInt(d)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (ORshiftRA x (MOVDconst [c]) [d])
	// result: (ORconst x [c>>uint64(d)])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64ORconst)
		v.AuxInt = int64ToAuxInt(c >> uint64(d))
		v.AddArg(x)
		return true
	}
	// match: (ORshiftRA y:(SRAconst x [c]) x [c])
	// result: y
	for {
		c := auxIntToInt64(v.AuxInt)
		y := v_0
		if y.Op != OpARM64SRAconst || auxIntToInt64(y.AuxInt) != c {
			break
		}
		x := y.Args[0]
		if x != v_1 {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (ORshiftRL (MOVDconst [c]) x [d])
	// result: (ORconst [c] (SRLconst <x.Type> x [d]))
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		v.reset(OpARM64ORconst)
		v.AuxInt = int64ToAuxInt(c)
		v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
		v0.AuxInt = int64ToAuxInt(d)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (ORshiftRL x (MOVDconst [c]) [d])
	// result: (ORconst x [int64(uint64(c)>>uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64ORconst)
		v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
		v.AddArg(x)
		return true
	}
	// match: (ORshiftRL y:(SRLconst x [c]) x [c])
	// result: y
	for {
		c := auxIntToInt64(v.AuxInt)
		y := v_0
		if y.Op != OpARM64SRLconst || auxIntToInt64(y.AuxInt) != c {
			break
		}
		x := y.Args[0]
		if x != v_1 {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
	// cond: lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
	// result: (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
	for {
		rc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64ANDconst {
			break
		}
		ac := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if v_1.Op != OpARM64SLLconst {
			break
		}
		lc := auxIntToInt64(v_1.AuxInt)
		y := v_1.Args[0]
		if !(lc > rc && ac == ^((1<<uint(64-lc)-1)<<uint64(lc-rc))) {
			break
		}
		v.reset(OpARM64BFI)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
		v.AddArg2(x, y)
		return true
	}
	// match: (ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x))
	// cond: lc < rc && ac == ^((1<<uint(64-rc)-1))
	// result: (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
	for {
		rc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64ANDconst {
			break
		}
		ac := auxIntToInt64(v_0.AuxInt)
		y := v_0.Args[0]
		if v_1.Op != OpARM64SLLconst {
			break
		}
		lc := auxIntToInt64(v_1.AuxInt)
		x := v_1.Args[0]
		if !(lc < rc && ac == ^(1<<uint(64-rc)-1)) {
			break
		}
		v.reset(OpARM64BFXIL)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
		v.AddArg2(y, x)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64ORshiftRO(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (ORshiftRO (MOVDconst [c]) x [d])
	// result: (ORconst [c] (RORconst <x.Type> x [d]))
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		v.reset(OpARM64ORconst)
		v.AuxInt = int64ToAuxInt(c)
		v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type)
		v0.AuxInt = int64ToAuxInt(d)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (ORshiftRO x (MOVDconst [c]) [d])
	// result: (ORconst x [rotateRight64(c, d)])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64ORconst)
		v.AuxInt = int64ToAuxInt(rotateRight64(c, d))
		v.AddArg(x)
		return true
	}
	// match: (ORshiftRO y:(RORconst x [c]) x [c])
	// result: y
	for {
		c := auxIntToInt64(v.AuxInt)
		y := v_0
		if y.Op != OpARM64RORconst || auxIntToInt64(y.AuxInt) != c {
			break
		}
		x := y.Args[0]
		if x != v_1 {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64REV(v *Value) bool {
	v_0 := v.Args[0]
	// match: (REV (REV p))
	// result: p
	for {
		if v_0.Op != OpARM64REV {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64REVW(v *Value) bool {
	v_0 := v.Args[0]
	// match: (REVW (REVW p))
	// result: p
	for {
		if v_0.Op != OpARM64REVW {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64ROR(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROR x (MOVDconst [c]))
	// result: (RORconst x [c&63])
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64RORconst)
		v.AuxInt = int64ToAuxInt(c & 63)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64RORW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORW x (MOVDconst [c]))
	// result: (RORWconst x [c&31])
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64RORWconst)
		v.AuxInt = int64ToAuxInt(c & 31)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64SBCSflags(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> bo)))))
	// result: (SBCSflags x y bo)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpARM64NEGSflags {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpARM64NEG || v_2_0_0.Type != typ.UInt64 {
			break
		}
		v_2_0_0_0 := v_2_0_0.Args[0]
		if v_2_0_0_0.Op != OpARM64NGCzerocarry || v_2_0_0_0.Type != typ.UInt64 {
			break
		}
		bo := v_2_0_0_0.Args[0]
		v.reset(OpARM64SBCSflags)
		v.AddArg3(x, y, bo)
		return true
	}
	// match: (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (MOVDconst [0]))))
	// result: (SUBSflags x y)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpARM64NEGSflags {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 {
			break
		}
		v.reset(OpARM64SUBSflags)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64SLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SLL x (MOVDconst [c]))
	// result: (SLLconst x [c&63])
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64SLLconst)
		v.AuxInt = int64ToAuxInt(c & 63)
		v.AddArg(x)
		return true
	}
	// match: (SLL x (ANDconst [63] y))
	// result: (SLL x y)
	for {
		x := v_0
		if v_1.Op != OpARM64ANDconst || auxIntToInt64(v_1.AuxInt) != 63 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpARM64SLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SLLconst [c] (MOVDconst [d]))
	// result: (MOVDconst [d<<uint64(c)])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(d << uint64(c))
		return true
	}
	// match: (SLLconst [c] (SRLconst [c] x))
	// cond: 0 < c && c < 64
	// result: (ANDconst [^(1<<uint(c)-1)] x)
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		if !(0 < c && c < 64) {
			break
		}
		v.reset(OpARM64ANDconst)
		v.AuxInt = int64ToAuxInt(^(1<<uint(c) - 1))
		v.AddArg(x)
		return true
	}
	// match: (SLLconst [lc] (MOVWreg x))
	// result: (SBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
	for {
		lc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVWreg {
			break
		}
		x := v_0.Args[0]
		v.reset(OpARM64SBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(32, 64-lc)))
		v.AddArg(x)
		return true
	}
	// match: (SLLconst [lc] (MOVHreg x))
	// result: (SBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
	for {
		lc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVHreg {
			break
		}
		x := v_0.Args[0]
		v.reset(OpARM64SBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(16, 64-lc)))
		v.AddArg(x)
		return true
	}
	// match: (SLLconst [lc] (MOVBreg x))
	// result: (SBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
	for {
		lc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVBreg {
			break
		}
		x := v_0.Args[0]
		v.reset(OpARM64SBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(8, 64-lc)))
		v.AddArg(x)
		return true
	}
	// match: (SLLconst [lc] (MOVWUreg x))
	// result: (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
	for {
		lc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVWUreg {
			break
		}
		x := v_0.Args[0]
		v.reset(OpARM64UBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(32, 64-lc)))
		v.AddArg(x)
		return true
	}
	// match: (SLLconst [lc] (MOVHUreg x))
	// result: (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
	for {
		lc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVHUreg {
			break
		}
		x := v_0.Args[0]
		v.reset(OpARM64UBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(16, 64-lc)))
		v.AddArg(x)
		return true
	}
	// match: (SLLconst [lc] (MOVBUreg x))
	// result: (UBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
	for {
		lc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVBUreg {
			break
		}
		x := v_0.Args[0]
		v.reset(OpARM64UBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(8, 64-lc)))
		v.AddArg(x)
		return true
	}
	// match: (SLLconst [sc] (ANDconst [ac] x))
	// cond: isARM64BFMask(sc, ac, 0)
	// result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
	for {
		sc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64ANDconst {
			break
		}
		ac := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isARM64BFMask(sc, ac, 0)) {
			break
		}
		v.reset(OpARM64UBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
		v.AddArg(x)
		return true
	}
	// match: (SLLconst [sc] (UBFIZ [bfc] x))
	// cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
	// result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
	for {
		sc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64UBFIZ {
			break
		}
		bfc := auxIntToArm64BitField(v_0.AuxInt)
		x := v_0.Args[0]
		if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
			break
		}
		v.reset(OpARM64UBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
		v.AddArg(x)
		return true
	}
	return false
}
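// In the SLLconst rules above, shifting a sign- or zero-extended value
// left becomes a single bitfield insert-in-zero (SBFIZ/UBFIZ). The
// width min(extension width, 64-lc) clips the field when the shift
// pushes part of the extended value off the top of the register.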
func rewriteValueARM64_OpARM64SRA(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SRA x (MOVDconst [c]))
	// result: (SRAconst x [c&63])
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64SRAconst)
		v.AuxInt = int64ToAuxInt(c & 63)
		v.AddArg(x)
		return true
	}
	// match: (SRA x (ANDconst [63] y))
	// result: (SRA x y)
	for {
		x := v_0
		if v_1.Op != OpARM64ANDconst || auxIntToInt64(v_1.AuxInt) != 63 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpARM64SRA)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SRAconst [c] (MOVDconst [d]))
	// result: (MOVDconst [d>>uint64(c)])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(d >> uint64(c))
		return true
	}
	// match: (SRAconst [rc] (SLLconst [lc] x))
	// cond: lc > rc
	// result: (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
	for {
		rc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SLLconst {
			break
		}
		lc := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(lc > rc) {
			break
		}
		v.reset(OpARM64SBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
		v.AddArg(x)
		return true
	}
	// match: (SRAconst [rc] (SLLconst [lc] x))
	// cond: lc <= rc
	// result: (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
	for {
		rc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SLLconst {
			break
		}
		lc := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(lc <= rc) {
			break
		}
		v.reset(OpARM64SBFX)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
		v.AddArg(x)
		return true
	}
	// match: (SRAconst [rc] (MOVWreg x))
	// cond: rc < 32
	// result: (SBFX [armBFAuxInt(rc, 32-rc)] x)
	for {
		rc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVWreg {
			break
		}
		x := v_0.Args[0]
		if !(rc < 32) {
			break
		}
		v.reset(OpARM64SBFX)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32-rc))
		v.AddArg(x)
		return true
	}
	// match: (SRAconst [rc] (MOVHreg x))
	// cond: rc < 16
	// result: (SBFX [armBFAuxInt(rc, 16-rc)] x)
	for {
		rc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVHreg {
			break
		}
		x := v_0.Args[0]
		if !(rc < 16) {
			break
		}
		v.reset(OpARM64SBFX)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16-rc))
		v.AddArg(x)
		return true
	}
	// match: (SRAconst [rc] (MOVBreg x))
	// cond: rc < 8
	// result: (SBFX [armBFAuxInt(rc, 8-rc)] x)
	for {
		rc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVBreg {
			break
		}
		x := v_0.Args[0]
		if !(rc < 8) {
			break
		}
		v.reset(OpARM64SBFX)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8-rc))
		v.AddArg(x)
		return true
	}
	// match: (SRAconst [sc] (SBFIZ [bfc] x))
	// cond: sc < bfc.getARM64BFlsb()
	// result: (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
	for {
		sc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SBFIZ {
			break
		}
		bfc := auxIntToArm64BitField(v_0.AuxInt)
		x := v_0.Args[0]
		if !(sc < bfc.getARM64BFlsb()) {
			break
		}
		v.reset(OpARM64SBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
		v.AddArg(x)
		return true
	}
	// match: (SRAconst [sc] (SBFIZ [bfc] x))
	// cond: sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
	// result: (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
	for {
		sc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SBFIZ {
			break
		}
		bfc := auxIntToArm64BitField(v_0.AuxInt)
		x := v_0.Args[0]
		if !(sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
			break
		}
		v.reset(OpARM64SBFX)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64SRL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SRL x (MOVDconst [c]))
	// result: (SRLconst x [c&63])
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64SRLconst)
		v.AuxInt = int64ToAuxInt(c & 63)
		v.AddArg(x)
		return true
	}
	// match: (SRL x (ANDconst [63] y))
	// result: (SRL x y)
	for {
		x := v_0
		if v_1.Op != OpARM64ANDconst || auxIntToInt64(v_1.AuxInt) != 63 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpARM64SRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SRLconst [c] (MOVDconst [d]))
	// result: (MOVDconst [int64(uint64(d)>>uint64(c))])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c)))
		return true
	}
	// match: (SRLconst [c] (SLLconst [c] x))
	// cond: 0 < c && c < 64
	// result: (ANDconst [1<<uint(64-c)-1] x)
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		if !(0 < c && c < 64) {
			break
		}
		v.reset(OpARM64ANDconst)
		v.AuxInt = int64ToAuxInt(1<<uint(64-c) - 1)
		v.AddArg(x)
		return true
	}
	// match: (SRLconst [rc] (MOVWUreg x))
	// cond: rc >= 32
	// result: (MOVDconst [0])
	for {
		rc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVWUreg {
			break
		}
		if !(rc >= 32) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SRLconst [rc] (MOVHUreg x))
	// cond: rc >= 16
	// result: (MOVDconst [0])
	for {
		rc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVHUreg {
			break
		}
		if !(rc >= 16) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SRLconst [rc] (MOVBUreg x))
	// cond: rc >= 8
	// result: (MOVDconst [0])
	for {
		rc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVBUreg {
			break
		}
		if !(rc >= 8) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SRLconst [rc] (SLLconst [lc] x))
	// cond: lc > rc
	// result: (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
	for {
		rc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SLLconst {
			break
		}
		lc := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(lc > rc) {
			break
		}
		v.reset(OpARM64UBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
		v.AddArg(x)
		return true
	}
	// match: (SRLconst [rc] (SLLconst [lc] x))
	// cond: lc < rc
	// result: (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
	for {
		rc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SLLconst {
			break
		}
		lc := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(lc < rc) {
			break
		}
		v.reset(OpARM64UBFX)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
		v.AddArg(x)
		return true
	}
	// match: (SRLconst [rc] (MOVWUreg x))
	// cond: rc < 32
	// result: (UBFX [armBFAuxInt(rc, 32-rc)] x)
	for {
		rc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVWUreg {
			break
		}
		x := v_0.Args[0]
		if !(rc < 32) {
			break
		}
		v.reset(OpARM64UBFX)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32-rc))
		v.AddArg(x)
		return true
	}
	// match: (SRLconst [rc] (MOVHUreg x))
	// cond: rc < 16
	// result: (UBFX [armBFAuxInt(rc, 16-rc)] x)
	for {
		rc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVHUreg {
			break
		}
		x := v_0.Args[0]
		if !(rc < 16) {
			break
		}
		v.reset(OpARM64UBFX)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16-rc))
		v.AddArg(x)
		return true
	}
	// match: (SRLconst [rc] (MOVBUreg x))
	// cond: rc < 8
	// result: (UBFX [armBFAuxInt(rc, 8-rc)] x)
	for {
		rc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVBUreg {
			break
		}
		x := v_0.Args[0]
		if !(rc < 8) {
			break
		}
		v.reset(OpARM64UBFX)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8-rc))
		v.AddArg(x)
		return true
	}
	// match: (SRLconst [sc] (ANDconst [ac] x))
	// cond: isARM64BFMask(sc, ac, sc)
	// result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
	for {
		sc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64ANDconst {
			break
		}
		ac := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isARM64BFMask(sc, ac, sc)) {
			break
		}
		v.reset(OpARM64UBFX)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
		v.AddArg(x)
		return true
	}
	// match: (SRLconst [sc] (UBFX [bfc] x))
	// cond: sc < bfc.getARM64BFwidth()
	// result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
	for {
		sc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64UBFX {
			break
		}
		bfc := auxIntToArm64BitField(v_0.AuxInt)
		x := v_0.Args[0]
		if !(sc < bfc.getARM64BFwidth()) {
			break
		}
		v.reset(OpARM64UBFX)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
		v.AddArg(x)
		return true
	}
	// match: (SRLconst [sc] (UBFIZ [bfc] x))
	// cond: sc == bfc.getARM64BFlsb()
	// result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
	for {
		sc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64UBFIZ {
			break
		}
		bfc := auxIntToArm64BitField(v_0.AuxInt)
		x := v_0.Args[0]
		if !(sc == bfc.getARM64BFlsb()) {
			break
		}
		v.reset(OpARM64ANDconst)
		v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
		v.AddArg(x)
		return true
	}
	// match: (SRLconst [sc] (UBFIZ [bfc] x))
	// cond: sc < bfc.getARM64BFlsb()
	// result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
	for {
		sc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64UBFIZ {
			break
		}
		bfc := auxIntToArm64BitField(v_0.AuxInt)
		x := v_0.Args[0]
		if !(sc < bfc.getARM64BFlsb()) {
			break
		}
		v.reset(OpARM64UBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
		v.AddArg(x)
		return true
	}
	// match: (SRLconst [sc] (UBFIZ [bfc] x))
	// cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
	// result: (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
	for {
		sc := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64UBFIZ {
			break
		}
		bfc := auxIntToArm64BitField(v_0.AuxInt)
		x := v_0.Args[0]
		if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
			break
		}
		v.reset(OpARM64UBFX)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
		v.AddArg(x)
		return true
	}
	return false
}
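// The SRLconst rules above convert shift pairs into bitfield extracts:
// (x<<lc)>>rc with lc < rc isolates 64-rc bits starting at bit rc-lc,
// which is UBFX [armBFAuxInt(rc-lc, 64-rc)]. A worked sketch
// (illustrative only, not part of the generated rewriter):
func exampleUBFX(x uint64) uint64 {
	// (x<<56)>>59 extracts bits 3..7 of x: a 5-bit field at lsb 3,
	// i.e. UBFX [armBFAuxInt(3, 5)] in the notation above.
	return (x << 56) >> 59
}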
func rewriteValueARM64_OpARM64STP(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem)
	// cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (STP [off1+int32(off2)] {sym} ptr val1 val2 mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpARM64ADDconst {
			break
		}
		off2 := auxIntToInt64(v_0.AuxInt)
		ptr := v_0.Args[0]
		val1 := v_1
		val2 := v_2
		mem := v_3
		if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64STP)
		v.AuxInt = int32ToAuxInt(off1 + int32(off2))
		v.Aux = symToAux(sym)
		v.AddArg4(ptr, val1, val2, mem)
		return true
	}
	// match: (STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem)
	// cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
	// result: (STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpARM64MOVDaddr {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		val1 := v_1
		val2 := v_2
		mem := v_3
		if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
			break
		}
		v.reset(OpARM64STP)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg4(ptr, val1, val2, mem)
		return true
	}
	// match: (STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst [0]) mem)
	// result: (MOVQstorezero [off] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 || v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
			break
		}
		mem := v_3
		v.reset(OpARM64MOVQstorezero)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64SUB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SUB x (MOVDconst [c]))
	// result: (SUBconst [c] x)
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64SUBconst)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (SUB a l:(MUL x y))
	// cond: l.Uses==1 && clobber(l)
	// result: (MSUB a x y)
	for {
		a := v_0
		l := v_1
		if l.Op != OpARM64MUL {
			break
		}
		y := l.Args[1]
		x := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		v.reset(OpARM64MSUB)
		v.AddArg3(a, x, y)
		return true
	}
	// match: (SUB a l:(MNEG x y))
	// cond: l.Uses==1 && clobber(l)
	// result: (MADD a x y)
	for {
		a := v_0
		l := v_1
		if l.Op != OpARM64MNEG {
			break
		}
		y := l.Args[1]
		x := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		v.reset(OpARM64MADD)
		v.AddArg3(a, x, y)
		return true
	}
	// match: (SUB a l:(MULW x y))
	// cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l)
	// result: (MSUBW a x y)
	for {
		a := v_0
		l := v_1
		if l.Op != OpARM64MULW {
			break
		}
		y := l.Args[1]
		x := l.Args[0]
		if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) {
			break
		}
		v.reset(OpARM64MSUBW)
		v.AddArg3(a, x, y)
		return true
	}
	// match: (SUB a l:(MNEGW x y))
	// cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l)
	// result: (MADDW a x y)
	for {
		a := v_0
		l := v_1
		if l.Op != OpARM64MNEGW {
			break
		}
		y := l.Args[1]
		x := l.Args[0]
		if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) {
			break
		}
		v.reset(OpARM64MADDW)
		v.AddArg3(a, x, y)
		return true
	}
	// match: (SUB x x)
	// result: (MOVDconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SUB x (SUB y z))
	// result: (SUB (ADD <v.Type> x z) y)
	for {
		x := v_0
		if v_1.Op != OpARM64SUB {
			break
		}
		z := v_1.Args[1]
		y := v_1.Args[0]
		v.reset(OpARM64SUB)
		v0 := b.NewValue0(v.Pos, OpARM64ADD, v.Type)
		v0.AddArg2(x, z)
		v.AddArg2(v0, y)
		return true
	}
	// match: (SUB (SUB x y) z)
	// result: (SUB x (ADD <y.Type> y z))
	for {
		if v_0.Op != OpARM64SUB {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		z := v_1
		v.reset(OpARM64SUB)
		v0 := b.NewValue0(v.Pos, OpARM64ADD, y.Type)
		v0.AddArg2(y, z)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SUB x0 x1:(SLLconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (SUBshiftLL x0 y [c])
	for {
		x0 := v_0
		x1 := v_1
		if x1.Op != OpARM64SLLconst {
			break
		}
		c := auxIntToInt64(x1.AuxInt)
		y := x1.Args[0]
		if !(clobberIfDead(x1)) {
			break
		}
		v.reset(OpARM64SUBshiftLL)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg2(x0, y)
		return true
	}
	// match: (SUB x0 x1:(SRLconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (SUBshiftRL x0 y [c])
	for {
		x0 := v_0
		x1 := v_1
		if x1.Op != OpARM64SRLconst {
			break
		}
		c := auxIntToInt64(x1.AuxInt)
		y := x1.Args[0]
		if !(clobberIfDead(x1)) {
			break
		}
		v.reset(OpARM64SUBshiftRL)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg2(x0, y)
		return true
	}
	// match: (SUB x0 x1:(SRAconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (SUBshiftRA x0 y [c])
	for {
		x0 := v_0
		x1 := v_1
		if x1.Op != OpARM64SRAconst {
			break
		}
		c := auxIntToInt64(x1.AuxInt)
		y := x1.Args[0]
		if !(clobberIfDead(x1)) {
			break
		}
		v.reset(OpARM64SUBshiftRA)
		v.AuxInt = int64ToAuxInt(c)
		v.AddArg2(x0, y)
		return true
	}
	return false
}
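// The two reassociation rules above use x - (y - z) == (x + z) - y and
// (x - y) - z == x - (y + z); both expose an ADD that later rules can
// fold into shifted or immediate forms.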
func rewriteValueARM64_OpARM64SUBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SUBconst [0] x)
	// result: x
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SUBconst [c] (MOVDconst [d]))
	// result: (MOVDconst [d-c])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(d - c)
		return true
	}
	// match: (SUBconst [c] (SUBconst [d] x))
	// result: (ADDconst [-c-d] x)
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SUBconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpARM64ADDconst)
		v.AuxInt = int64ToAuxInt(-c - d)
		v.AddArg(x)
		return true
	}
	// match: (SUBconst [c] (ADDconst [d] x))
	// result: (ADDconst [-c+d] x)
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64ADDconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpARM64ADDconst)
		v.AuxInt = int64ToAuxInt(-c + d)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64SUBshiftLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBshiftLL x (MOVDconst [c]) [d])
	// result: (SUBconst x [int64(uint64(c)<<uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64SUBconst)
		v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
		v.AddArg(x)
		return true
	}
	// match: (SUBshiftLL (SLLconst x [c]) x [c])
	// result: (MOVDconst [0])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64SUBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SUBconst [0] x)
	// result: x
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SUBconst [c] (MOVDconst [d]))
	// result: (MOVDconst [d-c])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(d - c)
		return true
	}
	// match: (SUBconst [c] (SUBconst [d] x))
	// result: (ADDconst [-c-d] x)
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SUBconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpARM64ADDconst)
		v.AuxInt = int64ToAuxInt(-c - d)
		v.AddArg(x)
		return true
	}
	// match: (SUBconst [c] (ADDconst [d] x))
	// result: (ADDconst [-c+d] x)
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64ADDconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpARM64ADDconst)
		v.AuxInt = int64ToAuxInt(-c + d)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64SUBshiftLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBshiftLL x (MOVDconst [c]) [d])
	// result: (SUBconst x [int64(uint64(c)<<uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64SUBconst)
		v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
		v.AddArg(x)
		return true
	}
	// match: (SUBshiftLL (SLLconst x [c]) x [c])
	// result: (MOVDconst [0])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64SUBshiftRA(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBshiftRA x (MOVDconst [c]) [d])
	// result: (SUBconst x [c>>uint64(d)])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64SUBconst)
		v.AuxInt = int64ToAuxInt(c >> uint64(d))
		v.AddArg(x)
		return true
	}
	// match: (SUBshiftRA (SRAconst x [c]) x [c])
	// result: (MOVDconst [0])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
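// Editorial note (illustrative; not generated from the .rules file): the
// SUBshift* rules come in pairs. The first folds a constant shifted operand
// into a plain SUBconst, e.g. (SUBshiftLL x (MOVDconst [c]) [d]) computes
// x - (c<<d), so it becomes (SUBconst x [c<<d]). The second recognizes the
// self-cancelling form: (SUBshiftLL (SLLconst x [c]) x [c]) is
// (x<<c) - (x<<c), which is always zero.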
func rewriteValueARM64_OpARM64SUBshiftRL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBshiftRL x (MOVDconst [c]) [d])
	// result: (SUBconst x [int64(uint64(c)>>uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64SUBconst)
		v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
		v.AddArg(x)
		return true
	}
	// match: (SUBshiftRL (SRLconst x [c]) x [c])
	// result: (MOVDconst [0])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64TST(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (TST x (MOVDconst [c]))
	// result: (TSTconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			v.reset(OpARM64TSTconst)
			v.AuxInt = int64ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TST x0 x1:(SLLconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (TSTshiftLL x0 y [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			x1 := v_1
			if x1.Op != OpARM64SLLconst {
				continue
			}
			c := auxIntToInt64(x1.AuxInt)
			y := x1.Args[0]
			if !(clobberIfDead(x1)) {
				continue
			}
			v.reset(OpARM64TSTshiftLL)
			v.AuxInt = int64ToAuxInt(c)
			v.AddArg2(x0, y)
			return true
		}
		break
	}
	// match: (TST x0 x1:(SRLconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (TSTshiftRL x0 y [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			x1 := v_1
			if x1.Op != OpARM64SRLconst {
				continue
			}
			c := auxIntToInt64(x1.AuxInt)
			y := x1.Args[0]
			if !(clobberIfDead(x1)) {
				continue
			}
			v.reset(OpARM64TSTshiftRL)
			v.AuxInt = int64ToAuxInt(c)
			v.AddArg2(x0, y)
			return true
		}
		break
	}
	// match: (TST x0 x1:(SRAconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (TSTshiftRA x0 y [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			x1 := v_1
			if x1.Op != OpARM64SRAconst {
				continue
			}
			c := auxIntToInt64(x1.AuxInt)
			y := x1.Args[0]
			if !(clobberIfDead(x1)) {
				continue
			}
			v.reset(OpARM64TSTshiftRA)
			v.AuxInt = int64ToAuxInt(c)
			v.AddArg2(x0, y)
			return true
		}
		break
	}
	// match: (TST x0 x1:(RORconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (TSTshiftRO x0 y [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			x1 := v_1
			if x1.Op != OpARM64RORconst {
				continue
			}
			c := auxIntToInt64(x1.AuxInt)
			y := x1.Args[0]
			if !(clobberIfDead(x1)) {
				continue
			}
			v.reset(OpARM64TSTshiftRO)
			v.AuxInt = int64ToAuxInt(c)
			v.AddArg2(x0, y)
			return true
		}
		break
	}
	return false
}
func rewriteValueARM64_OpARM64TSTW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (TSTW x (MOVDconst [c]))
	// result: (TSTWconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			v.reset(OpARM64TSTWconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
func rewriteValueARM64_OpARM64TSTWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TSTWconst (MOVDconst [x]) [y])
	// result: (FlagConstant [logicFlags32(int32(x)&y)])
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64FlagConstant)
		v.AuxInt = flagConstantToAuxInt(logicFlags32(int32(x) & y))
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64TSTconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TSTconst (MOVDconst [x]) [y])
	// result: (FlagConstant [logicFlags64(x&y)])
	for {
		y := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64FlagConstant)
		v.AuxInt = flagConstantToAuxInt(logicFlags64(x & y))
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64TSTshiftLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TSTshiftLL (MOVDconst [c]) x [d])
	// result: (TSTconst [c] (SLLconst <x.Type> x [d]))
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		v.reset(OpARM64TSTconst)
		v.AuxInt = int64ToAuxInt(c)
		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
		v0.AuxInt = int64ToAuxInt(d)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (TSTshiftLL x (MOVDconst [c]) [d])
	// result: (TSTconst x [int64(uint64(c)<<uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64TSTconst)
		v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64TSTshiftRA(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TSTshiftRA (MOVDconst [c]) x [d])
	// result: (TSTconst [c] (SRAconst <x.Type> x [d]))
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		v.reset(OpARM64TSTconst)
		v.AuxInt = int64ToAuxInt(c)
		v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
		v0.AuxInt = int64ToAuxInt(d)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (TSTshiftRA x (MOVDconst [c]) [d])
	// result: (TSTconst x [c>>uint64(d)])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64TSTconst)
		v.AuxInt = int64ToAuxInt(c >> uint64(d))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64TSTshiftRL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TSTshiftRL (MOVDconst [c]) x [d])
	// result: (TSTconst [c] (SRLconst <x.Type> x [d]))
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		v.reset(OpARM64TSTconst)
		v.AuxInt = int64ToAuxInt(c)
		v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
		v0.AuxInt = int64ToAuxInt(d)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (TSTshiftRL x (MOVDconst [c]) [d])
	// result: (TSTconst x [int64(uint64(c)>>uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64TSTconst)
		v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64TSTshiftRO(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TSTshiftRO (MOVDconst [c]) x [d])
	// result: (TSTconst [c] (RORconst <x.Type> x [d]))
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		v.reset(OpARM64TSTconst)
		v.AuxInt = int64ToAuxInt(c)
		v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type)
		v0.AuxInt = int64ToAuxInt(d)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (TSTshiftRO x (MOVDconst [c]) [d])
	// result: (TSTconst x [rotateRight64(c, d)])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64TSTconst)
		v.AuxInt = int64ToAuxInt(rotateRight64(c, d))
		v.AddArg(x)
		return true
	}
	return false
}
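// Editorial note (illustrative; not generated from the .rules file): TST is
// ARM64's "AND, discard the result, set flags". When both operands end up
// constant the comparison is decided at compile time; the TSTconst rule above
// folds, for example,
//
//	(TSTconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags64(x&y)])
//
// so a later branch on those flags can be resolved statically.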
func rewriteValueARM64_OpARM64UBFIZ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (UBFIZ [bfc] (SLLconst [sc] x))
	// cond: sc < bfc.getARM64BFwidth()
	// result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
	for {
		bfc := auxIntToArm64BitField(v.AuxInt)
		if v_0.Op != OpARM64SLLconst {
			break
		}
		sc := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(sc < bfc.getARM64BFwidth()) {
			break
		}
		v.reset(OpARM64UBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
	v_0 := v.Args[0]
	// match: (UBFX [bfc] (ANDconst [c] x))
	// cond: isARM64BFMask(0, c, 0) && bfc.getARM64BFlsb() + bfc.getARM64BFwidth() <= arm64BFWidth(c, 0)
	// result: (UBFX [bfc] x)
	for {
		bfc := auxIntToArm64BitField(v.AuxInt)
		if v_0.Op != OpARM64ANDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isARM64BFMask(0, c, 0) && bfc.getARM64BFlsb()+bfc.getARM64BFwidth() <= arm64BFWidth(c, 0)) {
			break
		}
		v.reset(OpARM64UBFX)
		v.AuxInt = arm64BitFieldToAuxInt(bfc)
		v.AddArg(x)
		return true
	}
	// match: (UBFX [bfc] (SRLconst [sc] x))
	// cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
	// result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
	for {
		bfc := auxIntToArm64BitField(v.AuxInt)
		if v_0.Op != OpARM64SRLconst {
			break
		}
		sc := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
			break
		}
		v.reset(OpARM64UBFX)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
		v.AddArg(x)
		return true
	}
	// match: (UBFX [bfc] (SLLconst [sc] x))
	// cond: sc == bfc.getARM64BFlsb()
	// result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
	for {
		bfc := auxIntToArm64BitField(v.AuxInt)
		if v_0.Op != OpARM64SLLconst {
			break
		}
		sc := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(sc == bfc.getARM64BFlsb()) {
			break
		}
		v.reset(OpARM64ANDconst)
		v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
		v.AddArg(x)
		return true
	}
	// match: (UBFX [bfc] (SLLconst [sc] x))
	// cond: sc < bfc.getARM64BFlsb()
	// result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
	for {
		bfc := auxIntToArm64BitField(v.AuxInt)
		if v_0.Op != OpARM64SLLconst {
			break
		}
		sc := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(sc < bfc.getARM64BFlsb()) {
			break
		}
		v.reset(OpARM64UBFX)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
		v.AddArg(x)
		return true
	}
	// match: (UBFX [bfc] (SLLconst [sc] x))
	// cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
	// result: (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
	for {
		bfc := auxIntToArm64BitField(v.AuxInt)
		if v_0.Op != OpARM64SLLconst {
			break
		}
		sc := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
			break
		}
		v.reset(OpARM64UBFIZ)
		v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
		v.AddArg(x)
		return true
	}
	return false
}
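// Editorial note (illustrative; not generated from the .rules file): UBFX
// [bfc] extracts bfc.width bits starting at bit bfc.lsb, zero-extended.
// Composing it with a constant shift just relocates the field: extracting
// bits <lsb, width> of (x >> sc) is the same as extracting bits
// <lsb+sc, width> of x, which is what the SRLconst rule above computes,
// provided the combined field still fits below bit 64.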
func rewriteValueARM64_OpARM64UDIV(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (UDIV x (MOVDconst [1]))
	// result: x
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (UDIV x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c)
	// result: (SRLconst [log64(c)] x)
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(isPowerOfTwo64(c)) {
			break
		}
		v.reset(OpARM64SRLconst)
		v.AuxInt = int64ToAuxInt(log64(c))
		v.AddArg(x)
		return true
	}
	// match: (UDIV (MOVDconst [c]) (MOVDconst [d]))
	// cond: d != 0
	// result: (MOVDconst [int64(uint64(c)/uint64(d))])
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		d := auxIntToInt64(v_1.AuxInt)
		if !(d != 0) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d)))
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64UDIVW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (UDIVW x (MOVDconst [c]))
	// cond: uint32(c)==1
	// result: (MOVWUreg x)
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(uint32(c) == 1) {
			break
		}
		v.reset(OpARM64MOVWUreg)
		v.AddArg(x)
		return true
	}
	// match: (UDIVW x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c) && is32Bit(c)
	// result: (SRLconst [log64(c)] (MOVWUreg <v.Type> x))
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(isPowerOfTwo64(c) && is32Bit(c)) {
			break
		}
		v.reset(OpARM64SRLconst)
		v.AuxInt = int64ToAuxInt(log64(c))
		v0 := b.NewValue0(v.Pos, OpARM64MOVWUreg, v.Type)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (UDIVW (MOVDconst [c]) (MOVDconst [d]))
	// cond: d != 0
	// result: (MOVDconst [int64(uint32(c)/uint32(d))])
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		d := auxIntToInt64(v_1.AuxInt)
		if !(d != 0) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(int64(uint32(c) / uint32(d)))
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64UMOD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (UMOD <typ.UInt64> x y)
	// result: (MSUB <typ.UInt64> x y (UDIV <typ.UInt64> x y))
	for {
		if v.Type != typ.UInt64 {
			break
		}
		x := v_0
		y := v_1
		v.reset(OpARM64MSUB)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpARM64UDIV, typ.UInt64)
		v0.AddArg2(x, y)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (UMOD _ (MOVDconst [1]))
	// result: (MOVDconst [0])
	for {
		if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (UMOD x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c)
	// result: (ANDconst [c-1] x)
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(isPowerOfTwo64(c)) {
			break
		}
		v.reset(OpARM64ANDconst)
		v.AuxInt = int64ToAuxInt(c - 1)
		v.AddArg(x)
		return true
	}
	// match: (UMOD (MOVDconst [c]) (MOVDconst [d]))
	// cond: d != 0
	// result: (MOVDconst [int64(uint64(c)%uint64(d))])
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		d := auxIntToInt64(v_1.AuxInt)
		if !(d != 0) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d)))
		return true
	}
	return false
}
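// Editorial note (illustrative; not generated from the .rules file): ARM64
// has no modulo instruction, so the generic UMOD rule above expands
// x % y as x - (x/y)*y with a single MSUB:
//
//	(UMOD x y) => (MSUB x y (UDIV x y))
//
// The power-of-two case avoids even the divide: for c == 8, x % 8 becomes
// (ANDconst [7] x), mirroring the shift-based strength reduction in UDIV.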
func rewriteValueARM64_OpARM64UMODW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (UMODW <typ.UInt32> x y)
	// result: (MSUBW <typ.UInt32> x y (UDIVW <typ.UInt32> x y))
	for {
		if v.Type != typ.UInt32 {
			break
		}
		x := v_0
		y := v_1
		v.reset(OpARM64MSUBW)
		v.Type = typ.UInt32
		v0 := b.NewValue0(v.Pos, OpARM64UDIVW, typ.UInt32)
		v0.AddArg2(x, y)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (UMODW _ (MOVDconst [c]))
	// cond: uint32(c)==1
	// result: (MOVDconst [0])
	for {
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(uint32(c) == 1) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (UMODW x (MOVDconst [c]))
	// cond: isPowerOfTwo64(c) && is32Bit(c)
	// result: (ANDconst [c-1] x)
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(isPowerOfTwo64(c) && is32Bit(c)) {
			break
		}
		v.reset(OpARM64ANDconst)
		v.AuxInt = int64ToAuxInt(c - 1)
		v.AddArg(x)
		return true
	}
	// match: (UMODW (MOVDconst [c]) (MOVDconst [d]))
	// cond: d != 0
	// result: (MOVDconst [int64(uint32(c)%uint32(d))])
	for {
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		d := auxIntToInt64(v_1.AuxInt)
		if !(d != 0) {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(int64(uint32(c) % uint32(d)))
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64XOR(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XOR x (MOVDconst [c]))
	// result: (XORconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MOVDconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			v.reset(OpARM64XORconst)
			v.AuxInt = int64ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XOR x x)
	// result: (MOVDconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (XOR x (MVN y))
	// result: (EON x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpARM64MVN {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpARM64EON)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (XOR x0 x1:(SLLconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (XORshiftLL x0 y [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			x1 := v_1
			if x1.Op != OpARM64SLLconst {
				continue
			}
			c := auxIntToInt64(x1.AuxInt)
			y := x1.Args[0]
			if !(clobberIfDead(x1)) {
				continue
			}
			v.reset(OpARM64XORshiftLL)
			v.AuxInt = int64ToAuxInt(c)
			v.AddArg2(x0, y)
			return true
		}
		break
	}
	// match: (XOR x0 x1:(SRLconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (XORshiftRL x0 y [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			x1 := v_1
			if x1.Op != OpARM64SRLconst {
				continue
			}
			c := auxIntToInt64(x1.AuxInt)
			y := x1.Args[0]
			if !(clobberIfDead(x1)) {
				continue
			}
			v.reset(OpARM64XORshiftRL)
			v.AuxInt = int64ToAuxInt(c)
			v.AddArg2(x0, y)
			return true
		}
		break
	}
	// match: (XOR x0 x1:(SRAconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (XORshiftRA x0 y [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			x1 := v_1
			if x1.Op != OpARM64SRAconst {
				continue
			}
			c := auxIntToInt64(x1.AuxInt)
			y := x1.Args[0]
			if !(clobberIfDead(x1)) {
				continue
			}
			v.reset(OpARM64XORshiftRA)
			v.AuxInt = int64ToAuxInt(c)
			v.AddArg2(x0, y)
			return true
		}
		break
	}
	// match: (XOR x0 x1:(RORconst [c] y))
	// cond: clobberIfDead(x1)
	// result: (XORshiftRO x0 y [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x0 := v_0
			x1 := v_1
			if x1.Op != OpARM64RORconst {
				continue
			}
			c := auxIntToInt64(x1.AuxInt)
			y := x1.Args[0]
			if !(clobberIfDead(x1)) {
				continue
			}
			v.reset(OpARM64XORshiftRO)
			v.AuxInt = int64ToAuxInt(c)
			v.AddArg2(x0, y)
			return true
		}
		break
	}
	return false
}
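// Editorial note (illustrative; not generated from the .rules file): XOR is
// commutative, and the generated matcher handles that with the two-iteration
// loops seen above: the first pass tries the operands as written (v_0, v_1),
// the second swaps them, so a rule written for (XOR x (MOVDconst [c])) also
// fires for (XOR (MOVDconst [c]) x).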
func rewriteValueARM64_OpARM64XORconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (XORconst [0] x)
	// result: x
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (XORconst [-1] x)
	// result: (MVN x)
	for {
		if auxIntToInt64(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.reset(OpARM64MVN)
		v.AddArg(x)
		return true
	}
	// match: (XORconst [c] (MOVDconst [d]))
	// result: (MOVDconst [c^d])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(c ^ d)
		return true
	}
	// match: (XORconst [c] (XORconst [d] x))
	// result: (XORconst [c^d] x)
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64XORconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpARM64XORconst)
		v.AuxInt = int64ToAuxInt(c ^ d)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (XORshiftLL (MOVDconst [c]) x [d])
	// result: (XORconst [c] (SLLconst <x.Type> x [d]))
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		v.reset(OpARM64XORconst)
		v.AuxInt = int64ToAuxInt(c)
		v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
		v0.AuxInt = int64ToAuxInt(d)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (XORshiftLL x (MOVDconst [c]) [d])
	// result: (XORconst x [int64(uint64(c)<<uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64XORconst)
		v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
		v.AddArg(x)
		return true
	}
	// match: (XORshiftLL (SLLconst x [c]) x [c])
	// result: (MOVDconst [0])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (XORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
	// result: (REV16W x)
	for {
		if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
			break
		}
		x := v_0.Args[0]
		if x != v_1 {
			break
		}
		v.reset(OpARM64REV16W)
		v.AddArg(x)
		return true
	}
	// match: (XORshiftLL [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x))
	// cond: uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
	// result: (REV16W x)
	for {
		if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 24) {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpARM64ANDconst {
			break
		}
		c1 := auxIntToInt64(v_0_0.AuxInt)
		x := v_0_0.Args[0]
		if v_1.Op != OpARM64ANDconst {
			break
		}
		c2 := auxIntToInt64(v_1.AuxInt)
		if x != v_1.Args[0] || !(uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff) {
			break
		}
		v.reset(OpARM64REV16W)
		v.AddArg(x)
		return true
	}
	// match: (XORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
	// cond: (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff)
	// result: (REV16 x)
	for {
		if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpARM64ANDconst {
			break
		}
		c1 := auxIntToInt64(v_0_0.AuxInt)
		x := v_0_0.Args[0]
		if v_1.Op != OpARM64ANDconst {
			break
		}
		c2 := auxIntToInt64(v_1.AuxInt)
		if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) {
			break
		}
		v.reset(OpARM64REV16)
		v.AddArg(x)
		return true
	}
	// match: (XORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
	// cond: (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff)
	// result: (REV16 (ANDconst <x.Type> [0xffffffff] x))
	for {
		if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpARM64ANDconst {
			break
		}
		c1 := auxIntToInt64(v_0_0.AuxInt)
		x := v_0_0.Args[0]
		if v_1.Op != OpARM64ANDconst {
			break
		}
		c2 := auxIntToInt64(v_1.AuxInt)
		if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) {
			break
		}
		v.reset(OpARM64REV16)
		v0 := b.NewValue0(v.Pos, OpARM64ANDconst, x.Type)
		v0.AuxInt = int64ToAuxInt(0xffffffff)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (XORshiftLL [c] (SRLconst x [64-c]) x2)
	// result: (EXTRconst [64-c] x2 x)
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
			break
		}
		x := v_0.Args[0]
		x2 := v_1
		v.reset(OpARM64EXTRconst)
		v.AuxInt = int64ToAuxInt(64 - c)
		v.AddArg2(x2, x)
		return true
	}
	// match: (XORshiftLL <t> [c] (UBFX [bfc] x) x2)
	// cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
	// result: (EXTRWconst [32-c] x2 x)
	for {
		t := v.Type
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64UBFX {
			break
		}
		bfc := auxIntToArm64BitField(v_0.AuxInt)
		x := v_0.Args[0]
		x2 := v_1
		if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
			break
		}
		v.reset(OpARM64EXTRWconst)
		v.AuxInt = int64ToAuxInt(32 - c)
		v.AddArg2(x2, x)
		return true
	}
	return false
}
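// Editorial note (illustrative; not generated from the .rules file): the
// XORshiftLL [8] patterns above are byte-swap recognition. XOR-ing the odd
// and even bytes of a value, masked with 0xff00ff00.../0x00ff00ff... and
// offset by a shift of 8, exchanges adjacent bytes within each 16-bit lane,
// so the whole expression collapses to a single REV16/REV16W instruction.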
func rewriteValueARM64_OpARM64XORshiftRA(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (XORshiftRA (MOVDconst [c]) x [d])
	// result: (XORconst [c] (SRAconst <x.Type> x [d]))
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		v.reset(OpARM64XORconst)
		v.AuxInt = int64ToAuxInt(c)
		v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
		v0.AuxInt = int64ToAuxInt(d)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (XORshiftRA x (MOVDconst [c]) [d])
	// result: (XORconst x [c>>uint64(d)])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64XORconst)
		v.AuxInt = int64ToAuxInt(c >> uint64(d))
		v.AddArg(x)
		return true
	}
	// match: (XORshiftRA (SRAconst x [c]) x [c])
	// result: (MOVDconst [0])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (XORshiftRL (MOVDconst [c]) x [d])
	// result: (XORconst [c] (SRLconst <x.Type> x [d]))
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		v.reset(OpARM64XORconst)
		v.AuxInt = int64ToAuxInt(c)
		v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
		v0.AuxInt = int64ToAuxInt(d)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (XORshiftRL x (MOVDconst [c]) [d])
	// result: (XORconst x [int64(uint64(c)>>uint64(d))])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64XORconst)
		v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
		v.AddArg(x)
		return true
	}
	// match: (XORshiftRL (SRLconst x [c]) x [c])
	// result: (MOVDconst [0])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueARM64_OpARM64XORshiftRO(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (XORshiftRO (MOVDconst [c]) x [d])
	// result: (XORconst [c] (RORconst <x.Type> x [d]))
	for {
		d := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		v.reset(OpARM64XORconst)
		v.AuxInt = int64ToAuxInt(c)
		v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type)
		v0.AuxInt = int64ToAuxInt(d)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (XORshiftRO x (MOVDconst [c]) [d])
	// result: (XORconst x [rotateRight64(c, d)])
	for {
		d := auxIntToInt64(v.AuxInt)
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpARM64XORconst)
		v.AuxInt = int64ToAuxInt(rotateRight64(c, d))
		v.AddArg(x)
		return true
	}
	// match: (XORshiftRO (RORconst x [c]) x [c])
	// result: (MOVDconst [0])
	for {
		c := auxIntToInt64(v.AuxInt)
		if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		if x != v_1 {
			break
		}
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueARM64_OpAddr(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Addr {sym} base)
	// result: (MOVDaddr {sym} base)
	for {
		sym := auxToSym(v.Aux)
		base := v_0
		v.reset(OpARM64MOVDaddr)
		v.Aux = symToAux(sym)
		v.AddArg(base)
		return true
	}
}
func rewriteValueARM64_OpAvg64u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Avg64u <t> x y)
	// result: (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		v.reset(OpARM64ADD)
		v0 := b.NewValue0(v.Pos, OpARM64SRLconst, t)
		v0.AuxInt = int64ToAuxInt(1)
		v1 := b.NewValue0(v.Pos, OpARM64SUB, t)
		v1.AddArg2(x, y)
		v0.AddArg(v1)
		v.AddArg2(v0, y)
		return true
	}
}
func rewriteValueARM64_OpBitLen32(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen32 x)
	// result: (SUB (MOVDconst [32]) (CLZW <typ.Int> x))
	for {
		x := v_0
		v.reset(OpARM64SUB)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(32)
		v1 := b.NewValue0(v.Pos, OpARM64CLZW, typ.Int)
		v1.AddArg(x)
		v.AddArg2(v0, v1)
		return true
	}
}
func rewriteValueARM64_OpBitLen64(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen64 x)
	// result: (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
	for {
		x := v_0
		v.reset(OpARM64SUB)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(64)
		v1 := b.NewValue0(v.Pos, OpARM64CLZ, typ.Int)
		v1.AddArg(x)
		v.AddArg2(v0, v1)
		return true
	}
}
func rewriteValueARM64_OpBitRev16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitRev16 x)
	// result: (SRLconst [48] (RBIT <typ.UInt64> x))
	for {
		x := v_0
		v.reset(OpARM64SRLconst)
		v.AuxInt = int64ToAuxInt(48)
		v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpBitRev8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitRev8 x)
	// result: (SRLconst [56] (RBIT <typ.UInt64> x))
	for {
		x := v_0
		v.reset(OpARM64SRLconst)
		v.AuxInt = int64ToAuxInt(56)
		v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
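// Editorial note (illustrative; not generated from the .rules file): ARM64's
// RBIT reverses all 64 bits of a register. To reverse only the low 8 or 16
// bits, the BitRev8/BitRev16 lowerings above reverse the whole register and
// then shift the interesting bits back down:
//
//	(BitRev8 x) => (SRLconst [56] (RBIT <typ.UInt64> x))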
func rewriteValueARM64_OpCondSelect(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CondSelect x y boolval)
	// cond: flagArg(boolval) != nil
	// result: (CSEL [boolval.Op] x y flagArg(boolval))
	for {
		x := v_0
		y := v_1
		boolval := v_2
		if !(flagArg(boolval) != nil) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(boolval.Op)
		v.AddArg3(x, y, flagArg(boolval))
		return true
	}
	// match: (CondSelect x y boolval)
	// cond: flagArg(boolval) == nil
	// result: (CSEL [OpARM64NotEqual] x y (TSTWconst [1] boolval))
	for {
		x := v_0
		y := v_1
		boolval := v_2
		if !(flagArg(boolval) == nil) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(1)
		v0.AddArg(boolval)
		v.AddArg3(x, y, v0)
		return true
	}
	return false
}
func rewriteValueARM64_OpConst16(v *Value) bool {
	// match: (Const16 [val])
	// result: (MOVDconst [int64(val)])
	for {
		val := auxIntToInt16(v.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(int64(val))
		return true
	}
}
func rewriteValueARM64_OpConst32(v *Value) bool {
	// match: (Const32 [val])
	// result: (MOVDconst [int64(val)])
	for {
		val := auxIntToInt32(v.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(int64(val))
		return true
	}
}
func rewriteValueARM64_OpConst32F(v *Value) bool {
	// match: (Const32F [val])
	// result: (FMOVSconst [float64(val)])
	for {
		val := auxIntToFloat32(v.AuxInt)
		v.reset(OpARM64FMOVSconst)
		v.AuxInt = float64ToAuxInt(float64(val))
		return true
	}
}
func rewriteValueARM64_OpConst64(v *Value) bool {
	// match: (Const64 [val])
	// result: (MOVDconst [int64(val)])
	for {
		val := auxIntToInt64(v.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(int64(val))
		return true
	}
}
func rewriteValueARM64_OpConst64F(v *Value) bool {
	// match: (Const64F [val])
	// result: (FMOVDconst [float64(val)])
	for {
		val := auxIntToFloat64(v.AuxInt)
		v.reset(OpARM64FMOVDconst)
		v.AuxInt = float64ToAuxInt(float64(val))
		return true
	}
}
func rewriteValueARM64_OpConst8(v *Value) bool {
	// match: (Const8 [val])
	// result: (MOVDconst [int64(val)])
	for {
		val := auxIntToInt8(v.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(int64(val))
		return true
	}
}
func rewriteValueARM64_OpConstBool(v *Value) bool {
	// match: (ConstBool [t])
	// result: (MOVDconst [b2i(t)])
	for {
		t := auxIntToBool(v.AuxInt)
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(b2i(t))
		return true
	}
}
func rewriteValueARM64_OpConstNil(v *Value) bool {
	// match: (ConstNil)
	// result: (MOVDconst [0])
	for {
		v.reset(OpARM64MOVDconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
}
func rewriteValueARM64_OpCtz16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz16 <t> x)
	// result: (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
	for {
		t := v.Type
		x := v_0
		v.reset(OpARM64CLZW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpARM64RBITW, typ.UInt32)
		v1 := b.NewValue0(v.Pos, OpARM64ORconst, typ.UInt32)
		v1.AuxInt = int64ToAuxInt(0x10000)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpCtz32(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (Ctz32 <t> x)
	// result: (CLZW (RBITW <t> x))
	for {
		t := v.Type
		x := v_0
		v.reset(OpARM64CLZW)
		v0 := b.NewValue0(v.Pos, OpARM64RBITW, t)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
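// Editorial note (illustrative; not generated from the .rules file):
// count-trailing-zeros is lowered above as count-leading-zeros of the
// bit-reversed value. The narrow variants OR in a guard bit first (0x10000
// for Ctz16, 0x100 for Ctz8) so that a zero input yields 16 or 8 rather
// than the 32 that CLZW of zero would produce.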
func rewriteValueARM64_OpCtz64(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (Ctz64 <t> x)
	// result: (CLZ (RBIT <t> x))
	for {
		t := v.Type
		x := v_0
		v.reset(OpARM64CLZ)
		v0 := b.NewValue0(v.Pos, OpARM64RBIT, t)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpCtz8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz8 <t> x)
	// result: (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
	for {
		t := v.Type
		x := v_0
		v.reset(OpARM64CLZW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpARM64RBITW, typ.UInt32)
		v1 := b.NewValue0(v.Pos, OpARM64ORconst, typ.UInt32)
		v1.AuxInt = int64ToAuxInt(0x100)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpDiv16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div16 [false] x y)
	// result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
	for {
		if auxIntToBool(v.AuxInt) != false {
			break
		}
		x := v_0
		y := v_1
		v.reset(OpARM64DIVW)
		v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
		v0.AddArg(x)
		v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
		v1.AddArg(y)
		v.AddArg2(v0, v1)
		return true
	}
	return false
}
func rewriteValueARM64_OpDiv16u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div16u x y)
	// result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64UDIVW)
		v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
		v0.AddArg(x)
		v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
		v1.AddArg(y)
		v.AddArg2(v0, v1)
		return true
	}
}
func rewriteValueARM64_OpDiv32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (Div32 [false] x y)
	// result: (DIVW x y)
	for {
		if auxIntToBool(v.AuxInt) != false {
			break
		}
		x := v_0
		y := v_1
		v.reset(OpARM64DIVW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueARM64_OpDiv64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (Div64 [false] x y)
	// result: (DIV x y)
	for {
		if auxIntToBool(v.AuxInt) != false {
			break
		}
		x := v_0
		y := v_1
		v.reset(OpARM64DIV)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueARM64_OpDiv8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div8 x y)
	// result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64DIVW)
		v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
		v0.AddArg(x)
		v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
		v1.AddArg(y)
		v.AddArg2(v0, v1)
		return true
	}
}
func rewriteValueARM64_OpDiv8u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div8u x y)
	// result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64UDIVW)
		v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
		v0.AddArg(x)
		v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
		v1.AddArg(y)
		v.AddArg2(v0, v1)
		return true
	}
}
func rewriteValueARM64_OpEq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Eq16 x y)
	// result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64Equal)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpEq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq32 x y)
	// result: (Equal (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64Equal)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
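// Editorial note (illustrative; not generated from the .rules file): there
// are no 8- and 16-bit compare instructions, so Eq16/Eq8 above widen both
// operands and use the 32-bit CMPW. Zero-extension is fine here because
// equality is insensitive to how the upper bits are filled, as long as both
// sides are extended the same way.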
func rewriteValueARM64_OpEq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq32F x y)
	// result: (Equal (FCMPS x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64Equal)
		v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpEq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq64 x y)
	// result: (Equal (CMP x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64Equal)
		v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpEq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq64F x y)
	// result: (Equal (FCMPD x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64Equal)
		v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpEq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Eq8 x y)
	// result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64Equal)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpEqB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (EqB x y)
	// result: (XOR (MOVDconst [1]) (XOR <typ.Bool> x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64XOR)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(1)
		v1 := b.NewValue0(v.Pos, OpARM64XOR, typ.Bool)
		v1.AddArg2(x, y)
		v.AddArg2(v0, v1)
		return true
	}
}
func rewriteValueARM64_OpEqPtr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (EqPtr x y)
	// result: (Equal (CMP x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64Equal)
		v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpFMA(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (FMA x y z)
	// result: (FMADDD z x y)
	for {
		x := v_0
		y := v_1
		z := v_2
		v.reset(OpARM64FMADDD)
		v.AddArg3(z, x, y)
		return true
	}
}
func rewriteValueARM64_OpHmul32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Hmul32 x y)
	// result: (SRAconst (MULL <typ.Int64> x y) [32])
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64SRAconst)
		v.AuxInt = int64ToAuxInt(32)
		v0 := b.NewValue0(v.Pos, OpARM64MULL, typ.Int64)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpHmul32u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Hmul32u x y)
	// result: (SRAconst (UMULL <typ.UInt64> x y) [32])
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64SRAconst)
		v.AuxInt = int64ToAuxInt(32)
		v0 := b.NewValue0(v.Pos, OpARM64UMULL, typ.UInt64)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpIsInBounds(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsInBounds idx len)
	// result: (LessThanU (CMP idx len))
	for {
		idx := v_0
		len := v_1
		v.reset(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
		v0.AddArg2(idx, len)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpIsNonNil(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsNonNil ptr)
	// result: (NotEqual (CMPconst [0] ptr))
	for {
		ptr := v_0
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v0.AuxInt = int64ToAuxInt(0)
		v0.AddArg(ptr)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpIsSliceInBounds(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsSliceInBounds idx len)
	// result: (LessEqualU (CMP idx len))
	for {
		idx := v_0
		len := v_1
		v.reset(OpARM64LessEqualU)
		v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
		v0.AddArg2(idx, len)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLeq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Leq16 x y)
	// result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLeq16U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Leq16U x zero:(MOVDconst [0]))
	// result: (Eq16 x zero)
	for {
		x := v_0
		zero := v_1
		if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
			break
		}
		v.reset(OpEq16)
		v.AddArg2(x, zero)
		return true
	}
	// match: (Leq16U (MOVDconst [1]) x)
	// result: (Neq16 (MOVDconst [0]) x)
	for {
		if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
			break
		}
		x := v_1
		v.reset(OpNeq16)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v.AddArg2(v0, x)
		return true
	}
	// match: (Leq16U x y)
	// result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessEqualU)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLeq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32 x y)
	// result: (LessEqual (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLeq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32F x y)
	// result: (LessEqualF (FCMPS x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessEqualF)
		v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLeq32U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Leq32U x zero:(MOVDconst [0]))
	// result: (Eq32 x zero)
	for {
		x := v_0
		zero := v_1
		if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
			break
		}
		v.reset(OpEq32)
		v.AddArg2(x, zero)
		return true
	}
	// match: (Leq32U (MOVDconst [1]) x)
	// result: (Neq32 (MOVDconst [0]) x)
	for {
		if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
			break
		}
		x := v_1
		v.reset(OpNeq32)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v.AddArg2(v0, x)
		return true
	}
	// match: (Leq32U x y)
	// result: (LessEqualU (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessEqualU)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
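// Editorial note (illustrative; not generated from the .rules file): the
// Leq*U rules above exploit unsigned arithmetic at its boundaries: x <= 0
// can only hold when x == 0, and 1 <= x is simply x != 0, so both cases are
// turned into equality tests instead of a flags-based ordered comparison.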
func rewriteValueARM64_OpLeq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64 x y)
	// result: (LessEqual (CMP x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLeq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64F x y)
	// result: (LessEqualF (FCMPD x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessEqualF)
		v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLeq64U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Leq64U x zero:(MOVDconst [0]))
	// result: (Eq64 x zero)
	for {
		x := v_0
		zero := v_1
		if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
			break
		}
		v.reset(OpEq64)
		v.AddArg2(x, zero)
		return true
	}
	// match: (Leq64U (MOVDconst [1]) x)
	// result: (Neq64 (MOVDconst [0]) x)
	for {
		if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
			break
		}
		x := v_1
		v.reset(OpNeq64)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v.AddArg2(v0, x)
		return true
	}
	// match: (Leq64U x y)
	// result: (LessEqualU (CMP x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessEqualU)
		v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLeq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Leq8 x y)
	// result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLeq8U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Leq8U x zero:(MOVDconst [0]))
	// result: (Eq8 x zero)
	for {
		x := v_0
		zero := v_1
		if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
			break
		}
		v.reset(OpEq8)
		v.AddArg2(x, zero)
		return true
	}
	// match: (Leq8U (MOVDconst [1]) x)
	// result: (Neq8 (MOVDconst [0]) x)
	for {
		if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
			break
		}
		x := v_1
		v.reset(OpNeq8)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v.AddArg2(v0, x)
		return true
	}
	// match: (Leq8U x y)
	// result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessEqualU)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLess16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Less16 x y)
	// result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessThan)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLess16U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Less16U zero:(MOVDconst [0]) x)
	// result: (Neq16 zero x)
	for {
		zero := v_0
		if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
			break
		}
		x := v_1
		v.reset(OpNeq16)
		v.AddArg2(zero, x)
		return true
	}
	// match: (Less16U x (MOVDconst [1]))
	// result: (Eq16 x (MOVDconst [0]))
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
			break
		}
		v.reset(OpEq16)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Less16U x y)
	// result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLess32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32 x y)
	// result: (LessThan (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessThan)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLess32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32F x y)
	// result: (LessThanF (FCMPS x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessThanF)
		v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLess32U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Less32U zero:(MOVDconst [0]) x)
	// result: (Neq32 zero x)
	for {
		zero := v_0
		if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
			break
		}
		x := v_1
		v.reset(OpNeq32)
		v.AddArg2(zero, x)
		return true
	}
	// match: (Less32U x (MOVDconst [1]))
	// result: (Eq32 x (MOVDconst [0]))
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
			break
		}
		v.reset(OpEq32)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Less32U x y)
	// result: (LessThanU (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLess64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64 x y)
	// result: (LessThan (CMP x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessThan)
		v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLess64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64F x y)
	// result: (LessThanF (FCMPD x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessThanF)
		v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLess64U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Less64U zero:(MOVDconst [0]) x)
	// result: (Neq64 zero x)
	for {
		zero := v_0
		if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
			break
		}
		x := v_1
		v.reset(OpNeq64)
		v.AddArg2(zero, x)
		return true
	}
	// match: (Less64U x (MOVDconst [1]))
	// result: (Eq64 x (MOVDconst [0]))
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
			break
		}
		v.reset(OpEq64)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Less64U x y)
	// result: (LessThanU (CMP x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLess8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Less8 x y)
	// result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessThan)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLess8U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Less8U zero:(MOVDconst [0]) x)
	// result: (Neq8 zero x)
	for {
		zero := v_0
		if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
			break
		}
		x := v_1
		v.reset(OpNeq8)
		v.AddArg2(zero, x)
		return true
	}
	// match: (Less8U x (MOVDconst [1]))
	// result: (Eq8 x (MOVDconst [0]))
	for {
		x := v_0
		if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
			break
		}
		v.reset(OpEq8)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Less8U x y)
	// result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpLoad(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (Load <t> ptr mem)
	// cond: t.IsBoolean()
	// result: (MOVBUload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(t.IsBoolean()) {
			break
		}
		v.reset(OpARM64MOVBUload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is8BitInt(t) && t.IsSigned())
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is8BitInt(t) && t.IsSigned()) {
			break
		}
		v.reset(OpARM64MOVBload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is8BitInt(t) && !t.IsSigned())
	// result: (MOVBUload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is8BitInt(t) && !t.IsSigned()) {
			break
		}
		v.reset(OpARM64MOVBUload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is16BitInt(t) && t.IsSigned())
	// result: (MOVHload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is16BitInt(t) && t.IsSigned()) {
			break
		}
		v.reset(OpARM64MOVHload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is16BitInt(t) && !t.IsSigned())
	// result: (MOVHUload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is16BitInt(t) && !t.IsSigned()) {
			break
		}
		v.reset(OpARM64MOVHUload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) && t.IsSigned())
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is32BitInt(t) && t.IsSigned()) {
			break
		}
		v.reset(OpARM64MOVWload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) && !t.IsSigned())
	// result: (MOVWUload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is32BitInt(t) && !t.IsSigned()) {
			break
		}
		v.reset(OpARM64MOVWUload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (MOVDload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpARM64MOVDload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (FMOVSload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpARM64FMOVSload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (FMOVDload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpARM64FMOVDload)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpLocalAddr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (LocalAddr <t> {sym} base mem)
	// cond: t.Elem().HasPointers()
	// result: (MOVDaddr {sym} (SPanchored base mem))
	for {
		t := v.Type
		sym := auxToSym(v.Aux)
		base := v_0
		mem := v_1
		if !(t.Elem().HasPointers()) {
			break
		}
		v.reset(OpARM64MOVDaddr)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
		v0.AddArg2(base, mem)
		v.AddArg(v0)
		return true
	}
	// match: (LocalAddr <t> {sym} base _)
	// cond: !t.Elem().HasPointers()
	// result: (MOVDaddr {sym} base)
	for {
		t := v.Type
		sym := auxToSym(v.Aux)
		base := v_0
		if !(!t.Elem().HasPointers()) {
			break
		}
		v.reset(OpARM64MOVDaddr)
		v.Aux = symToAux(sym)
		v.AddArg(base)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh16x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Lsh16x16 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh16x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Lsh16x32 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
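// Editorial note (illustrative; not generated from the .rules file): Go
// shifts must yield 0 when the count is >= the operand width, while ARM64's
// variable shift uses only the low bits of the count. When the count cannot
// be proven in range (shiftIsBounded), the Lsh rules here guard the shift
// with a conditional select, e.g. for a 64-bit count y,
//
//	(CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
//
// which keeps the shifted value for y < 64 and substitutes 0 otherwise.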
func rewriteValueARM64_OpLsh16x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Lsh16x16 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh16x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Lsh16x32 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh16x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x64 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v2.AddArg(y)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh16x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Lsh16x8 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh32x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Lsh32x16 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh32x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Lsh32x32 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh32x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x64 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v2.AddArg(y)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh32x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Lsh32x8 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh64x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Lsh64x16 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh64x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Lsh64x32 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh64x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x64 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v2.AddArg(y)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh64x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Lsh64x8 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh64x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh8x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Lsh8x16 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh8x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Lsh8x32 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh8x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x64 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v2.AddArg(y)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
func rewriteValueARM64_OpLsh8x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Lsh8x8 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SLL <t> x y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SLL)
		v.Type = t
		v.AddArg2(x, y)
		return true
	}
	// match: (Lsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpConst64, t)
		v1.AuxInt = int64ToAuxInt(0)
		v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v.AddArg3(v0, v1, v2)
		return true
	}
	return false
}
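// Mod16/Mod8 (and their unsigned variants) have no direct machine op: the
// operands are first sign- or zero-extended to 32 bits and the 32-bit
// MODW/UMODW instruction is used. Mod32 and Mod64 map directly to MODW/MOD.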
func rewriteValueARM64_OpMod16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod16 x y)
	// result: (MODW (SignExt16to32 x) (SignExt16to32 y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64MODW)
		v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
		v0.AddArg(x)
		v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
		v1.AddArg(y)
		v.AddArg2(v0, v1)
		return true
	}
}
func rewriteValueARM64_OpMod16u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod16u x y)
	// result: (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64UMODW)
		v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
		v0.AddArg(x)
		v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
		v1.AddArg(y)
		v.AddArg2(v0, v1)
		return true
	}
}
func rewriteValueARM64_OpMod32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (Mod32 x y)
	// result: (MODW x y)
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64MODW)
		v.AddArg2(x, y)
		return true
	}
}
func rewriteValueARM64_OpMod64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (Mod64 x y)
	// result: (MOD x y)
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64MOD)
		v.AddArg2(x, y)
		return true
	}
}
func rewriteValueARM64_OpMod8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod8 x y)
	// result: (MODW (SignExt8to32 x) (SignExt8to32 y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64MODW)
		v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
		v0.AddArg(x)
		v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
		v1.AddArg(y)
		v.AddArg2(v0, v1)
		return true
	}
}
func rewriteValueARM64_OpMod8u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod8u x y)
	// result: (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64UMODW)
		v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
		v0.AddArg(x)
		v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
		v1.AddArg(y)
		v.AddArg2(v0, v1)
		return true
	}
}
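// OpMove lowering dispatches on the constant size being copied: sizes up to
// 16 bytes get straight-line loads and stores (odd sizes may use overlapping
// pairs, e.g. Move [7] issues two 4-byte copies overlapping by one byte),
// 16/32/48/64 bytes use LDP/STP register pairs, other sizes are split so the
// bulk is a multiple of 16, mid-size multiples of 16 go through Duff's
// device (DUFFCOPY), and everything larger falls back to the LoweredMove loop.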
func rewriteValueARM64_OpMove(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (Move [0] _ _ mem)
	// result: mem
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		mem := v_2
		v.copyOf(mem)
		return true
	}
	// match: (Move [1] dst src mem)
	// result: (MOVBstore dst (MOVBUload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 1 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64MOVBstore)
		v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [2] dst src mem)
	// result: (MOVHstore dst (MOVHUload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 2 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64MOVHstore)
		v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [3] dst src mem)
	// result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 3 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64MOVBstore)
		v.AuxInt = int32ToAuxInt(2)
		v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(2)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [4] dst src mem)
	// result: (MOVWstore dst (MOVWUload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 4 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64MOVWstore)
		v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [5] dst src mem)
	// result: (MOVBstore [4] dst (MOVBUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 5 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64MOVBstore)
		v.AuxInt = int32ToAuxInt(4)
		v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(4)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [6] dst src mem)
	// result: (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 6 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64MOVHstore)
		v.AuxInt = int32ToAuxInt(4)
		v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
		v0.AuxInt = int32ToAuxInt(4)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [7] dst src mem)
	// result: (MOVWstore [3] dst (MOVWUload [3] src mem) (MOVWstore dst (MOVWUload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 7 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64MOVWstore)
		v.AuxInt = int32ToAuxInt(3)
		v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(3)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [8] dst src mem)
	// result: (MOVDstore dst (MOVDload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 8 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64MOVDstore)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [9] dst src mem)
	// result: (MOVBstore [8] dst (MOVBUload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 9 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64MOVBstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [10] dst src mem)
	// result: (MOVHstore [8] dst (MOVHUload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 10 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64MOVHstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [11] dst src mem)
	// result: (MOVDstore [3] dst (MOVDload [3] src mem) (MOVDstore dst (MOVDload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 11 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64MOVDstore)
		v.AuxInt = int32ToAuxInt(3)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(3)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [12] dst src mem)
	// result: (MOVWstore [8] dst (MOVWUload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 12 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64MOVWstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [13] dst src mem)
	// result: (MOVDstore [5] dst (MOVDload [5] src mem) (MOVDstore dst (MOVDload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 13 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64MOVDstore)
		v.AuxInt = int32ToAuxInt(5)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(5)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [14] dst src mem)
	// result: (MOVDstore [6] dst (MOVDload [6] src mem) (MOVDstore dst (MOVDload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 14 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64MOVDstore)
		v.AuxInt = int32ToAuxInt(6)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(6)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [15] dst src mem)
	// result: (MOVDstore [7] dst (MOVDload [7] src mem) (MOVDstore dst (MOVDload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 15 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64MOVDstore)
		v.AuxInt = int32ToAuxInt(7)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(7)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [16] dst src mem)
	// result: (STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64STP)
		v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
		v1.AddArg2(src, mem)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
		v2.AddArg(v1)
		v.AddArg4(dst, v0, v2, mem)
		return true
	}
	// match: (Move [32] dst src mem)
	// result: (STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem)) (STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 32 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64STP)
		v.AuxInt = int32ToAuxInt(16)
		v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
		v1.AuxInt = int32ToAuxInt(16)
		v1.AddArg2(src, mem)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
		v2.AddArg(v1)
		v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
		v4 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
		v5 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
		v5.AddArg2(src, mem)
		v4.AddArg(v5)
		v6 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
		v6.AddArg(v5)
		v3.AddArg4(dst, v4, v6, mem)
		v.AddArg4(dst, v0, v2, v3)
		return true
	}
	// match: (Move [48] dst src mem)
	// result: (STP [32] dst (Select0 <typ.UInt64> (LDP [32] src mem)) (Select1 <typ.UInt64> (LDP [32] src mem)) (STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem)) (STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)))
	for {
		if auxIntToInt64(v.AuxInt) != 48 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64STP)
		v.AuxInt = int32ToAuxInt(32)
		v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
		v1.AuxInt = int32ToAuxInt(32)
		v1.AddArg2(src, mem)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
		v2.AddArg(v1)
		v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
		v3.AuxInt = int32ToAuxInt(16)
		v4 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
		v5 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
		v5.AuxInt = int32ToAuxInt(16)
		v5.AddArg2(src, mem)
		v4.AddArg(v5)
		v6 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
		v6.AddArg(v5)
		v7 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
		v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
		v9 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
		v9.AddArg2(src, mem)
		v8.AddArg(v9)
		v10 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
		v10.AddArg(v9)
		v7.AddArg4(dst, v8, v10, mem)
		v3.AddArg4(dst, v4, v6, v7)
		v.AddArg4(dst, v0, v2, v3)
		return true
	}
	// match: (Move [64] dst src mem)
	// result: (STP [48] dst (Select0 <typ.UInt64> (LDP [48] src mem)) (Select1 <typ.UInt64> (LDP [48] src mem)) (STP [32] dst (Select0 <typ.UInt64> (LDP [32] src mem)) (Select1 <typ.UInt64> (LDP [32] src mem)) (STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem)) (STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))))
	for {
		if auxIntToInt64(v.AuxInt) != 64 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpARM64STP)
		v.AuxInt = int32ToAuxInt(48)
		v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
		v1.AuxInt = int32ToAuxInt(48)
		v1.AddArg2(src, mem)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
		v2.AddArg(v1)
		v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
		v3.AuxInt = int32ToAuxInt(32)
		v4 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
		v5 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
		v5.AuxInt = int32ToAuxInt(32)
		v5.AddArg2(src, mem)
		v4.AddArg(v5)
		v6 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
		v6.AddArg(v5)
		v7 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
		v7.AuxInt = int32ToAuxInt(16)
		v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
		v9 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
		v9.AuxInt = int32ToAuxInt(16)
		v9.AddArg2(src, mem)
		v8.AddArg(v9)
		v10 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
		v10.AddArg(v9)
		v11 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
		v12 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
		v13 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
		v13.AddArg2(src, mem)
		v12.AddArg(v13)
		v14 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
		v14.AddArg(v13)
		v11.AddArg4(dst, v12, v14, mem)
		v7.AddArg4(dst, v8, v10, v11)
		v3.AddArg4(dst, v4, v6, v7)
		v.AddArg4(dst, v0, v2, v3)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s%16 != 0 && s%16 <= 8 && s > 16
	// result: (Move [8] (OffPtr <dst.Type> dst [s-8]) (OffPtr <src.Type> src [s-8]) (Move [s-s%16] dst src mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s%16 != 0 && s%16 <= 8 && s > 16) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(s - 8)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(s - 8)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = int64ToAuxInt(s - s%16)
		v2.AddArg3(dst, src, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s%16 != 0 && s%16 > 8 && s > 16
	// result: (Move [16] (OffPtr <dst.Type> dst [s-16]) (OffPtr <src.Type> src [s-16]) (Move [s-s%16] dst src mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s%16 != 0 && s%16 > 8 && s > 16) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(s - 16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(s - 16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = int64ToAuxInt(s - s%16)
		v2.AddArg3(dst, src, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
	// result: (DUFFCOPY [8 * (64 - s/16)] dst src mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
			break
		}
		v.reset(OpARM64DUFFCOPY)
		v.AuxInt = int64ToAuxInt(8 * (64 - s/16))
		v.AddArg3(dst, src, mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s%16 == 0 && (s > 16*64 || config.noDuffDevice) && logLargeCopy(v, s)
	// result: (LoweredMove dst src (ADDconst <src.Type> src [s-16]) mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s%16 == 0 && (s > 16*64 || config.noDuffDevice) && logLargeCopy(v, s)) {
			break
		}
		v.reset(OpARM64LoweredMove)
		v0 := b.NewValue0(v.Pos, OpARM64ADDconst, src.Type)
		v0.AuxInt = int64ToAuxInt(s - 16)
		v0.AddArg(src)
		v.AddArg4(dst, src, v0, mem)
		return true
	}
	return false
}
func rewriteValueARM64_OpNeq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Neq16 x y)
	// result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpNeq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq32 x y)
	// result: (NotEqual (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpNeq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq32F x y)
	// result: (NotEqual (FCMPS x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpNeq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq64 x y)
	// result: (NotEqual (CMP x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpNeq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Neq64F x y)
	// result: (NotEqual (FCMPD x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpNeq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Neq8 x y)
	// result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpNeqPtr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (NeqPtr x y)
	// result: (NotEqual (CMP x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64NotEqual)
		v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpNot(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Not x)
	// result: (XOR (MOVDconst [1]) x)
	for {
		x := v_0
		v.reset(OpARM64XOR)
		v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(1)
		v.AddArg2(v0, x)
		return true
	}
}
func rewriteValueARM64_OpOffPtr(v *Value) bool {
	v_0 := v.Args[0]
	// match: (OffPtr [off] ptr:(SP))
	// cond: is32Bit(off)
	// result: (MOVDaddr [int32(off)] ptr)
	for {
		off := auxIntToInt64(v.AuxInt)
		ptr := v_0
		if ptr.Op != OpSP || !(is32Bit(off)) {
			break
		}
		v.reset(OpARM64MOVDaddr)
		v.AuxInt = int32ToAuxInt(int32(off))
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// result: (ADDconst [off] ptr)
	for {
		off := auxIntToInt64(v.AuxInt)
		ptr := v_0
		v.reset(OpARM64ADDconst)
		v.AuxInt = int64ToAuxInt(off)
		v.AddArg(ptr)
		return true
	}
}
func rewriteValueARM64_OpPanicBounds(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 0
	// result: (LoweredPanicBoundsA [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 0) {
			break
		}
		v.reset(OpARM64LoweredPanicBoundsA)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 1
	// result: (LoweredPanicBoundsB [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 1) {
			break
		}
		v.reset(OpARM64LoweredPanicBoundsB)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 2
	// result: (LoweredPanicBoundsC [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 2) {
			break
		}
		v.reset(OpARM64LoweredPanicBoundsC)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	return false
}
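// PopCount has no integer instruction on ARM64: the value is moved to a SIMD
// register (FMOVDgpfp), counted bytewise with VCNT, summed with VUADDLV, and
// moved back (FMOVDfpgp). 16- and 32-bit inputs are zero-extended first.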
func rewriteValueARM64_OpPopCount16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (PopCount16 <t> x)
	// result: (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt16to64 x)))))
	for {
		t := v.Type
		x := v_0
		v.reset(OpARM64FMOVDfpgp)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64)
		v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64)
		v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64)
		v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
		v3.AddArg(x)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpPopCount32(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (PopCount32 <t> x)
	// result: (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt32to64 x)))))
	for {
		t := v.Type
		x := v_0
		v.reset(OpARM64FMOVDfpgp)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64)
		v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64)
		v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64)
		v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
		v3.AddArg(x)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpPopCount64(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (PopCount64 <t> x)
	// result: (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> x))))
	for {
		t := v.Type
		x := v_0
		v.reset(OpARM64FMOVDfpgp)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64)
		v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64)
		v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64)
		v2.AddArg(x)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueARM64_OpPrefetchCache(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (PrefetchCache addr mem)
	// result: (PRFM [0] addr mem)
	for {
		addr := v_0
		mem := v_1
		v.reset(OpARM64PRFM)
		v.AuxInt = int64ToAuxInt(0)
		v.AddArg2(addr, mem)
		return true
	}
}
func rewriteValueARM64_OpPrefetchCacheStreamed(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (PrefetchCacheStreamed addr mem)
	// result: (PRFM [1] addr mem)
	for {
		addr := v_0
		mem := v_1
		v.reset(OpARM64PRFM)
		v.AuxInt = int64ToAuxInt(1)
		v.AddArg2(addr, mem)
		return true
	}
}
func rewriteValueARM64_OpPubBarrier(v *Value) bool {
	v_0 := v.Args[0]
	// match: (PubBarrier mem)
	// result: (DMB [0xe] mem)
	for {
		mem := v_0
		v.reset(OpARM64DMB)
		v.AuxInt = int64ToAuxInt(0xe)
		v.AddArg(mem)
		return true
	}
}
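// There is no rotate-left instruction; RotateLeft32/64 negate the count and
// use the rotate-right RORW/ROR. RotateLeft16/8 with a constant count expand
// to a shift pair; with a variable count, RotateLeft16 doubles the value
// into 32 bits (ORshiftLL) so RORW works, and RotateLeft8 ORs together an
// SLL and an SRL with masked counts.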
func rewriteValueARM64_OpRotateLeft16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (RotateLeft16 <t> x (MOVDconst [c]))
	// result: (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
	for {
		t := v.Type
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpOr16)
		v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
		v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v1.AuxInt = int64ToAuxInt(c & 15)
		v0.AddArg2(x, v1)
		v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
		v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v3.AuxInt = int64ToAuxInt(-c & 15)
		v2.AddArg2(x, v3)
		v.AddArg2(v0, v2)
		return true
	}
	// match: (RotateLeft16 <t> x y)
	// result: (RORW <t> (ORshiftLL <typ.UInt32> (ZeroExt16to32 x) (ZeroExt16to32 x) [16]) (NEG <typ.Int64> y))
	for {
		t := v.Type
		x := v_0
		y := v_1
		v.reset(OpARM64RORW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpARM64ORshiftLL, typ.UInt32)
		v0.AuxInt = int64ToAuxInt(16)
		v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg2(v1, v1)
		v2 := b.NewValue0(v.Pos, OpARM64NEG, typ.Int64)
		v2.AddArg(y)
		v.AddArg2(v0, v2)
		return true
	}
}
func rewriteValueARM64_OpRotateLeft32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (RotateLeft32 x y)
	// result: (RORW x (NEG <y.Type> y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64RORW)
		v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
}
func rewriteValueARM64_OpRotateLeft64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (RotateLeft64 x y)
	// result: (ROR x (NEG <y.Type> y))
	for {
		x := v_0
		y := v_1
		v.reset(OpARM64ROR)
		v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
}
func rewriteValueARM64_OpRotateLeft8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (RotateLeft8 <t> x (MOVDconst [c]))
	// result: (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
	for {
		t := v.Type
		x := v_0
		if v_1.Op != OpARM64MOVDconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpOr8)
		v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
		v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v1.AuxInt = int64ToAuxInt(c & 7)
		v0.AddArg2(x, v1)
		v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
		v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
		v3.AuxInt = int64ToAuxInt(-c & 7)
		v2.AddArg2(x, v3)
		v.AddArg2(v0, v2)
		return true
	}
	// match: (RotateLeft8 <t> x y)
	// result: (OR <t> (SLL <t> x (ANDconst <typ.Int64> [7] y)) (SRL <t> (ZeroExt8to64 x) (ANDconst <typ.Int64> [7] (NEG <typ.Int64> y))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		v.reset(OpARM64OR)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
		v1 := b.NewValue0(v.Pos, OpARM64ANDconst, typ.Int64)
		v1.AuxInt = int64ToAuxInt(7)
		v1.AddArg(y)
		v0.AddArg2(x, v1)
		v2 := b.NewValue0(v.Pos, OpARM64SRL, t)
		v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
		v3.AddArg(x)
		v4 := b.NewValue0(v.Pos, OpARM64ANDconst, typ.Int64)
		v4.AuxInt = int64ToAuxInt(7)
		v5 := b.NewValue0(v.Pos, OpARM64NEG, typ.Int64)
		v5.AddArg(y)
		v4.AddArg(v5)
		v2.AddArg2(v3, v4)
		v.AddArg2(v0, v2)
		return true
	}
}
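// Unsigned right shifts of narrow values zero-extend the operand to 64 bits
// before the SRL; as with the left shifts above, an unbounded count is
// guarded by a CSEL that yields 0 when the count is >= 64.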
func rewriteValueARM64_OpRsh16Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Rsh16Ux16 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SRL <t> (ZeroExt16to64 x) y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SRL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
		v0.AddArg(x)
		v.AddArg2(v0, y)
		return true
	}
	// match: (Rsh16Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
		v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
		v1.AddArg(x)
		v0.AddArg2(v1, y)
		v2 := b.NewValue0(v.Pos, OpConst64, t)
		v2.AuxInt = int64ToAuxInt(0)
		v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v3.AuxInt = int64ToAuxInt(64)
		v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
		v4.AddArg(y)
		v3.AddArg(v4)
		v.AddArg3(v0, v2, v3)
		return true
	}
	return false
}
func rewriteValueARM64_OpRsh16Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Rsh16Ux32 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SRL <t> (ZeroExt16to64 x) y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SRL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
		v0.AddArg(x)
		v.AddArg2(v0, y)
		return true
	}
	// match: (Rsh16Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
		v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
		v1.AddArg(x)
		v0.AddArg2(v1, y)
		v2 := b.NewValue0(v.Pos, OpConst64, t)
		v2.AuxInt = int64ToAuxInt(0)
		v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v3.AuxInt = int64ToAuxInt(64)
		v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
		v4.AddArg(y)
		v3.AddArg(v4)
		v.AddArg3(v0, v2, v3)
		return true
	}
	return false
}
func rewriteValueARM64_OpRsh16Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Rsh16Ux64 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SRL <t> (ZeroExt16to64 x) y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SRL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
		v0.AddArg(x)
		v.AddArg2(v0, y)
		return true
	}
	// match: (Rsh16Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
		v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
		v1.AddArg(x)
		v0.AddArg2(v1, y)
		v2 := b.NewValue0(v.Pos, OpConst64, t)
		v2.AuxInt = int64ToAuxInt(0)
		v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v3.AuxInt = int64ToAuxInt(64)
		v3.AddArg(y)
		v.AddArg3(v0, v2, v3)
		return true
	}
	return false
}
func rewriteValueARM64_OpRsh16Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Rsh16Ux8 <t> x y)
	// cond: shiftIsBounded(v)
	// result: (SRL <t> (ZeroExt16to64 x) y)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64SRL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
		v0.AddArg(x)
		v.AddArg2(v0, y)
		return true
	}
	// match: (Rsh16Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpARM64CSEL)
		v.AuxInt = opToAuxInt(OpARM64LessThanU)
		v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
		v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
		v1.AddArg(x)
		v0.AddArg2(v1, y)
		v2 := b.NewValue0(v.Pos, OpConst64, t)
		v2.AuxInt = int64ToAuxInt(0)
		v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
		v3.AuxInt = int64ToAuxInt(64)
		v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
		v4.AddArg(y)
		v3.AddArg(v4)
		v.AddArg3(v0, v2, v3)
		return true
	}
	return false
}
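// Signed right shifts sign-extend the operand and, when the count may be out
// of range, clamp the count itself to 63 with a CSEL (rather than selecting
// a zero result), so an oversized shift still fills with the sign bit.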
&b.Func.Config.Types // match: (Rsh16Ux64 <t> x y) // cond: shiftIsBounded(v) // result: (SRL <t> (ZeroExt16to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRL) v.Type = t v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh16Ux64 <t> x y) // cond: !shiftIsBounded(v) // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y)) for { t := v.Type x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = int64ToAuxInt(0) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v3.AddArg(y) v.AddArg3(v0, v2, v3) return true } return false } func rewriteValueARM64_OpRsh16Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16Ux8 <t> x y) // cond: shiftIsBounded(v) // result: (SRL <t> (ZeroExt16to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRL) v.Type = t v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh16Ux8 <t> x y) // cond: !shiftIsBounded(v) // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(x) v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = int64ToAuxInt(0) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v.AddArg3(v0, v2, v3) return true } return false } func rewriteValueARM64_OpRsh16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x16 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> (SignExt16to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh16x16 x y) // cond: !shiftIsBounded(v) // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = int64ToAuxInt(63) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v1.AddArg3(y, v2, v3) v.AddArg2(v0, v1) return true } return false } func rewriteValueARM64_OpRsh16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x32 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> (SignExt16to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } 
v.reset(OpARM64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh16x32 x y) // cond: !shiftIsBounded(v) // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = int64ToAuxInt(63) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v1.AddArg3(y, v2, v3) v.AddArg2(v0, v1) return true } return false } func rewriteValueARM64_OpRsh16x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x64 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> (SignExt16to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh16x64 x y) // cond: !shiftIsBounded(v) // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = int64ToAuxInt(63) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v3.AddArg(y) v1.AddArg3(y, v2, v3) v.AddArg2(v0, v1) return true } return false } func rewriteValueARM64_OpRsh16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh16x8 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> (SignExt16to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh16x8 x y) // cond: !shiftIsBounded(v) // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = int64ToAuxInt(63) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v1.AddArg3(y, v2, v3) v.AddArg2(v0, v1) return true } return false } func rewriteValueARM64_OpRsh32Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux16 <t> x y) // cond: shiftIsBounded(v) // result: (SRL <t> (ZeroExt32to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRL) v.Type = t v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh32Ux16 <t> x y) // cond: !shiftIsBounded(v) // 
result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = int64ToAuxInt(0) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v.AddArg3(v0, v2, v3) return true } return false } func rewriteValueARM64_OpRsh32Ux32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux32 <t> x y) // cond: shiftIsBounded(v) // result: (SRL <t> (ZeroExt32to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRL) v.Type = t v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh32Ux32 <t> x y) // cond: !shiftIsBounded(v) // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = int64ToAuxInt(0) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v.AddArg3(v0, v2, v3) return true } return false } func rewriteValueARM64_OpRsh32Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux64 <t> x y) // cond: shiftIsBounded(v) // result: (SRL <t> (ZeroExt32to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRL) v.Type = t v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh32Ux64 <t> x y) // cond: !shiftIsBounded(v) // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y)) for { t := v.Type x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = int64ToAuxInt(0) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v3.AddArg(y) v.AddArg3(v0, v2, v3) return true } return false } func rewriteValueARM64_OpRsh32Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32Ux8 <t> x y) // cond: shiftIsBounded(v) // result: (SRL <t> (ZeroExt32to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRL) v.Type = t v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh32Ux8 <t> x y) // cond: !shiftIsBounded(v) // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } 
v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(x) v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = int64ToAuxInt(0) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v.AddArg3(v0, v2, v3) return true } return false } func rewriteValueARM64_OpRsh32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x16 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> (SignExt32to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh32x16 x y) // cond: !shiftIsBounded(v) // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = int64ToAuxInt(63) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v1.AddArg3(y, v2, v3) v.AddArg2(v0, v1) return true } return false } func rewriteValueARM64_OpRsh32x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x32 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> (SignExt32to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh32x32 x y) // cond: !shiftIsBounded(v) // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = int64ToAuxInt(63) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v1.AddArg3(y, v2, v3) v.AddArg2(v0, v1) return true } return false } func rewriteValueARM64_OpRsh32x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x64 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> (SignExt32to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh32x64 x y) // cond: !shiftIsBounded(v) // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) 
v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = int64ToAuxInt(63) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v3.AddArg(y) v1.AddArg3(y, v2, v3) v.AddArg2(v0, v1) return true } return false } func rewriteValueARM64_OpRsh32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh32x8 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> (SignExt32to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh32x8 x y) // cond: !shiftIsBounded(v) // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = int64ToAuxInt(63) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v1.AddArg3(y, v2, v3) v.AddArg2(v0, v1) return true } return false } func rewriteValueARM64_OpRsh64Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux16 <t> x y) // cond: shiftIsBounded(v) // result: (SRL <t> x y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRL) v.Type = t v.AddArg2(x, y) return true } // match: (Rsh64Ux16 <t> x y) // cond: !shiftIsBounded(v) // result: (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = int64ToAuxInt(0) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) v.AddArg3(v0, v1, v2) return true } return false } func rewriteValueARM64_OpRsh64Ux32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux32 <t> x y) // cond: shiftIsBounded(v) // result: (SRL <t> x y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRL) v.Type = t v.AddArg2(x, y) return true } // match: (Rsh64Ux32 <t> x y) // cond: !shiftIsBounded(v) // result: (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = int64ToAuxInt(0) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) v.AddArg3(v0, v1, v2) return true } return false } func rewriteValueARM64_OpRsh64Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (Rsh64Ux64 <t> x y) // cond: shiftIsBounded(v) // result: 
(SRL <t> x y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRL) v.Type = t v.AddArg2(x, y) return true } // match: (Rsh64Ux64 <t> x y) // cond: !shiftIsBounded(v) // result: (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y)) for { t := v.Type x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = int64ToAuxInt(0) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = int64ToAuxInt(64) v2.AddArg(y) v.AddArg3(v0, v1, v2) return true } return false } func rewriteValueARM64_OpRsh64Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64Ux8 <t> x y) // cond: shiftIsBounded(v) // result: (SRL <t> x y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRL) v.Type = t v.AddArg2(x, y) return true } // match: (Rsh64Ux8 <t> x y) // cond: !shiftIsBounded(v) // result: (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpConst64, t) v1.AuxInt = int64ToAuxInt(0) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) v.AddArg3(v0, v1, v2) return true } return false } func rewriteValueARM64_OpRsh64x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x16 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> x y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v.Type = t v.AddArg2(x, y) return true } // match: (Rsh64x16 x y) // cond: !shiftIsBounded(v) // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v0.AuxInt = opToAuxInt(OpARM64LessThanU) v1 := b.NewValue0(v.Pos, OpConst64, y.Type) v1.AuxInt = int64ToAuxInt(63) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) v0.AddArg3(y, v1, v2) v.AddArg2(x, v0) return true } return false } func rewriteValueARM64_OpRsh64x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x32 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> x y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v.Type = t v.AddArg2(x, y) return true } // match: (Rsh64x32 x y) // cond: !shiftIsBounded(v) // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v0.AuxInt = opToAuxInt(OpARM64LessThanU) v1 := b.NewValue0(v.Pos, OpConst64, y.Type) v1.AuxInt = int64ToAuxInt(63) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) 
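// Illustrative sketch (assumption: reader-facing pseudocode, not part of the
// generated rewrite) of what the tree built above computes for the unbounded
// Rsh64x32 case, where zeroExt32(y) stands for the ZeroExt32to64 value:
//
//	shift := y
//	if uint64(zeroExt32(y)) >= 64 { // CMPconst [64] (ZeroExt32to64 y), tested LessThanU
//		shift = 63 // Const64 [63] selected by the CSEL
//	}
//	result := x >> shift // SRA: arithmetic shift, replicating the sign bit
//
// Clamping to 63 rather than 0 is what makes the signed case correct: Go
// defines x >> s for s >= 64 as all sign bits, exactly what SRA by 63 yields.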
v3.AddArg(y) v2.AddArg(v3) v0.AddArg3(y, v1, v2) v.AddArg2(x, v0) return true } return false } func rewriteValueARM64_OpRsh64x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block // match: (Rsh64x64 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> x y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v.Type = t v.AddArg2(x, y) return true } // match: (Rsh64x64 x y) // cond: !shiftIsBounded(v) // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v0.AuxInt = opToAuxInt(OpARM64LessThanU) v1 := b.NewValue0(v.Pos, OpConst64, y.Type) v1.AuxInt = int64ToAuxInt(63) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = int64ToAuxInt(64) v2.AddArg(y) v0.AddArg3(y, v1, v2) v.AddArg2(x, v0) return true } return false } func rewriteValueARM64_OpRsh64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh64x8 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> x y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v.Type = t v.AddArg2(x, y) return true } // match: (Rsh64x8 x y) // cond: !shiftIsBounded(v) // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v0.AuxInt = opToAuxInt(OpARM64LessThanU) v1 := b.NewValue0(v.Pos, OpConst64, y.Type) v1.AuxInt = int64ToAuxInt(63) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v2.AuxInt = int64ToAuxInt(64) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) v0.AddArg3(y, v1, v2) v.AddArg2(x, v0) return true } return false } func rewriteValueARM64_OpRsh8Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux16 <t> x y) // cond: shiftIsBounded(v) // result: (SRL <t> (ZeroExt8to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRL) v.Type = t v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh8Ux16 <t> x y) // cond: !shiftIsBounded(v) // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = int64ToAuxInt(0) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v.AddArg3(v0, v2, v3) return true } return false } func rewriteValueARM64_OpRsh8Ux32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux32 <t> x y) // cond: shiftIsBounded(v) // result: (SRL <t> (ZeroExt8to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRL) v.Type = t v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh8Ux32 <t> x y) // cond: 
!shiftIsBounded(v) // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = int64ToAuxInt(0) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v.AddArg3(v0, v2, v3) return true } return false } func rewriteValueARM64_OpRsh8Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux64 <t> x y) // cond: shiftIsBounded(v) // result: (SRL <t> (ZeroExt8to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRL) v.Type = t v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh8Ux64 <t> x y) // cond: !shiftIsBounded(v) // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y)) for { t := v.Type x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = int64ToAuxInt(0) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v3.AddArg(y) v.AddArg3(v0, v2, v3) return true } return false } func rewriteValueARM64_OpRsh8Ux8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8Ux8 <t> x y) // cond: shiftIsBounded(v) // result: (SRL <t> (ZeroExt8to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRL) v.Type = t v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh8Ux8 <t> x y) // cond: !shiftIsBounded(v) // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64CSEL) v.AuxInt = opToAuxInt(OpARM64LessThanU) v0 := b.NewValue0(v.Pos, OpARM64SRL, t) v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(x) v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpConst64, t) v2.AuxInt = int64ToAuxInt(0) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v.AddArg3(v0, v2, v3) return true } return false } func rewriteValueARM64_OpRsh8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x16 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> (SignExt8to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh8x16 x y) // cond: !shiftIsBounded(v) // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 
:= b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = int64ToAuxInt(63) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v1.AddArg3(y, v2, v3) v.AddArg2(v0, v1) return true } return false } func rewriteValueARM64_OpRsh8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x32 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> (SignExt8to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh8x32 x y) // cond: !shiftIsBounded(v) // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = int64ToAuxInt(63) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v1.AddArg3(y, v2, v3) v.AddArg2(v0, v1) return true } return false } func rewriteValueARM64_OpRsh8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x64 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> (SignExt8to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh8x64 x y) // cond: !shiftIsBounded(v) // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = int64ToAuxInt(63) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v3.AddArg(y) v1.AddArg3(y, v2, v3) v.AddArg2(v0, v1) return true } return false } func rewriteValueARM64_OpRsh8x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Rsh8x8 <t> x y) // cond: shiftIsBounded(v) // result: (SRA <t> (SignExt8to64 x) y) for { t := v.Type x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v.Type = t v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) v.AddArg2(v0, y) return true } // match: (Rsh8x8 x y) // cond: !shiftIsBounded(v) // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { x := v_0 y := v_1 if !(!shiftIsBounded(v)) { break } v.reset(OpARM64SRA) v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) v1.AuxInt = opToAuxInt(OpARM64LessThanU) v2 := b.NewValue0(v.Pos, OpConst64, y.Type) v2.AuxInt = 
int64ToAuxInt(63) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v3.AuxInt = int64ToAuxInt(64) v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) v1.AddArg3(y, v2, v3) v.AddArg2(v0, v1) return true } return false } func rewriteValueARM64_OpSelect0(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Select0 (Mul64uhilo x y)) // result: (UMULH x y) for { if v_0.Op != OpMul64uhilo { break } y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64UMULH) v.AddArg2(x, y) return true } // match: (Select0 (Add64carry x y c)) // result: (Select0 <typ.UInt64> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c)))) for { if v_0.Op != OpAdd64carry { break } c := v_0.Args[2] x := v_0.Args[0] y := v_0.Args[1] v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpARM64ADCSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpARM64ADDSconstflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v2.AuxInt = int64ToAuxInt(-1) v2.AddArg(c) v1.AddArg(v2) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } // match: (Select0 (Sub64borrow x y bo)) // result: (Select0 <typ.UInt64> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo)))) for { if v_0.Op != OpSub64borrow { break } bo := v_0.Args[2] x := v_0.Args[0] y := v_0.Args[1] v.reset(OpSelect0) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpARM64SBCSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpARM64NEGSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v2.AddArg(bo) v1.AddArg(v2) v0.AddArg3(x, y, v1) v.AddArg(v0) return true } // match: (Select0 (Mul64uover x y)) // result: (MUL x y) for { if v_0.Op != OpMul64uover { break } y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64MUL) v.AddArg2(x, y) return true } return false } func rewriteValueARM64_OpSelect1(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types // match: (Select1 (Mul64uhilo x y)) // result: (MUL x y) for { if v_0.Op != OpMul64uhilo { break } y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64MUL) v.AddArg2(x, y) return true } // match: (Select1 (Add64carry x y c)) // result: (ADCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c))))) for { if v_0.Op != OpAdd64carry { break } c := v_0.Args[2] x := v_0.Args[0] y := v_0.Args[1] v.reset(OpARM64ADCzerocarry) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v1 := b.NewValue0(v.Pos, OpARM64ADCSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v3 := b.NewValue0(v.Pos, OpARM64ADDSconstflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v3.AuxInt = int64ToAuxInt(-1) v3.AddArg(c) v2.AddArg(v3) v1.AddArg3(x, y, v2) v0.AddArg(v1) v.AddArg(v0) return true } // match: (Select1 (Sub64borrow x y bo)) // result: (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo)))))) for { if v_0.Op != OpSub64borrow { break } bo := v_0.Args[2] x := v_0.Args[0] y := v_0.Args[1] v.reset(OpARM64NEG) v.Type = typ.UInt64 v0 := b.NewValue0(v.Pos, OpARM64NGCzerocarry, typ.UInt64) v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v2 := b.NewValue0(v.Pos, OpARM64SBCSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) v4 := b.NewValue0(v.Pos, 
OpARM64NEGSflags, types.NewTuple(typ.UInt64, types.TypeFlags)) v4.AddArg(bo) v3.AddArg(v4) v2.AddArg3(x, y, v3) v1.AddArg(v2) v0.AddArg(v1) v.AddArg(v0) return true } // match: (Select1 (Mul64uover x y)) // result: (NotEqual (CMPconst (UMULH <typ.UInt64> x y) [0])) for { if v_0.Op != OpMul64uover { break } y := v_0.Args[1] x := v_0.Args[0] v.reset(OpARM64NotEqual) v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64UMULH, typ.UInt64) v1.AddArg2(x, y) v0.AddArg(v1) v.AddArg(v0) return true } return false } func rewriteValueARM64_OpSelectN(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem))))) // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1, s2, s3, call) // result: (Move [sz] dst src mem) for { if auxIntToInt64(v.AuxInt) != 0 { break } call := v_0 if call.Op != OpARM64CALLstatic || len(call.Args) != 1 { break } sym := auxToCall(call.Aux) s1 := call.Args[0] if s1.Op != OpARM64MOVDstore { break } _ = s1.Args[2] s1_1 := s1.Args[1] if s1_1.Op != OpARM64MOVDconst { break } sz := auxIntToInt64(s1_1.AuxInt) s2 := s1.Args[2] if s2.Op != OpARM64MOVDstore { break } _ = s2.Args[2] src := s2.Args[1] s3 := s2.Args[2] if s3.Op != OpARM64MOVDstore { break } mem := s3.Args[2] dst := s3.Args[1] if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1, s2, s3, call)) { break } v.reset(OpMove) v.AuxInt = int64ToAuxInt(sz) v.AddArg3(dst, src, mem) return true } // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem)) // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call) // result: (Move [sz] dst src mem) for { if auxIntToInt64(v.AuxInt) != 0 { break } call := v_0 if call.Op != OpARM64CALLstatic || len(call.Args) != 4 { break } sym := auxToCall(call.Aux) mem := call.Args[3] dst := call.Args[0] src := call.Args[1] call_2 := call.Args[2] if call_2.Op != OpARM64MOVDconst { break } sz := auxIntToInt64(call_2.AuxInt) if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) { break } v.reset(OpMove) v.AuxInt = int64ToAuxInt(sz) v.AddArg3(dst, src, mem) return true } return false } func rewriteValueARM64_OpSlicemask(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Slicemask <t> x) // result: (SRAconst (NEG <t> x) [63]) for { t := v.Type x := v_0 v.reset(OpARM64SRAconst) v.AuxInt = int64ToAuxInt(63) v0 := b.NewValue0(v.Pos, OpARM64NEG, t) v0.AddArg(x) v.AddArg(v0) return true } } func rewriteValueARM64_OpStore(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] // match: (Store {t} ptr val mem) // cond: t.Size() == 1 // result: (MOVBstore ptr val mem) for { t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 if !(t.Size() == 1) { break } v.reset(OpARM64MOVBstore) v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) // cond: t.Size() == 2 // result: (MOVHstore ptr val mem) for { t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 if !(t.Size() == 2) { break } v.reset(OpARM64MOVHstore) v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) // cond: t.Size() == 4 
&& !t.IsFloat() // result: (MOVWstore ptr val mem) for { t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 if !(t.Size() == 4 && !t.IsFloat()) { break } v.reset(OpARM64MOVWstore) v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) // cond: t.Size() == 8 && !t.IsFloat() // result: (MOVDstore ptr val mem) for { t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 if !(t.Size() == 8 && !t.IsFloat()) { break } v.reset(OpARM64MOVDstore) v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) // cond: t.Size() == 4 && t.IsFloat() // result: (FMOVSstore ptr val mem) for { t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 if !(t.Size() == 4 && t.IsFloat()) { break } v.reset(OpARM64FMOVSstore) v.AddArg3(ptr, val, mem) return true } // match: (Store {t} ptr val mem) // cond: t.Size() == 8 && t.IsFloat() // result: (FMOVDstore ptr val mem) for { t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 if !(t.Size() == 8 && t.IsFloat()) { break } v.reset(OpARM64FMOVDstore) v.AddArg3(ptr, val, mem) return true } return false } func rewriteValueARM64_OpZero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block config := b.Func.Config typ := &b.Func.Config.Types // match: (Zero [0] _ mem) // result: mem for { if auxIntToInt64(v.AuxInt) != 0 { break } mem := v_1 v.copyOf(mem) return true } // match: (Zero [1] ptr mem) // result: (MOVBstore ptr (MOVDconst [0]) mem) for { if auxIntToInt64(v.AuxInt) != 1 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVBstore) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) // result: (MOVHstore ptr (MOVDconst [0]) mem) for { if auxIntToInt64(v.AuxInt) != 2 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVHstore) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] ptr mem) // result: (MOVWstore ptr (MOVDconst [0]) mem) for { if auxIntToInt64(v.AuxInt) != 4 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVWstore) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [3] ptr mem) // result: (MOVBstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem)) for { if auxIntToInt64(v.AuxInt) != 3 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = int32ToAuxInt(2) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [5] ptr mem) // result: (MOVBstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) for { if auxIntToInt64(v.AuxInt) != 5 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = int32ToAuxInt(4) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [6] ptr mem) // result: (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) for { if auxIntToInt64(v.AuxInt) != 6 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVHstore) v.AuxInt = int32ToAuxInt(4) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } 
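// The Zero [7] rule below (like the [11] and [13]..[15] rules further down)
// relies on overlapping stores instead of a byte/half/word chain. Sketch of
// the [7] case (illustrative only, not generated code):
//
//	*(*uint32)(ptr + 0) = 0 // MOVWstore: zeroes bytes 0..3
//	*(*uint32)(ptr + 3) = 0 // MOVWstore [3]: zeroes bytes 3..6, overlapping byte 3
//
// The one-byte overlap is harmless since both stores write zero, and ARM64
// permits the misaligned second store, so seven bytes cost two instructions.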
// match: (Zero [7] ptr mem) // result: (MOVWstore [3] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) for { if auxIntToInt64(v.AuxInt) != 7 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVWstore) v.AuxInt = int32ToAuxInt(3) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [8] ptr mem) // result: (MOVDstore ptr (MOVDconst [0]) mem) for { if auxIntToInt64(v.AuxInt) != 8 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVDstore) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [9] ptr mem) // result: (MOVBstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) for { if auxIntToInt64(v.AuxInt) != 9 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVBstore) v.AuxInt = int32ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [10] ptr mem) // result: (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) for { if auxIntToInt64(v.AuxInt) != 10 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVHstore) v.AuxInt = int32ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [11] ptr mem) // result: (MOVDstore [3] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) for { if auxIntToInt64(v.AuxInt) != 11 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVDstore) v.AuxInt = int32ToAuxInt(3) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [12] ptr mem) // result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) for { if auxIntToInt64(v.AuxInt) != 12 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVWstore) v.AuxInt = int32ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [13] ptr mem) // result: (MOVDstore [5] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) for { if auxIntToInt64(v.AuxInt) != 13 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVDstore) v.AuxInt = int32ToAuxInt(5) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [14] ptr mem) // result: (MOVDstore [6] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) for { if auxIntToInt64(v.AuxInt) != 14 { break } ptr := v_0 mem := v_1 v.reset(OpARM64MOVDstore) v.AuxInt = int32ToAuxInt(6) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [15] ptr mem) // result: (MOVDstore [7] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) for { if auxIntToInt64(v.AuxInt) != 15 { break } ptr := 
v_0 mem := v_1 v.reset(OpARM64MOVDstore) v.AuxInt = int32ToAuxInt(7) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } // match: (Zero [16] ptr mem) // result: (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem) for { if auxIntToInt64(v.AuxInt) != 16 { break } ptr := v_0 mem := v_1 v.reset(OpARM64STP) v.AuxInt = int32ToAuxInt(0) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v.AddArg4(ptr, v0, v0, mem) return true } // match: (Zero [32] ptr mem) // result: (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)) for { if auxIntToInt64(v.AuxInt) != 32 { break } ptr := v_0 mem := v_1 v.reset(OpARM64STP) v.AuxInt = int32ToAuxInt(16) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v1.AuxInt = int32ToAuxInt(0) v1.AddArg4(ptr, v0, v0, mem) v.AddArg4(ptr, v0, v0, v1) return true } // match: (Zero [48] ptr mem) // result: (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))) for { if auxIntToInt64(v.AuxInt) != 48 { break } ptr := v_0 mem := v_1 v.reset(OpARM64STP) v.AuxInt = int32ToAuxInt(32) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v1.AuxInt = int32ToAuxInt(16) v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v2.AuxInt = int32ToAuxInt(0) v2.AddArg4(ptr, v0, v0, mem) v1.AddArg4(ptr, v0, v0, v2) v.AddArg4(ptr, v0, v0, v1) return true } // match: (Zero [64] ptr mem) // result: (STP [48] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))) for { if auxIntToInt64(v.AuxInt) != 64 { break } ptr := v_0 mem := v_1 v.reset(OpARM64STP) v.AuxInt = int32ToAuxInt(48) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = int64ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v1.AuxInt = int32ToAuxInt(32) v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v2.AuxInt = int32ToAuxInt(16) v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) v3.AuxInt = int32ToAuxInt(0) v3.AddArg4(ptr, v0, v0, mem) v2.AddArg4(ptr, v0, v0, v3) v1.AddArg4(ptr, v0, v0, v2) v.AddArg4(ptr, v0, v0, v1) return true } // match: (Zero [s] ptr mem) // cond: s%16 != 0 && s%16 <= 8 && s > 16 // result: (Zero [8] (OffPtr <ptr.Type> ptr [s-8]) (Zero [s-s%16] ptr mem)) for { s := auxIntToInt64(v.AuxInt) ptr := v_0 mem := v_1 if !(s%16 != 0 && s%16 <= 8 && s > 16) { break } v.reset(OpZero) v.AuxInt = int64ToAuxInt(8) v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type) v0.AuxInt = int64ToAuxInt(s - 8) v0.AddArg(ptr) v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v1.AuxInt = int64ToAuxInt(s - s%16) v1.AddArg2(ptr, mem) v.AddArg2(v0, v1) return true } // match: (Zero [s] ptr mem) // cond: s%16 != 0 && s%16 > 8 && s > 16 // result: (Zero [16] (OffPtr <ptr.Type> ptr [s-16]) (Zero [s-s%16] ptr mem)) for { s := auxIntToInt64(v.AuxInt) ptr := v_0 mem := v_1 if !(s%16 != 0 && s%16 > 8 && s > 16) { break } v.reset(OpZero) v.AuxInt = int64ToAuxInt(16) v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type) v0.AuxInt = int64ToAuxInt(s - 16) v0.AddArg(ptr) v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) v1.AuxInt = int64ToAuxInt(s 
- s%16) v1.AddArg2(ptr, mem) v.AddArg2(v0, v1) return true } // match: (Zero [s] ptr mem) // cond: s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice // result: (DUFFZERO [4 * (64 - s/16)] ptr mem) for { s := auxIntToInt64(v.AuxInt) ptr := v_0 mem := v_1 if !(s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice) { break } v.reset(OpARM64DUFFZERO) v.AuxInt = int64ToAuxInt(4 * (64 - s/16)) v.AddArg2(ptr, mem) return true } // match: (Zero [s] ptr mem) // cond: s%16 == 0 && (s > 16*64 || config.noDuffDevice) // result: (LoweredZero ptr (ADDconst <ptr.Type> [s-16] ptr) mem) for { s := auxIntToInt64(v.AuxInt) ptr := v_0 mem := v_1 if !(s%16 == 0 && (s > 16*64 || config.noDuffDevice)) { break } v.reset(OpARM64LoweredZero) v0 := b.NewValue0(v.Pos, OpARM64ADDconst, ptr.Type) v0.AuxInt = int64ToAuxInt(s - 16) v0.AddArg(ptr) v.AddArg3(ptr, v0, mem) return true } return false } func rewriteBlockARM64(b *Block) bool { typ := &b.Func.Config.Types switch b.Kind { case BlockARM64EQ: // match: (EQ (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (EQ (TST x y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64EQ, v0) return true } break } // match: (EQ (CMPconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (EQ (TSTconst [c] y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPWconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (EQ (TSTW x y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64EQ, v0) return true } break } // match: (EQ (CMPWconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (EQ (TSTWconst [int32(c)] y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPconst [0] x:(ADDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (EQ (CMNconst [c] y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = 
int64ToAuxInt(c) v0.AddArg(y) b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPWconst [0] x:(ADDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (EQ (CMNWconst [int32(c)] y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 // result: (EQ (CMN x y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64EQ, v0) return true } break } // match: (EQ (CMPWconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 // result: (EQ (CMNW x y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64EQ, v0) return true } break } // match: (EQ (CMP x z:(NEG y)) yes no) // cond: z.Uses == 1 // result: (EQ (CMN x y) yes no) for b.Controls[0].Op == OpARM64CMP { v_0 := b.Controls[0] _ = v_0.Args[1] x := v_0.Args[0] z := v_0.Args[1] if z.Op != OpARM64NEG { break } y := z.Args[0] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPW x z:(NEG y)) yes no) // cond: z.Uses == 1 // result: (EQ (CMNW x y) yes no) for b.Controls[0].Op == OpARM64CMPW { v_0 := b.Controls[0] _ = v_0.Args[1] x := v_0.Args[0] z := v_0.Args[1] if z.Op != OpARM64NEG { break } y := z.Args[0] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPconst [0] x) yes no) // result: (Z x yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] b.resetWithControl(BlockARM64Z, x) return true } // match: (EQ (CMPWconst [0] x) yes no) // result: (ZW x yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] b.resetWithControl(BlockARM64ZW, x) return true } // match: (EQ (CMPconst [0] z:(MADD a x y)) yes no) // cond: z.Uses==1 // result: (EQ (CMN a (MUL <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADD { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPconst [0] 
z:(MSUB a x y)) yes no) // cond: z.Uses==1 // result: (EQ (CMP a (MUL <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUB { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPWconst [0] z:(MADDW a x y)) yes no) // cond: z.Uses==1 // result: (EQ (CMNW a (MULW <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADDW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (CMPWconst [0] z:(MSUBW a x y)) yes no) // cond: z.Uses==1 // result: (EQ (CMPW a (MULW <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUBW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64EQ, v0) return true } // match: (EQ (TSTconst [c] x) yes no) // cond: oneBit(c) // result: (TBZ [int64(ntz64(c))] x yes no) for b.Controls[0].Op == OpARM64TSTconst { v_0 := b.Controls[0] c := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(oneBit(c)) { break } b.resetWithControl(BlockARM64TBZ, x) b.AuxInt = int64ToAuxInt(int64(ntz64(c))) return true } // match: (EQ (TSTWconst [c] x) yes no) // cond: oneBit(int64(uint32(c))) // result: (TBZ [int64(ntz64(int64(uint32(c))))] x yes no) for b.Controls[0].Op == OpARM64TSTWconst { v_0 := b.Controls[0] c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] if !(oneBit(int64(uint32(c)))) { break } b.resetWithControl(BlockARM64TBZ, x) b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c))))) return true } // match: (EQ (FlagConstant [fc]) yes no) // cond: fc.eq() // result: (First yes no) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(fc.eq()) { break } b.Reset(BlockFirst) return true } // match: (EQ (FlagConstant [fc]) yes no) // cond: !fc.eq() // result: (First no yes) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(!fc.eq()) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (EQ (InvertFlags cmp) yes no) // result: (EQ cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64EQ, cmp) return true } case BlockARM64FGE: // match: (FGE (InvertFlags cmp) yes no) // result: (FLE cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64FLE, cmp) return true } case BlockARM64FGT: // match: (FGT (InvertFlags cmp) yes no) // result: (FLT cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64FLT, cmp) return true } case BlockARM64FLE: // match: (FLE 
(InvertFlags cmp) yes no) // result: (FGE cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64FGE, cmp) return true } case BlockARM64FLT: // match: (FLT (InvertFlags cmp) yes no) // result: (FGT cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64FGT, cmp) return true } case BlockARM64GE: // match: (GE (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (GE (TST x y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64GE, v0) return true } break } // match: (GE (CMPconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (GE (TSTconst [c] y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPWconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (GE (TSTW x y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64GE, v0) return true } break } // match: (GE (CMPWconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (GE (TSTWconst [int32(c)] y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) b.resetWithControl(BlockARM64GE, v0) return true } // match: (GE (CMPconst [0] x:(ADDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (GEnoov (CMNconst [c] y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) b.resetWithControl(BlockARM64GEnoov, v0) return true } // match: (GE (CMPWconst [0] x:(ADDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (GEnoov (CMNWconst [int32(c)] y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) 
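// Note the rewrite targets GEnoov rather than GE: folding the ADDconst into a
// flag-setting CMNconst means the V flag now records the addition's overflow,
// so the usual signed-GE condition (N == V) could misbranch when the add
// overflows. The "noov" block kinds decide on N and Z alone, roughly
// "wrapped result >= 0". Sketch of the intent (illustrative):
//
//	// before: t := y + c; if t >= 0 { yes } else { no }  (ADDconst + CMPconst [0])
//	// after:  CMN y, #c; branch GEnoov                   (one flag-setting add)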
b.resetWithControl(BlockARM64GEnoov, v0) return true } // match: (GE (CMPconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 // result: (GEnoov (CMN x y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64GEnoov, v0) return true } break } // match: (GE (CMPWconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 // result: (GEnoov (CMNW x y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64GEnoov, v0) return true } break } // match: (GE (CMPconst [0] z:(MADD a x y)) yes no) // cond: z.Uses==1 // result: (GEnoov (CMN a (MUL <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADD { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64GEnoov, v0) return true } // match: (GE (CMPconst [0] z:(MSUB a x y)) yes no) // cond: z.Uses==1 // result: (GEnoov (CMP a (MUL <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUB { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64GEnoov, v0) return true } // match: (GE (CMPWconst [0] z:(MADDW a x y)) yes no) // cond: z.Uses==1 // result: (GEnoov (CMNW a (MULW <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADDW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64GEnoov, v0) return true } // match: (GE (CMPWconst [0] z:(MSUBW a x y)) yes no) // cond: z.Uses==1 // result: (GEnoov (CMPW a (MULW <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUBW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64GEnoov, v0) return true } // match: (GE (CMPWconst [0] x) yes no) // result: (TBZ [31] x yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } 
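// This rule (and the 64-bit CMPconst variant right after it) collapses a
// signed ">= 0" branch into one test-bit-and-branch instruction: x >= 0
// exactly when the sign bit is clear. Sketch (illustrative only):
//
//	if x >= 0 { ... } // int32 x
//	// becomes: TBZ x, #31, yes   (branch to "yes" when bit 31 is zero)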
x := v_0.Args[0] b.resetWithControl(BlockARM64TBZ, x) b.AuxInt = int64ToAuxInt(31) return true } // match: (GE (CMPconst [0] x) yes no) // result: (TBZ [63] x yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] b.resetWithControl(BlockARM64TBZ, x) b.AuxInt = int64ToAuxInt(63) return true } // match: (GE (FlagConstant [fc]) yes no) // cond: fc.ge() // result: (First yes no) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(fc.ge()) { break } b.Reset(BlockFirst) return true } // match: (GE (FlagConstant [fc]) yes no) // cond: !fc.ge() // result: (First no yes) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(!fc.ge()) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (GE (InvertFlags cmp) yes no) // result: (LE cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64LE, cmp) return true } case BlockARM64GEnoov: // match: (GEnoov (FlagConstant [fc]) yes no) // cond: fc.geNoov() // result: (First yes no) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(fc.geNoov()) { break } b.Reset(BlockFirst) return true } // match: (GEnoov (FlagConstant [fc]) yes no) // cond: !fc.geNoov() // result: (First no yes) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(!fc.geNoov()) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (GEnoov (InvertFlags cmp) yes no) // result: (LEnoov cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64LEnoov, cmp) return true } case BlockARM64GT: // match: (GT (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (GT (TST x y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64GT, v0) return true } break } // match: (GT (CMPconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (GT (TSTconst [c] y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPWconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (GT (TSTW x y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64GT, v0) return true } break } // match: (GT (CMPWconst [0] x:(ANDconst [c] y)) yes no) // 
cond: x.Uses == 1 // result: (GT (TSTWconst [int32(c)] y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) b.resetWithControl(BlockARM64GT, v0) return true } // match: (GT (CMPconst [0] x:(ADDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (GTnoov (CMNconst [c] y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) b.resetWithControl(BlockARM64GTnoov, v0) return true } // match: (GT (CMPWconst [0] x:(ADDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (GTnoov (CMNWconst [int32(c)] y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) b.resetWithControl(BlockARM64GTnoov, v0) return true } // match: (GT (CMPconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 // result: (GTnoov (CMN x y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64GTnoov, v0) return true } break } // match: (GT (CMPWconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 // result: (GTnoov (CMNW x y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64GTnoov, v0) return true } break } // match: (GT (CMPconst [0] z:(MADD a x y)) yes no) // cond: z.Uses==1 // result: (GTnoov (CMN a (MUL <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADD { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64GTnoov, v0) return true } // match: (GT (CMPconst [0] z:(MSUB a x y)) yes no) // cond: z.Uses==1 // result: (GTnoov (CMP a (MUL <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUB { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, 
OpARM64CMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64GTnoov, v0) return true } // match: (GT (CMPWconst [0] z:(MADDW a x y)) yes no) // cond: z.Uses==1 // result: (GTnoov (CMNW a (MULW <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADDW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64GTnoov, v0) return true } // match: (GT (CMPWconst [0] z:(MSUBW a x y)) yes no) // cond: z.Uses==1 // result: (GTnoov (CMPW a (MULW <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUBW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64GTnoov, v0) return true } // match: (GT (FlagConstant [fc]) yes no) // cond: fc.gt() // result: (First yes no) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(fc.gt()) { break } b.Reset(BlockFirst) return true } // match: (GT (FlagConstant [fc]) yes no) // cond: !fc.gt() // result: (First no yes) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(!fc.gt()) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (GT (InvertFlags cmp) yes no) // result: (LT cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64LT, cmp) return true } case BlockARM64GTnoov: // match: (GTnoov (FlagConstant [fc]) yes no) // cond: fc.gtNoov() // result: (First yes no) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(fc.gtNoov()) { break } b.Reset(BlockFirst) return true } // match: (GTnoov (FlagConstant [fc]) yes no) // cond: !fc.gtNoov() // result: (First no yes) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(!fc.gtNoov()) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (GTnoov (InvertFlags cmp) yes no) // result: (LTnoov cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64LTnoov, cmp) return true } case BlockIf: // match: (If (Equal cc) yes no) // result: (EQ cc yes no) for b.Controls[0].Op == OpARM64Equal { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64EQ, cc) return true } // match: (If (NotEqual cc) yes no) // result: (NE cc yes no) for b.Controls[0].Op == OpARM64NotEqual { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64NE, cc) return true } // match: (If (LessThan cc) yes no) // result: (LT cc yes no) for b.Controls[0].Op == OpARM64LessThan { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64LT, cc) return true } // match: (If (LessThanU cc) yes no) // result: (ULT cc yes no) for b.Controls[0].Op == OpARM64LessThanU { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64ULT, 
cc) return true } // match: (If (LessEqual cc) yes no) // result: (LE cc yes no) for b.Controls[0].Op == OpARM64LessEqual { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64LE, cc) return true } // match: (If (LessEqualU cc) yes no) // result: (ULE cc yes no) for b.Controls[0].Op == OpARM64LessEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64ULE, cc) return true } // match: (If (GreaterThan cc) yes no) // result: (GT cc yes no) for b.Controls[0].Op == OpARM64GreaterThan { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64GT, cc) return true } // match: (If (GreaterThanU cc) yes no) // result: (UGT cc yes no) for b.Controls[0].Op == OpARM64GreaterThanU { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64UGT, cc) return true } // match: (If (GreaterEqual cc) yes no) // result: (GE cc yes no) for b.Controls[0].Op == OpARM64GreaterEqual { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64GE, cc) return true } // match: (If (GreaterEqualU cc) yes no) // result: (UGE cc yes no) for b.Controls[0].Op == OpARM64GreaterEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64UGE, cc) return true } // match: (If (LessThanF cc) yes no) // result: (FLT cc yes no) for b.Controls[0].Op == OpARM64LessThanF { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64FLT, cc) return true } // match: (If (LessEqualF cc) yes no) // result: (FLE cc yes no) for b.Controls[0].Op == OpARM64LessEqualF { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64FLE, cc) return true } // match: (If (GreaterThanF cc) yes no) // result: (FGT cc yes no) for b.Controls[0].Op == OpARM64GreaterThanF { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64FGT, cc) return true } // match: (If (GreaterEqualF cc) yes no) // result: (FGE cc yes no) for b.Controls[0].Op == OpARM64GreaterEqualF { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64FGE, cc) return true } // match: (If cond yes no) // result: (TBNZ [0] cond yes no) for { cond := b.Controls[0] b.resetWithControl(BlockARM64TBNZ, cond) b.AuxInt = int64ToAuxInt(0) return true } case BlockJumpTable: // match: (JumpTable idx) // result: (JUMPTABLE {makeJumpTableSym(b)} idx (MOVDaddr <typ.Uintptr> {makeJumpTableSym(b)} (SB))) for { idx := b.Controls[0] v0 := b.NewValue0(b.Pos, OpARM64MOVDaddr, typ.Uintptr) v0.Aux = symToAux(makeJumpTableSym(b)) v1 := b.NewValue0(b.Pos, OpSB, typ.Uintptr) v0.AddArg(v1) b.resetWithControl2(BlockARM64JUMPTABLE, idx, v0) b.Aux = symToAux(makeJumpTableSym(b)) return true } case BlockARM64LE: // match: (LE (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (LE (TST x y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64LE, v0) return true } break } // match: (LE (CMPconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (LE (TSTconst [c] y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 
:= b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPWconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (LE (TSTW x y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64LE, v0) return true } break } // match: (LE (CMPWconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (LE (TSTWconst [int32(c)] y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) b.resetWithControl(BlockARM64LE, v0) return true } // match: (LE (CMPconst [0] x:(ADDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (LEnoov (CMNconst [c] y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) b.resetWithControl(BlockARM64LEnoov, v0) return true } // match: (LE (CMPWconst [0] x:(ADDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (LEnoov (CMNWconst [int32(c)] y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) b.resetWithControl(BlockARM64LEnoov, v0) return true } // match: (LE (CMPconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 // result: (LEnoov (CMN x y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64LEnoov, v0) return true } break } // match: (LE (CMPWconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 // result: (LEnoov (CMNW x y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64LEnoov, v0) return true } break } // match: (LE (CMPconst [0] z:(MADD a x y)) yes no) // cond: z.Uses==1 // result: (LEnoov (CMN a (MUL <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := 
b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADD { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64LEnoov, v0) return true } // match: (LE (CMPconst [0] z:(MSUB a x y)) yes no) // cond: z.Uses==1 // result: (LEnoov (CMP a (MUL <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUB { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64LEnoov, v0) return true } // match: (LE (CMPWconst [0] z:(MADDW a x y)) yes no) // cond: z.Uses==1 // result: (LEnoov (CMNW a (MULW <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADDW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64LEnoov, v0) return true } // match: (LE (CMPWconst [0] z:(MSUBW a x y)) yes no) // cond: z.Uses==1 // result: (LEnoov (CMPW a (MULW <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUBW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64LEnoov, v0) return true } // match: (LE (FlagConstant [fc]) yes no) // cond: fc.le() // result: (First yes no) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(fc.le()) { break } b.Reset(BlockFirst) return true } // match: (LE (FlagConstant [fc]) yes no) // cond: !fc.le() // result: (First no yes) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(!fc.le()) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (LE (InvertFlags cmp) yes no) // result: (GE cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64GE, cmp) return true } case BlockARM64LEnoov: // match: (LEnoov (FlagConstant [fc]) yes no) // cond: fc.leNoov() // result: (First yes no) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(fc.leNoov()) { break } b.Reset(BlockFirst) return true } // match: (LEnoov (FlagConstant [fc]) yes no) // cond: !fc.leNoov() // result: (First no yes) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(!fc.leNoov()) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (LEnoov (InvertFlags cmp) yes no) // result: (GEnoov cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64GEnoov, cmp) return true } case 
BlockARM64LT: // match: (LT (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (LT (TST x y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64LT, v0) return true } break } // match: (LT (CMPconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (LT (TSTconst [c] y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPWconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (LT (TSTW x y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64LT, v0) return true } break } // match: (LT (CMPWconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (LT (TSTWconst [int32(c)] y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) b.resetWithControl(BlockARM64LT, v0) return true } // match: (LT (CMPconst [0] x:(ADDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (LTnoov (CMNconst [c] y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) b.resetWithControl(BlockARM64LTnoov, v0) return true } // match: (LT (CMPWconst [0] x:(ADDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (LTnoov (CMNWconst [int32(c)] y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) b.resetWithControl(BlockARM64LTnoov, v0) return true } // match: (LT (CMPconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 // result: (LTnoov (CMN x y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := 
z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64LTnoov, v0) return true } break } // match: (LT (CMPWconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 // result: (LTnoov (CMNW x y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64LTnoov, v0) return true } break } // match: (LT (CMPconst [0] z:(MADD a x y)) yes no) // cond: z.Uses==1 // result: (LTnoov (CMN a (MUL <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADD { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64LTnoov, v0) return true } // match: (LT (CMPconst [0] z:(MSUB a x y)) yes no) // cond: z.Uses==1 // result: (LTnoov (CMP a (MUL <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUB { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64LTnoov, v0) return true } // match: (LT (CMPWconst [0] z:(MADDW a x y)) yes no) // cond: z.Uses==1 // result: (LTnoov (CMNW a (MULW <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADDW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64LTnoov, v0) return true } // match: (LT (CMPWconst [0] z:(MSUBW a x y)) yes no) // cond: z.Uses==1 // result: (LTnoov (CMPW a (MULW <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUBW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64LTnoov, v0) return true } // match: (LT (CMPWconst [0] x) yes no) // result: (TBNZ [31] x yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] b.resetWithControl(BlockARM64TBNZ, x) b.AuxInt = int64ToAuxInt(31) return true } // match: (LT (CMPconst [0] x) yes no) // result: (TBNZ [63] x yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] b.resetWithControl(BlockARM64TBNZ, x) b.AuxInt = int64ToAuxInt(63) return true } // match: (LT (FlagConstant [fc]) yes no) // cond: 
fc.lt() // result: (First yes no) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(fc.lt()) { break } b.Reset(BlockFirst) return true } // match: (LT (FlagConstant [fc]) yes no) // cond: !fc.lt() // result: (First no yes) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(!fc.lt()) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (LT (InvertFlags cmp) yes no) // result: (GT cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64GT, cmp) return true } case BlockARM64LTnoov: // match: (LTnoov (FlagConstant [fc]) yes no) // cond: fc.ltNoov() // result: (First yes no) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(fc.ltNoov()) { break } b.Reset(BlockFirst) return true } // match: (LTnoov (FlagConstant [fc]) yes no) // cond: !fc.ltNoov() // result: (First no yes) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(!fc.ltNoov()) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (LTnoov (InvertFlags cmp) yes no) // result: (GTnoov cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64GTnoov, cmp) return true } case BlockARM64NE: // match: (NE (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (NE (TST x y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64NE, v0) return true } break } // match: (NE (CMPconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (NE (TSTconst [c] y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPWconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 // result: (NE (TSTW x y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64AND { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64NE, v0) return true } break } // match: (NE (CMPWconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (NE (TSTWconst [int32(c)] y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ANDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) 
b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPconst [0] x:(ADDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (NE (CMNconst [c] y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = int64ToAuxInt(c) v0.AddArg(y) b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPWconst [0] x:(ADDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (NE (CMNWconst [int32(c)] y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] if x.Op != OpARM64ADDconst { break } c := auxIntToInt64(x.AuxInt) y := x.Args[0] if !(x.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(int32(c)) v0.AddArg(y) b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 // result: (NE (CMN x y) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64NE, v0) return true } break } // match: (NE (CMPWconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 // result: (NE (CMNW x y) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64ADD { break } _ = z.Args[1] z_0 := z.Args[0] z_1 := z.Args[1] for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { x := z_0 y := z_1 if !(z.Uses == 1) { continue } v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64NE, v0) return true } break } // match: (NE (CMP x z:(NEG y)) yes no) // cond: z.Uses == 1 // result: (NE (CMN x y) yes no) for b.Controls[0].Op == OpARM64CMP { v_0 := b.Controls[0] _ = v_0.Args[1] x := v_0.Args[0] z := v_0.Args[1] if z.Op != OpARM64NEG { break } y := z.Args[0] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPW x z:(NEG y)) yes no) // cond: z.Uses == 1 // result: (NE (CMNW x y) yes no) for b.Controls[0].Op == OpARM64CMPW { v_0 := b.Controls[0] _ = v_0.Args[1] x := v_0.Args[0] z := v_0.Args[1] if z.Op != OpARM64NEG { break } y := z.Args[0] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v0.AddArg2(x, y) b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPconst [0] x) yes no) // result: (NZ x yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } x := v_0.Args[0] b.resetWithControl(BlockARM64NZ, x) return true } // match: (NE (CMPWconst [0] x) yes no) // result: (NZW x yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } x := v_0.Args[0] b.resetWithControl(BlockARM64NZW, x) return true } // match: (NE (CMPconst [0] z:(MADD a x y)) yes no) // cond: z.Uses==1 // result: (NE (CMN a 
(MUL <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADD { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPconst [0] z:(MSUB a x y)) yes no) // cond: z.Uses==1 // result: (NE (CMP a (MUL <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUB { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPWconst [0] z:(MADDW a x y)) yes no) // cond: z.Uses==1 // result: (NE (CMNW a (MULW <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MADDW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (CMPWconst [0] z:(MSUBW a x y)) yes no) // cond: z.Uses==1 // result: (NE (CMPW a (MULW <x.Type> x y)) yes no) for b.Controls[0].Op == OpARM64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } z := v_0.Args[0] if z.Op != OpARM64MSUBW { break } y := z.Args[2] a := z.Args[0] x := z.Args[1] if !(z.Uses == 1) { break } v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags) v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type) v1.AddArg2(x, y) v0.AddArg2(a, v1) b.resetWithControl(BlockARM64NE, v0) return true } // match: (NE (TSTconst [c] x) yes no) // cond: oneBit(c) // result: (TBNZ [int64(ntz64(c))] x yes no) for b.Controls[0].Op == OpARM64TSTconst { v_0 := b.Controls[0] c := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(oneBit(c)) { break } b.resetWithControl(BlockARM64TBNZ, x) b.AuxInt = int64ToAuxInt(int64(ntz64(c))) return true } // match: (NE (TSTWconst [c] x) yes no) // cond: oneBit(int64(uint32(c))) // result: (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no) for b.Controls[0].Op == OpARM64TSTWconst { v_0 := b.Controls[0] c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] if !(oneBit(int64(uint32(c)))) { break } b.resetWithControl(BlockARM64TBNZ, x) b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c))))) return true } // match: (NE (FlagConstant [fc]) yes no) // cond: fc.ne() // result: (First yes no) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(fc.ne()) { break } b.Reset(BlockFirst) return true } // match: (NE (FlagConstant [fc]) yes no) // cond: !fc.ne() // result: (First no yes) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(!fc.ne()) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (NE (InvertFlags cmp) yes no) // result: (NE cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64NE, cmp) return true } case BlockARM64NZ: 
// match: (NZ (Equal cc) yes no) // result: (EQ cc yes no) for b.Controls[0].Op == OpARM64Equal { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64EQ, cc) return true } // match: (NZ (NotEqual cc) yes no) // result: (NE cc yes no) for b.Controls[0].Op == OpARM64NotEqual { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64NE, cc) return true } // match: (NZ (LessThan cc) yes no) // result: (LT cc yes no) for b.Controls[0].Op == OpARM64LessThan { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64LT, cc) return true } // match: (NZ (LessThanU cc) yes no) // result: (ULT cc yes no) for b.Controls[0].Op == OpARM64LessThanU { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64ULT, cc) return true } // match: (NZ (LessEqual cc) yes no) // result: (LE cc yes no) for b.Controls[0].Op == OpARM64LessEqual { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64LE, cc) return true } // match: (NZ (LessEqualU cc) yes no) // result: (ULE cc yes no) for b.Controls[0].Op == OpARM64LessEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64ULE, cc) return true } // match: (NZ (GreaterThan cc) yes no) // result: (GT cc yes no) for b.Controls[0].Op == OpARM64GreaterThan { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64GT, cc) return true } // match: (NZ (GreaterThanU cc) yes no) // result: (UGT cc yes no) for b.Controls[0].Op == OpARM64GreaterThanU { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64UGT, cc) return true } // match: (NZ (GreaterEqual cc) yes no) // result: (GE cc yes no) for b.Controls[0].Op == OpARM64GreaterEqual { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64GE, cc) return true } // match: (NZ (GreaterEqualU cc) yes no) // result: (UGE cc yes no) for b.Controls[0].Op == OpARM64GreaterEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64UGE, cc) return true } // match: (NZ (LessThanF cc) yes no) // result: (FLT cc yes no) for b.Controls[0].Op == OpARM64LessThanF { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64FLT, cc) return true } // match: (NZ (LessEqualF cc) yes no) // result: (FLE cc yes no) for b.Controls[0].Op == OpARM64LessEqualF { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64FLE, cc) return true } // match: (NZ (GreaterThanF cc) yes no) // result: (FGT cc yes no) for b.Controls[0].Op == OpARM64GreaterThanF { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64FGT, cc) return true } // match: (NZ (GreaterEqualF cc) yes no) // result: (FGE cc yes no) for b.Controls[0].Op == OpARM64GreaterEqualF { v_0 := b.Controls[0] cc := v_0.Args[0] b.resetWithControl(BlockARM64FGE, cc) return true } // match: (NZ (ANDconst [c] x) yes no) // cond: oneBit(c) // result: (TBNZ [int64(ntz64(c))] x yes no) for b.Controls[0].Op == OpARM64ANDconst { v_0 := b.Controls[0] c := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(oneBit(c)) { break } b.resetWithControl(BlockARM64TBNZ, x) b.AuxInt = int64ToAuxInt(int64(ntz64(c))) return true } // match: (NZ (MOVDconst [0]) yes no) // result: (First no yes) for b.Controls[0].Op == OpARM64MOVDconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (NZ (MOVDconst [c]) yes no) // cond: c != 0 // result: (First yes no) for b.Controls[0].Op == OpARM64MOVDconst { v_0 := b.Controls[0] c := auxIntToInt64(v_0.AuxInt) if !(c != 0) { break } 
b.Reset(BlockFirst) return true } case BlockARM64NZW: // match: (NZW (ANDconst [c] x) yes no) // cond: oneBit(int64(uint32(c))) // result: (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no) for b.Controls[0].Op == OpARM64ANDconst { v_0 := b.Controls[0] c := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(oneBit(int64(uint32(c)))) { break } b.resetWithControl(BlockARM64TBNZ, x) b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c))))) return true } // match: (NZW (MOVDconst [c]) yes no) // cond: int32(c) == 0 // result: (First no yes) for b.Controls[0].Op == OpARM64MOVDconst { v_0 := b.Controls[0] c := auxIntToInt64(v_0.AuxInt) if !(int32(c) == 0) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (NZW (MOVDconst [c]) yes no) // cond: int32(c) != 0 // result: (First yes no) for b.Controls[0].Op == OpARM64MOVDconst { v_0 := b.Controls[0] c := auxIntToInt64(v_0.AuxInt) if !(int32(c) != 0) { break } b.Reset(BlockFirst) return true } case BlockARM64TBNZ: // match: (TBNZ [0] (Equal cc) yes no) // result: (EQ cc yes no) for b.Controls[0].Op == OpARM64Equal { v_0 := b.Controls[0] cc := v_0.Args[0] if auxIntToInt64(b.AuxInt) != 0 { break } b.resetWithControl(BlockARM64EQ, cc) return true } // match: (TBNZ [0] (NotEqual cc) yes no) // result: (NE cc yes no) for b.Controls[0].Op == OpARM64NotEqual { v_0 := b.Controls[0] cc := v_0.Args[0] if auxIntToInt64(b.AuxInt) != 0 { break } b.resetWithControl(BlockARM64NE, cc) return true } // match: (TBNZ [0] (LessThan cc) yes no) // result: (LT cc yes no) for b.Controls[0].Op == OpARM64LessThan { v_0 := b.Controls[0] cc := v_0.Args[0] if auxIntToInt64(b.AuxInt) != 0 { break } b.resetWithControl(BlockARM64LT, cc) return true } // match: (TBNZ [0] (LessThanU cc) yes no) // result: (ULT cc yes no) for b.Controls[0].Op == OpARM64LessThanU { v_0 := b.Controls[0] cc := v_0.Args[0] if auxIntToInt64(b.AuxInt) != 0 { break } b.resetWithControl(BlockARM64ULT, cc) return true } // match: (TBNZ [0] (LessEqual cc) yes no) // result: (LE cc yes no) for b.Controls[0].Op == OpARM64LessEqual { v_0 := b.Controls[0] cc := v_0.Args[0] if auxIntToInt64(b.AuxInt) != 0 { break } b.resetWithControl(BlockARM64LE, cc) return true } // match: (TBNZ [0] (LessEqualU cc) yes no) // result: (ULE cc yes no) for b.Controls[0].Op == OpARM64LessEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] if auxIntToInt64(b.AuxInt) != 0 { break } b.resetWithControl(BlockARM64ULE, cc) return true } // match: (TBNZ [0] (GreaterThan cc) yes no) // result: (GT cc yes no) for b.Controls[0].Op == OpARM64GreaterThan { v_0 := b.Controls[0] cc := v_0.Args[0] if auxIntToInt64(b.AuxInt) != 0 { break } b.resetWithControl(BlockARM64GT, cc) return true } // match: (TBNZ [0] (GreaterThanU cc) yes no) // result: (UGT cc yes no) for b.Controls[0].Op == OpARM64GreaterThanU { v_0 := b.Controls[0] cc := v_0.Args[0] if auxIntToInt64(b.AuxInt) != 0 { break } b.resetWithControl(BlockARM64UGT, cc) return true } // match: (TBNZ [0] (GreaterEqual cc) yes no) // result: (GE cc yes no) for b.Controls[0].Op == OpARM64GreaterEqual { v_0 := b.Controls[0] cc := v_0.Args[0] if auxIntToInt64(b.AuxInt) != 0 { break } b.resetWithControl(BlockARM64GE, cc) return true } // match: (TBNZ [0] (GreaterEqualU cc) yes no) // result: (UGE cc yes no) for b.Controls[0].Op == OpARM64GreaterEqualU { v_0 := b.Controls[0] cc := v_0.Args[0] if auxIntToInt64(b.AuxInt) != 0 { break } b.resetWithControl(BlockARM64UGE, cc) return true } // match: (TBNZ [0] (LessThanF cc) yes no) // result: (FLT cc yes no) for b.Controls[0].Op == 
OpARM64LessThanF { v_0 := b.Controls[0] cc := v_0.Args[0] if auxIntToInt64(b.AuxInt) != 0 { break } b.resetWithControl(BlockARM64FLT, cc) return true } // match: (TBNZ [0] (LessEqualF cc) yes no) // result: (FLE cc yes no) for b.Controls[0].Op == OpARM64LessEqualF { v_0 := b.Controls[0] cc := v_0.Args[0] if auxIntToInt64(b.AuxInt) != 0 { break } b.resetWithControl(BlockARM64FLE, cc) return true } // match: (TBNZ [0] (GreaterThanF cc) yes no) // result: (FGT cc yes no) for b.Controls[0].Op == OpARM64GreaterThanF { v_0 := b.Controls[0] cc := v_0.Args[0] if auxIntToInt64(b.AuxInt) != 0 { break } b.resetWithControl(BlockARM64FGT, cc) return true } // match: (TBNZ [0] (GreaterEqualF cc) yes no) // result: (FGE cc yes no) for b.Controls[0].Op == OpARM64GreaterEqualF { v_0 := b.Controls[0] cc := v_0.Args[0] if auxIntToInt64(b.AuxInt) != 0 { break } b.resetWithControl(BlockARM64FGE, cc) return true } case BlockARM64UGE: // match: (UGE (FlagConstant [fc]) yes no) // cond: fc.uge() // result: (First yes no) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(fc.uge()) { break } b.Reset(BlockFirst) return true } // match: (UGE (FlagConstant [fc]) yes no) // cond: !fc.uge() // result: (First no yes) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(!fc.uge()) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (UGE (InvertFlags cmp) yes no) // result: (ULE cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64ULE, cmp) return true } case BlockARM64UGT: // match: (UGT (FlagConstant [fc]) yes no) // cond: fc.ugt() // result: (First yes no) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(fc.ugt()) { break } b.Reset(BlockFirst) return true } // match: (UGT (FlagConstant [fc]) yes no) // cond: !fc.ugt() // result: (First no yes) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(!fc.ugt()) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (UGT (InvertFlags cmp) yes no) // result: (ULT cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64ULT, cmp) return true } case BlockARM64ULE: // match: (ULE (FlagConstant [fc]) yes no) // cond: fc.ule() // result: (First yes no) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(fc.ule()) { break } b.Reset(BlockFirst) return true } // match: (ULE (FlagConstant [fc]) yes no) // cond: !fc.ule() // result: (First no yes) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(!fc.ule()) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (ULE (InvertFlags cmp) yes no) // result: (UGE cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64UGE, cmp) return true } case BlockARM64ULT: // match: (ULT (FlagConstant [fc]) yes no) // cond: fc.ult() // result: (First yes no) for b.Controls[0].Op == OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(fc.ult()) { break } b.Reset(BlockFirst) return true } // match: (ULT (FlagConstant [fc]) yes no) // cond: !fc.ult() // result: (First no yes) for b.Controls[0].Op == 
OpARM64FlagConstant { v_0 := b.Controls[0] fc := auxIntToFlagConstant(v_0.AuxInt) if !(!fc.ult()) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } // match: (ULT (InvertFlags cmp) yes no) // result: (UGT cmp yes no) for b.Controls[0].Op == OpARM64InvertFlags { v_0 := b.Controls[0] cmp := v_0.Args[0] b.resetWithControl(BlockARM64UGT, cmp) return true } case BlockARM64Z: // match: (Z (ANDconst [c] x) yes no) // cond: oneBit(c) // result: (TBZ [int64(ntz64(c))] x yes no) for b.Controls[0].Op == OpARM64ANDconst { v_0 := b.Controls[0] c := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(oneBit(c)) { break } b.resetWithControl(BlockARM64TBZ, x) b.AuxInt = int64ToAuxInt(int64(ntz64(c))) return true } // match: (Z (MOVDconst [0]) yes no) // result: (First yes no) for b.Controls[0].Op == OpARM64MOVDconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } b.Reset(BlockFirst) return true } // match: (Z (MOVDconst [c]) yes no) // cond: c != 0 // result: (First no yes) for b.Controls[0].Op == OpARM64MOVDconst { v_0 := b.Controls[0] c := auxIntToInt64(v_0.AuxInt) if !(c != 0) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } case BlockARM64ZW: // match: (ZW (ANDconst [c] x) yes no) // cond: oneBit(int64(uint32(c))) // result: (TBZ [int64(ntz64(int64(uint32(c))))] x yes no) for b.Controls[0].Op == OpARM64ANDconst { v_0 := b.Controls[0] c := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] if !(oneBit(int64(uint32(c)))) { break } b.resetWithControl(BlockARM64TBZ, x) b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c))))) return true } // match: (ZW (MOVDconst [c]) yes no) // cond: int32(c) == 0 // result: (First yes no) for b.Controls[0].Op == OpARM64MOVDconst { v_0 := b.Controls[0] c := auxIntToInt64(v_0.AuxInt) if !(int32(c) == 0) { break } b.Reset(BlockFirst) return true } // match: (ZW (MOVDconst [c]) yes no) // cond: int32(c) != 0 // result: (First no yes) for b.Controls[0].Op == OpARM64MOVDconst { v_0 := b.Controls[0] c := auxIntToInt64(v_0.AuxInt) if !(int32(c) != 0) { break } b.Reset(BlockFirst) b.swapSuccessors() return true } } return false }
go/src/cmd/compile/internal/ssa/rewriteARM64.go/0
{ "file_path": "go/src/cmd/compile/internal/ssa/rewriteARM64.go", "repo_id": "go", "token_count": 334834 }
105
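// The generated ARM64 rules above repeat one shape over and over: a
// conditional block tests the flags of (CMPconst [0] x), and when x is a
// single-use AND/ADD (or a constant/flag value), the comparison is folded
// into a flags-setting TST/CMN or the branch is resolved outright. The
// sketch below is a hedged, minimal model of the single-use TST fold; the
// op names mirror the rules, but the value and block types are simplified
// stand-ins, not the compiler's real ssa.Value/ssa.Block API.
package main

import "fmt"

type op string

type value struct {
	op   op
	args []*value
	uses int
}

type block struct {
	kind    string // conditional kind, e.g. "NE", "GT"
	control *value // flags-producing control value
}

// rewriteCmpZero is the analogue of
//	(NE (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (NE (TST x y) yes no)
// It reports whether the block's control value was replaced.
func rewriteCmpZero(b *block) bool {
	c := b.control
	if c.op != "CMPconst0" {
		return false
	}
	z := c.args[0]
	if z.op != "AND" || z.uses != 1 {
		// The AND result is needed elsewhere; folding it away would
		// force it to be recomputed, so the rule does not fire.
		return false
	}
	// TST sets the condition flags from x&y without materializing the
	// AND result, so the original AND becomes dead.
	b.control = &value{op: "TST", args: z.args}
	return true
}

func main() {
	x, y := &value{op: "Arg"}, &value{op: "Arg"}
	and := &value{op: "AND", args: []*value{x, y}, uses: 1}
	b := &block{kind: "NE", control: &value{op: "CMPconst0", args: []*value{and}}}
	fmt.Println(rewriteCmpZero(b), b.control.op) // true TST
}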
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

import (
	"fmt"
)

// ----------------------------------------------------------------------------
// Sparse Conditional Constant Propagation
//
// Described in
// Mark N. Wegman, F. Kenneth Zadeck: Constant Propagation with Conditional Branches.
// TOPLAS 1991.
//
// This algorithm uses a three-level lattice for each SSA value:
//
//          Top         undefined
//         / | \
//     .. 1  2  3 ..    constant
//         \ | /
//         Bottom       not a constant
//
// It starts by optimistically assuming that all SSA values are initially Top
// and then propagates constant facts only along reachable control flow paths.
// While some basic blocks have not been visited yet, the corresponding phi
// inputs are treated as Top, and meet(phi) computes the phi's lattice:
//
//      Top ∩ any = any
//      Bottom ∩ any = Bottom
//      ConstantA ∩ ConstantA = ConstantA
//      ConstantA ∩ ConstantB = Bottom
//
// Each lattice value is lowered at most twice (Top to Constant, Constant to
// Bottom) due to the lattice depth, so the algorithm converges quickly.
// In this way, sccp can discover optimization opportunities that cannot be
// found by applying constant folding, constant propagation, and dead code
// elimination separately.

// The three-level lattice holds compile-time knowledge about an SSA value.
const (
	top      int8 = iota // undefined
	constant             // constant
	bottom               // not a constant
)

type lattice struct {
	tag int8   // lattice type
	val *Value // constant value
}

type worklist struct {
	f            *Func               // the target function to be optimized
	edges        []Edge              // propagate constant facts through edges
	uses         []*Value            // re-visiting set
	visited      map[Edge]bool       // visited edges
	latticeCells map[*Value]lattice  // constant lattices
	defUse       map[*Value][]*Value // def-use chains for some values
	defBlock     map[*Value][]*Block // use blocks of def
	visitedBlock []bool              // visited block
}

// sccp stands for sparse conditional constant propagation; it propagates
// constants through the CFG conditionally and applies constant folding,
// constant replacement, and dead code elimination all together.
func sccp(f *Func) {
	var t worklist
	t.f = f
	t.edges = make([]Edge, 0)
	t.visited = make(map[Edge]bool)
	t.edges = append(t.edges, Edge{f.Entry, 0})
	t.defUse = make(map[*Value][]*Value)
	t.defBlock = make(map[*Value][]*Block)
	t.latticeCells = make(map[*Value]lattice)
	t.visitedBlock = f.Cache.allocBoolSlice(f.NumBlocks())
	defer f.Cache.freeBoolSlice(t.visitedBlock)

	// Build the def-use chain early; the analysis below relies heavily on it.
	t.buildDefUses()

	// Pick up either an edge or an SSA value from the worklist and process it.
	for {
		if len(t.edges) > 0 {
			edge := t.edges[0]
			t.edges = t.edges[1:]
			if _, exist := t.visited[edge]; !exist {
				dest := edge.b
				destVisited := t.visitedBlock[dest.ID]

				// mark edge as visited
				t.visited[edge] = true
				t.visitedBlock[dest.ID] = true
				for _, val := range dest.Values {
					if val.Op == OpPhi || !destVisited {
						t.visitValue(val)
					}
				}
				// Propagate constant facts through the CFG, taking the
				// condition test into account.
				if !destVisited {
					t.propagate(dest)
				}
			}
			continue
		}
		if len(t.uses) > 0 {
			use := t.uses[0]
			t.uses = t.uses[1:]
			t.visitValue(use)
			continue
		}
		break
	}

	// Apply optimizations based on the discovered constants.
	constCnt, rewireCnt := t.replaceConst()
	if f.pass.debug > 0 {
		if constCnt > 0 || rewireCnt > 0 {
			fmt.Printf("Phase SCCP for %v : %v constants, %v dce\n", f.Name, constCnt, rewireCnt)
		}
	}
}

func equals(a, b lattice) bool {
	if a == b {
		// fast path
		return true
	}
	if a.tag != b.tag {
		return false
	}
	if a.tag == constant {
		// Two distinct const Values may carry the same content;
		// compare Op and AuxInt rather than pointers.
		v1 := a.val
		v2 := b.val
		return v1.Op == v2.Op && v1.AuxInt == v2.AuxInt
	}
	return true
}

// possibleConst reports whether val could ever be folded to a constant. For
// Values that can never become constants (e.g. StaticCall), we don't make
// futile efforts.
func possibleConst(val *Value) bool { if isConst(val) { return true } switch val.Op { case OpCopy: return true case OpPhi: return true case // negate OpNeg8, OpNeg16, OpNeg32, OpNeg64, OpNeg32F, OpNeg64F, OpCom8, OpCom16, OpCom32, OpCom64, // math OpFloor, OpCeil, OpTrunc, OpRoundToEven, OpSqrt, // conversion OpTrunc16to8, OpTrunc32to8, OpTrunc32to16, OpTrunc64to8, OpTrunc64to16, OpTrunc64to32, OpCvt32to32F, OpCvt32to64F, OpCvt64to32F, OpCvt64to64F, OpCvt32Fto32, OpCvt32Fto64, OpCvt64Fto32, OpCvt64Fto64, OpCvt32Fto64F, OpCvt64Fto32F, OpCvtBoolToUint8, OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64, OpZeroExt16to32, OpZeroExt16to64, OpZeroExt32to64, OpSignExt8to16, OpSignExt8to32, OpSignExt8to64, OpSignExt16to32, OpSignExt16to64, OpSignExt32to64, // bit OpCtz8, OpCtz16, OpCtz32, OpCtz64, // mask OpSlicemask, // safety check OpIsNonNil, // not OpNot: return true case // add OpAdd64, OpAdd32, OpAdd16, OpAdd8, OpAdd32F, OpAdd64F, // sub OpSub64, OpSub32, OpSub16, OpSub8, OpSub32F, OpSub64F, // mul OpMul64, OpMul32, OpMul16, OpMul8, OpMul32F, OpMul64F, // div OpDiv32F, OpDiv64F, OpDiv8, OpDiv16, OpDiv32, OpDiv64, OpDiv8u, OpDiv16u, OpDiv32u, OpDiv64u, OpMod8, OpMod16, OpMod32, OpMod64, OpMod8u, OpMod16u, OpMod32u, OpMod64u, // compare OpEq64, OpEq32, OpEq16, OpEq8, OpEq32F, OpEq64F, OpLess64, OpLess32, OpLess16, OpLess8, OpLess64U, OpLess32U, OpLess16U, OpLess8U, OpLess32F, OpLess64F, OpLeq64, OpLeq32, OpLeq16, OpLeq8, OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U, OpLeq32F, OpLeq64F, OpEqB, OpNeqB, // shift OpLsh64x64, OpRsh64x64, OpRsh64Ux64, OpLsh32x64, OpRsh32x64, OpRsh32Ux64, OpLsh16x64, OpRsh16x64, OpRsh16Ux64, OpLsh8x64, OpRsh8x64, OpRsh8Ux64, // safety check OpIsInBounds, OpIsSliceInBounds, // bit OpAnd8, OpAnd16, OpAnd32, OpAnd64, OpOr8, OpOr16, OpOr32, OpOr64, OpXor8, OpXor16, OpXor32, OpXor64: return true default: return false } } func (t *worklist) getLatticeCell(val *Value) lattice { if !possibleConst(val) { // they are always worst return lattice{bottom, nil} } lt, exist := t.latticeCells[val] if !exist { return lattice{top, nil} // optimistically for un-visited value } return lt } func isConst(val *Value) bool { switch val.Op { case OpConst64, OpConst32, OpConst16, OpConst8, OpConstBool, OpConst32F, OpConst64F: return true default: return false } } // buildDefUses builds def-use chain for some values early, because once the // lattice of a value is changed, we need to update lattices of use. But we don't // need all uses of it, only uses that can become constants would be added into // re-visit worklist since no matter how many times they are revisited, uses which // can't become constants lattice remains unchanged, i.e. Bottom. 
func (t *worklist) buildDefUses() { for _, block := range t.f.Blocks { for _, val := range block.Values { for _, arg := range val.Args { // find its uses, only uses that can become constants take into account if possibleConst(arg) && possibleConst(val) { if _, exist := t.defUse[arg]; !exist { t.defUse[arg] = make([]*Value, 0, arg.Uses) } t.defUse[arg] = append(t.defUse[arg], val) } } } for _, ctl := range block.ControlValues() { // for control values that can become constants, find their use blocks if possibleConst(ctl) { t.defBlock[ctl] = append(t.defBlock[ctl], block) } } } } // addUses finds all uses of value and appends them into work list for further process func (t *worklist) addUses(val *Value) { for _, use := range t.defUse[val] { if val == use { // Phi may refer to itself as uses, ignore them to avoid re-visiting phi // for performance reason continue } t.uses = append(t.uses, use) } for _, block := range t.defBlock[val] { if t.visitedBlock[block.ID] { t.propagate(block) } } } // meet meets all of phi arguments and computes result lattice func (t *worklist) meet(val *Value) lattice { optimisticLt := lattice{top, nil} for i := 0; i < len(val.Args); i++ { edge := Edge{val.Block, i} // If incoming edge for phi is not visited, assume top optimistically. // According to rules of meet: // Top ∩ any = any // Top participates in meet() but does not affect the result, so here // we will ignore Top and only take other lattices into consideration. if _, exist := t.visited[edge]; exist { lt := t.getLatticeCell(val.Args[i]) if lt.tag == constant { if optimisticLt.tag == top { optimisticLt = lt } else { if !equals(optimisticLt, lt) { // ConstantA ∩ ConstantB = Bottom return lattice{bottom, nil} } } } else if lt.tag == bottom { // Bottom ∩ any = Bottom return lattice{bottom, nil} } else { // Top ∩ any = any } } else { // Top ∩ any = any } } // ConstantA ∩ ConstantA = ConstantA or Top ∩ any = any return optimisticLt } func computeLattice(f *Func, val *Value, args ...*Value) lattice { // In general, we need to perform constant evaluation based on constant args: // // res := lattice{constant, nil} // switch op { // case OpAdd16: // res.val = newConst(argLt1.val.AuxInt16() + argLt2.val.AuxInt16()) // case OpAdd32: // res.val = newConst(argLt1.val.AuxInt32() + argLt2.val.AuxInt32()) // case OpDiv8: // if !isDivideByZero(argLt2.val.AuxInt8()) { // res.val = newConst(argLt1.val.AuxInt8() / argLt2.val.AuxInt8()) // } // ... // } // // However, this would create a huge switch for all opcodes that can be // evaluated during compile time. Moreover, some operations can be evaluated // only if its arguments satisfy additional conditions(e.g. divide by zero). // It's fragile and error-prone. We did a trick by reusing the existing rules // in generic rules for compile-time evaluation. But generic rules rewrite // original value, this behavior is undesired, because the lattice of values // may change multiple times, once it was rewritten, we lose the opportunity // to change it permanently, which can lead to errors. For example, We cannot // change its value immediately after visiting Phi, because some of its input // edges may still not be visited at this moment. constValue := f.newValue(val.Op, val.Type, f.Entry, val.Pos) constValue.AddArgs(args...) matched := rewriteValuegeneric(constValue) if matched { if isConst(constValue) { return lattice{constant, constValue} } } // Either we can not match generic rules for given value or it does not // satisfy additional constraints(e.g. 
divide by zero), in these cases, clean // up temporary value immediately in case they are not dominated by their args. constValue.reset(OpInvalid) return lattice{bottom, nil} } func (t *worklist) visitValue(val *Value) { if !possibleConst(val) { // fast fail for always worst Values, i.e. there is no lowering happen // on them, their lattices must be initially worse Bottom. return } oldLt := t.getLatticeCell(val) defer func() { // re-visit all uses of value if its lattice is changed newLt := t.getLatticeCell(val) if !equals(newLt, oldLt) { if int8(oldLt.tag) > int8(newLt.tag) { t.f.Fatalf("Must lower lattice\n") } t.addUses(val) } }() switch val.Op { // they are constant values, aren't they? case OpConst64, OpConst32, OpConst16, OpConst8, OpConstBool, OpConst32F, OpConst64F: //TODO: support ConstNil ConstString etc t.latticeCells[val] = lattice{constant, val} // lattice value of copy(x) actually means lattice value of (x) case OpCopy: t.latticeCells[val] = t.getLatticeCell(val.Args[0]) // phi should be processed specially case OpPhi: t.latticeCells[val] = t.meet(val) // fold 1-input operations: case // negate OpNeg8, OpNeg16, OpNeg32, OpNeg64, OpNeg32F, OpNeg64F, OpCom8, OpCom16, OpCom32, OpCom64, // math OpFloor, OpCeil, OpTrunc, OpRoundToEven, OpSqrt, // conversion OpTrunc16to8, OpTrunc32to8, OpTrunc32to16, OpTrunc64to8, OpTrunc64to16, OpTrunc64to32, OpCvt32to32F, OpCvt32to64F, OpCvt64to32F, OpCvt64to64F, OpCvt32Fto32, OpCvt32Fto64, OpCvt64Fto32, OpCvt64Fto64, OpCvt32Fto64F, OpCvt64Fto32F, OpCvtBoolToUint8, OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64, OpZeroExt16to32, OpZeroExt16to64, OpZeroExt32to64, OpSignExt8to16, OpSignExt8to32, OpSignExt8to64, OpSignExt16to32, OpSignExt16to64, OpSignExt32to64, // bit OpCtz8, OpCtz16, OpCtz32, OpCtz64, // mask OpSlicemask, // safety check OpIsNonNil, // not OpNot: lt1 := t.getLatticeCell(val.Args[0]) if lt1.tag == constant { // here we take a shortcut by reusing generic rules to fold constants t.latticeCells[val] = computeLattice(t.f, val, lt1.val) } else { t.latticeCells[val] = lattice{lt1.tag, nil} } // fold 2-input operations case // add OpAdd64, OpAdd32, OpAdd16, OpAdd8, OpAdd32F, OpAdd64F, // sub OpSub64, OpSub32, OpSub16, OpSub8, OpSub32F, OpSub64F, // mul OpMul64, OpMul32, OpMul16, OpMul8, OpMul32F, OpMul64F, // div OpDiv32F, OpDiv64F, OpDiv8, OpDiv16, OpDiv32, OpDiv64, OpDiv8u, OpDiv16u, OpDiv32u, OpDiv64u, //TODO: support div128u // mod OpMod8, OpMod16, OpMod32, OpMod64, OpMod8u, OpMod16u, OpMod32u, OpMod64u, // compare OpEq64, OpEq32, OpEq16, OpEq8, OpEq32F, OpEq64F, OpLess64, OpLess32, OpLess16, OpLess8, OpLess64U, OpLess32U, OpLess16U, OpLess8U, OpLess32F, OpLess64F, OpLeq64, OpLeq32, OpLeq16, OpLeq8, OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U, OpLeq32F, OpLeq64F, OpEqB, OpNeqB, // shift OpLsh64x64, OpRsh64x64, OpRsh64Ux64, OpLsh32x64, OpRsh32x64, OpRsh32Ux64, OpLsh16x64, OpRsh16x64, OpRsh16Ux64, OpLsh8x64, OpRsh8x64, OpRsh8Ux64, // safety check OpIsInBounds, OpIsSliceInBounds, // bit OpAnd8, OpAnd16, OpAnd32, OpAnd64, OpOr8, OpOr16, OpOr32, OpOr64, OpXor8, OpXor16, OpXor32, OpXor64: lt1 := t.getLatticeCell(val.Args[0]) lt2 := t.getLatticeCell(val.Args[1]) if lt1.tag == constant && lt2.tag == constant { // here we take a shortcut by reusing generic rules to fold constants t.latticeCells[val] = computeLattice(t.f, val, lt1.val, lt2.val) } else { if lt1.tag == bottom || lt2.tag == bottom { t.latticeCells[val] = lattice{bottom, nil} } else { t.latticeCells[val] = lattice{top, nil} } } default: // Any other type of value cannot be a constant, 
they are always the worst (Bottom).
	}
}

// propagate propagates constant facts through the CFG. If the block has a
// single successor, that successor is added unconditionally. If the block has
// multiple successors, only the branch destination selected by the lattice
// value of the condition is added.
func (t *worklist) propagate(block *Block) {
	switch block.Kind {
	case BlockExit, BlockRet, BlockRetJmp, BlockInvalid:
		// control flow ends; nothing to do
		break
	case BlockDefer:
		// we know nothing about control flow, add all branch destinations
		t.edges = append(t.edges, block.Succs...)
	case BlockFirst:
		fallthrough // always takes the first branch
	case BlockPlain:
		t.edges = append(t.edges, block.Succs[0])
	case BlockIf, BlockJumpTable:
		cond := block.ControlValues()[0]
		condLattice := t.getLatticeCell(cond)
		if condLattice.tag == bottom {
			// we know nothing about control flow, add all branch destinations
			t.edges = append(t.edges, block.Succs...)
		} else if condLattice.tag == constant {
			// add only the branch destination selected by the condition value
			var branchIdx int64
			if block.Kind == BlockIf {
				branchIdx = 1 - condLattice.val.AuxInt
			} else {
				branchIdx = condLattice.val.AuxInt
			}
			t.edges = append(t.edges, block.Succs[branchIdx])
		} else {
			// condition value is not visited yet, don't propagate it now
		}
	default:
		t.f.Fatalf("All block kinds should be handled above.")
	}
}

// rewireSuccessor rewires the block's successors according to the constant
// value discovered by the preceding analysis. As a result, some successors
// become unreachable and can be removed by a later deadcode phase.
func rewireSuccessor(block *Block, constVal *Value) bool {
	switch block.Kind {
	case BlockIf:
		block.removeEdge(int(constVal.AuxInt))
		block.Kind = BlockPlain
		block.Likely = BranchUnknown
		block.ResetControls()
		return true
	case BlockJumpTable:
		// Remove everything but the known taken branch.
		idx := int(constVal.AuxInt)
		if idx < 0 || idx >= len(block.Succs) {
			// This can only happen in unreachable code,
			// as an invariant of jump tables is that their
			// input index is in range.
			// See issue 64826.
			return false
		}
		block.swapSuccessorsByIdx(0, idx)
		for len(block.Succs) > 1 {
			block.removeEdge(1)
		}
		block.Kind = BlockPlain
		block.Likely = BranchUnknown
		block.ResetControls()
		return true
	default:
		return false
	}
}

// replaceConst replaces values that sccp has proven constant, but that are
// not constant ops yet, with the corresponding constants.
func (t *worklist) replaceConst() (int, int) {
	constCnt, rewireCnt := 0, 0
	for val, lt := range t.latticeCells {
		if lt.tag == constant {
			if !isConst(val) {
				if t.f.pass.debug > 0 {
					fmt.Printf("Replace %v with %v\n", val.LongString(), lt.val.LongString())
				}
				val.reset(lt.val.Op)
				val.AuxInt = lt.val.AuxInt
				constCnt++
			}
			// If the constant value controls a block, rewire that block's
			// successors according to the value.
			ctrlBlock := t.defBlock[val]
			for _, block := range ctrlBlock {
				if rewireSuccessor(block, lt.val) {
					rewireCnt++
					if t.f.pass.debug > 0 {
						fmt.Printf("Rewire %v %v successors\n", block.Kind, block)
					}
				}
			}
		}
	}
	return constCnt, rewireCnt
}
go/src/cmd/compile/internal/ssa/sccp.go/0
{ "file_path": "go/src/cmd/compile/internal/ssa/sccp.go", "repo_id": "go", "token_count": 7336 }
106
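The lattice rules that visitValue and the meet operation rely on can be shown standalone. The sketch below is a hypothetical simplification — its tag, cell, and meet names are illustrative only, and the real compiler cells carry *Value payloads rather than integers — but it implements the same rule: top is the identity, equal constants stay constant, and any conflict or bottom operand collapses to bottom.

package main

import "fmt"

type tag int8

const (
	top      tag = iota // undefined: no evidence about the value yet
	constant            // proven to be a single constant
	bottom              // proven to vary: not a constant
)

type cell struct {
	tag tag
	val int64 // stand-in for the constant payload (*Value in sccp.go)
}

// meet mirrors the SCCP phi rule: top is the identity, two equal
// constants remain constant, and everything else falls to bottom.
// Lattice heights only ever decrease, which is why visitValue can
// assert that a cell is never raised.
func meet(a, b cell) cell {
	switch {
	case a.tag == top:
		return b
	case b.tag == top:
		return a
	case a.tag == constant && b.tag == constant && a.val == b.val:
		return a
	default:
		return cell{bottom, 0}
	}
}

func main() {
	c7 := cell{constant, 7}
	fmt.Println(meet(cell{top, 0}, c7) == c7)              // true: top is the identity
	fmt.Println(meet(c7, cell{constant, 8}).tag == bottom) // true: conflicting constants
	fmt.Println(meet(c7, cell{bottom, 0}).tag == bottom)   // true: bottom absorbs
}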
package main import "fmt" func F[T any](n T) { fmt.Printf("called\n") } func G[T any](n T) { F(n) fmt.Printf("after\n") } func main() { G(3) }
go/src/cmd/compile/internal/ssa/testdata/convertline.go/0
{ "file_path": "go/src/cmd/compile/internal/ssa/testdata/convertline.go", "repo_id": "go", "token_count": 82 }
107
package foo func f(m, n int) int { a := g(n) b := g(m) return a + b } func g(x int) int { y := h(x + 1) z := h(x - 1) return y + z } func h(x int) int { return x * x }
go/src/cmd/compile/internal/ssa/testdata/inline-dump.go/0
{ "file_path": "go/src/cmd/compile/internal/ssa/testdata/inline-dump.go", "repo_id": "go", "token_count": 95 }
108
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ssa import "testing" type extTest struct { f func(uint64, uint64) uint64 arg1 uint64 arg2 uint64 res uint64 name string } var extTests = [...]extTest{ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 / op2)) }, arg1: 0x1, arg2: 0xfffffffeffffffff, res: 0xffffffff, name: "div"}, {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 * op2)) }, arg1: 0x1, arg2: 0x100000001, res: 0x1, name: "mul"}, {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 + op2)) }, arg1: 0x1, arg2: 0xeeeeeeeeffffffff, res: 0x0, name: "add"}, {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 - op2)) }, arg1: 0x1, arg2: 0xeeeeeeeeffffffff, res: 0x2, name: "sub"}, {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 | op2)) }, arg1: 0x100000000000001, arg2: 0xfffffffffffffff, res: 0xffffffff, name: "or"}, {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 ^ op2)) }, arg1: 0x100000000000001, arg2: 0xfffffffffffffff, res: 0xfffffffe, name: "xor"}, {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 & op2)) }, arg1: 0x100000000000001, arg2: 0x100000000000001, res: 0x1, name: "and"}, } func TestZeroExtension(t *testing.T) { for _, x := range extTests { r := x.f(x.arg1, x.arg2) if x.res != r { t.Errorf("%s: got %d want %d", x.name, r, x.res) } } }
go/src/cmd/compile/internal/ssa/zeroextension_test.go/0
{ "file_path": "go/src/cmd/compile/internal/ssa/zeroextension_test.go", "repo_id": "go", "token_count": 738 }
109
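The property the table above encodes can be illustrated in isolation; this is a sketch independent of the test harness, using the values from the "add" row: converting a uint64 whose upper half is junk through int32 and back must leave only the low 32 bits.

package main

import "fmt"

func main() {
	// From the "add" row above: a = 0x1, b = 0xeeeeeeeeffffffff.
	a, b := uint64(0x1), uint64(0xeeeeeeeeffffffff)
	op1, op2 := int32(a), int32(b) // truncation discards the junk upper halves: 1 and -1
	res := uint64(uint32(op1 + op2))
	fmt.Printf("%#x\n", res) // 0x0 — the high 32 bits must come back zeroed
}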
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package syntax import ( "fmt" "strings" "testing" ) // A test is a source code snippet of a particular node type. // In the snippet, a '@' indicates the position recorded by // the parser when creating the respective node. type test struct { nodetyp string snippet string } var decls = []test{ // The position of declarations is always the // position of the first token of an individual // declaration, independent of grouping. {"ImportDecl", `import @"math"`}, {"ImportDecl", `import @mymath "math"`}, {"ImportDecl", `import @. "math"`}, {"ImportDecl", `import (@"math")`}, {"ImportDecl", `import (@mymath "math")`}, {"ImportDecl", `import (@. "math")`}, {"ConstDecl", `const @x`}, {"ConstDecl", `const @x = 0`}, {"ConstDecl", `const @x, y, z = 0, 1, 2`}, {"ConstDecl", `const (@x)`}, {"ConstDecl", `const (@x = 0)`}, {"ConstDecl", `const (@x, y, z = 0, 1, 2)`}, {"TypeDecl", `type @T int`}, {"TypeDecl", `type @T = int`}, {"TypeDecl", `type (@T int)`}, {"TypeDecl", `type (@T = int)`}, {"VarDecl", `var @x int`}, {"VarDecl", `var @x, y, z int`}, {"VarDecl", `var @x int = 0`}, {"VarDecl", `var @x, y, z int = 1, 2, 3`}, {"VarDecl", `var @x = 0`}, {"VarDecl", `var @x, y, z = 1, 2, 3`}, {"VarDecl", `var (@x int)`}, {"VarDecl", `var (@x, y, z int)`}, {"VarDecl", `var (@x int = 0)`}, {"VarDecl", `var (@x, y, z int = 1, 2, 3)`}, {"VarDecl", `var (@x = 0)`}, {"VarDecl", `var (@x, y, z = 1, 2, 3)`}, {"FuncDecl", `func @f() {}`}, {"FuncDecl", `func @(T) f() {}`}, {"FuncDecl", `func @(x T) f() {}`}, } var exprs = []test{ // The position of an expression is the position // of the left-most token that identifies the // kind of expression. 
{"Name", `@x`}, {"BasicLit", `@0`}, {"BasicLit", `@0x123`}, {"BasicLit", `@3.1415`}, {"BasicLit", `@.2718`}, {"BasicLit", `@1i`}, {"BasicLit", `@'a'`}, {"BasicLit", `@"abc"`}, {"BasicLit", "@`abc`"}, {"CompositeLit", `@{}`}, {"CompositeLit", `T@{}`}, {"CompositeLit", `struct{x, y int}@{}`}, {"KeyValueExpr", `"foo"@: true`}, {"KeyValueExpr", `"a"@: b`}, {"FuncLit", `@func (){}`}, {"ParenExpr", `@(x)`}, {"SelectorExpr", `a@.b`}, {"IndexExpr", `a@[i]`}, {"SliceExpr", `a@[:]`}, {"SliceExpr", `a@[i:]`}, {"SliceExpr", `a@[:j]`}, {"SliceExpr", `a@[i:j]`}, {"SliceExpr", `a@[i:j:k]`}, {"AssertExpr", `x@.(T)`}, {"Operation", `@*b`}, {"Operation", `@+b`}, {"Operation", `@-b`}, {"Operation", `@!b`}, {"Operation", `@^b`}, {"Operation", `@&b`}, {"Operation", `@<-b`}, {"Operation", `a @|| b`}, {"Operation", `a @&& b`}, {"Operation", `a @== b`}, {"Operation", `a @+ b`}, {"Operation", `a @* b`}, {"CallExpr", `f@()`}, {"CallExpr", `f@(x, y, z)`}, {"CallExpr", `obj.f@(1, 2, 3)`}, {"CallExpr", `func(x int) int { return x + 1 }@(y)`}, // ListExpr: tested via multi-value const/var declarations } var types = []test{ {"Operation", `@*T`}, {"Operation", `@*struct{}`}, {"ArrayType", `@[10]T`}, {"ArrayType", `@[...]T`}, {"SliceType", `@[]T`}, {"DotsType", `@...T`}, {"StructType", `@struct{}`}, {"InterfaceType", `@interface{}`}, {"FuncType", `func@()`}, {"MapType", `@map[T]T`}, {"ChanType", `@chan T`}, {"ChanType", `@chan<- T`}, {"ChanType", `@<-chan T`}, } var fields = []test{ {"Field", `@T`}, {"Field", `@(T)`}, {"Field", `@x T`}, {"Field", `@x *(T)`}, {"Field", `@x, y, z T`}, {"Field", `@x, y, z (*T)`}, } var stmts = []test{ {"EmptyStmt", `@`}, {"LabeledStmt", `L@:`}, {"LabeledStmt", `L@: ;`}, {"LabeledStmt", `L@: f()`}, {"BlockStmt", `@{}`}, // The position of an ExprStmt is the position of the expression. 
{"ExprStmt", `@<-ch`}, {"ExprStmt", `f@()`}, {"ExprStmt", `append@(s, 1, 2, 3)`}, {"SendStmt", `ch @<- x`}, {"DeclStmt", `@const x = 0`}, {"DeclStmt", `@const (x = 0)`}, {"DeclStmt", `@type T int`}, {"DeclStmt", `@type T = int`}, {"DeclStmt", `@type (T1 = int; T2 = float32)`}, {"DeclStmt", `@var x = 0`}, {"DeclStmt", `@var x, y, z int`}, {"DeclStmt", `@var (a, b = 1, 2)`}, {"AssignStmt", `x @= y`}, {"AssignStmt", `a, b, x @= 1, 2, 3`}, {"AssignStmt", `x @+= y`}, {"AssignStmt", `x @:= y`}, {"AssignStmt", `x, ok @:= f()`}, {"AssignStmt", `x@++`}, {"AssignStmt", `a[i]@--`}, {"BranchStmt", `@break`}, {"BranchStmt", `@break L`}, {"BranchStmt", `@continue`}, {"BranchStmt", `@continue L`}, {"BranchStmt", `@fallthrough`}, {"BranchStmt", `@goto L`}, {"CallStmt", `@defer f()`}, {"CallStmt", `@go f()`}, {"ReturnStmt", `@return`}, {"ReturnStmt", `@return x`}, {"ReturnStmt", `@return a, b, a + b*f(1, 2, 3)`}, {"IfStmt", `@if cond {}`}, {"IfStmt", `@if cond { f() } else {}`}, {"IfStmt", `@if cond { f() } else { g(); h() }`}, {"ForStmt", `@for {}`}, {"ForStmt", `@for { f() }`}, {"SwitchStmt", `@switch {}`}, {"SwitchStmt", `@switch { default: }`}, {"SwitchStmt", `@switch { default: x++ }`}, {"SelectStmt", `@select {}`}, {"SelectStmt", `@select { default: }`}, {"SelectStmt", `@select { default: ch <- false }`}, } var ranges = []test{ {"RangeClause", `@range s`}, {"RangeClause", `i = @range s`}, {"RangeClause", `i := @range s`}, {"RangeClause", `_, x = @range s`}, {"RangeClause", `i, x = @range s`}, {"RangeClause", `_, x := @range s.f`}, {"RangeClause", `i, x := @range f(i)`}, } var guards = []test{ {"TypeSwitchGuard", `x@.(type)`}, {"TypeSwitchGuard", `x := x@.(type)`}, } var cases = []test{ {"CaseClause", `@case x:`}, {"CaseClause", `@case x, y, z:`}, {"CaseClause", `@case x == 1, y == 2:`}, {"CaseClause", `@default:`}, } var comms = []test{ {"CommClause", `@case <-ch:`}, {"CommClause", `@case x <- ch:`}, {"CommClause", `@case x = <-ch:`}, {"CommClause", `@case x := <-ch:`}, {"CommClause", `@case x, ok = <-ch: f(1, 2, 3)`}, {"CommClause", `@case x, ok := <-ch: x++`}, {"CommClause", `@default:`}, {"CommClause", `@default: ch <- true`}, } func TestPos(t *testing.T) { // TODO(gri) Once we have a general tree walker, we can use that to find // the first occurrence of the respective node and we don't need to hand- // extract the node for each specific kind of construct. testPos(t, decls, "package p; ", "", func(f *File) Node { return f.DeclList[0] }, ) // embed expressions in a composite literal so we can test key:value and naked composite literals testPos(t, exprs, "package p; var _ = T{ ", " }", func(f *File) Node { return f.DeclList[0].(*VarDecl).Values.(*CompositeLit).ElemList[0] }, ) // embed types in a function signature so we can test ... 
types testPos(t, types, "package p; func f(", ")", func(f *File) Node { return f.DeclList[0].(*FuncDecl).Type.ParamList[0].Type }, ) testPos(t, fields, "package p; func f(", ")", func(f *File) Node { return f.DeclList[0].(*FuncDecl).Type.ParamList[0] }, ) testPos(t, stmts, "package p; func _() { ", "; }", func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0] }, ) testPos(t, ranges, "package p; func _() { for ", " {} }", func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*ForStmt).Init.(*RangeClause) }, ) testPos(t, guards, "package p; func _() { switch ", " {} }", func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*SwitchStmt).Tag.(*TypeSwitchGuard) }, ) testPos(t, cases, "package p; func _() { switch { ", " } }", func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*SwitchStmt).Body[0] }, ) testPos(t, comms, "package p; func _() { select { ", " } }", func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*SelectStmt).Body[0] }, ) } func testPos(t *testing.T, list []test, prefix, suffix string, extract func(*File) Node) { for _, test := range list { // complete source, compute @ position, and strip @ from source src, index := stripAt(prefix + test.snippet + suffix) if index < 0 { t.Errorf("missing @: %s (%s)", src, test.nodetyp) continue } // build syntax tree file, err := Parse(nil, strings.NewReader(src), nil, nil, 0) if err != nil { t.Errorf("parse error: %s: %v (%s)", src, err, test.nodetyp) continue } // extract desired node node := extract(file) if typ := typeOf(node); typ != test.nodetyp { t.Errorf("type error: %s: type = %s, want %s", src, typ, test.nodetyp) continue } // verify node position with expected position as indicated by @ if pos := int(node.Pos().Col()); pos != index+colbase { t.Errorf("pos error: %s: pos = %d, want %d (%s)", src, pos, index+colbase, test.nodetyp) continue } } } func stripAt(s string) (string, int) { if i := strings.Index(s, "@"); i >= 0 { return s[:i] + s[i+1:], i } return s, -1 } func typeOf(n Node) string { const prefix = "*syntax." k := fmt.Sprintf("%T", n) return strings.TrimPrefix(k, prefix) }
go/src/cmd/compile/internal/syntax/nodes_test.go/0
{ "file_path": "go/src/cmd/compile/internal/syntax/nodes_test.go", "repo_id": "go", "token_count": 3890 }
110
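The '@'-marker technique used throughout the test above is easy to demonstrate on its own; this sketch reuses the stripAt helper verbatim to show how a snippet encodes its expected column.

package main

import (
	"fmt"
	"strings"
)

// stripAt is the helper from nodes_test.go: it removes the first '@'
// and reports the byte index where it sat, i.e. the expected position.
func stripAt(s string) (string, int) {
	if i := strings.Index(s, "@"); i >= 0 {
		return s[:i] + s[i+1:], i
	}
	return s, -1
}

func main() {
	src, index := stripAt("a @+ b")
	fmt.Printf("%q, expected op at index %d\n", src, index) // "a + b", expected op at index 2
}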
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Check the error message for use of = instead of ==. package p func _() { if true || 0 /* ERROR cannot use assignment .* as value */ = 1 { } } func _(a, b string) { if a == "a" && b /* ERROR cannot use assignment .* as value */ = "b" { } }
go/src/cmd/compile/internal/syntax/testdata/issue23385.go/0
{ "file_path": "go/src/cmd/compile/internal/syntax/testdata/issue23385.go", "repo_id": "go", "token_count": 129 }
111
// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Test to make sure that equality functions (and hash // functions) don't do unaligned reads on architectures // that can't do unaligned reads. See issue 46283. package test import "testing" type T1 struct { x float32 a, b, c, d int16 // memequal64 } type T2 struct { x float32 a, b, c, d int32 // memequal128 } type A2 [2]byte // eq uses a 2-byte load type A4 [4]byte // eq uses a 4-byte load type A8 [8]byte // eq uses an 8-byte load //go:noinline func cmpT1(p, q *T1) { if *p != *q { panic("comparison test wrong") } } //go:noinline func cmpT2(p, q *T2) { if *p != *q { panic("comparison test wrong") } } //go:noinline func cmpA2(p, q *A2) { if *p != *q { panic("comparison test wrong") } } //go:noinline func cmpA4(p, q *A4) { if *p != *q { panic("comparison test wrong") } } //go:noinline func cmpA8(p, q *A8) { if *p != *q { panic("comparison test wrong") } } func TestAlignEqual(t *testing.T) { cmpT1(&T1{}, &T1{}) cmpT2(&T2{}, &T2{}) m1 := map[T1]bool{} m1[T1{}] = true m1[T1{}] = false if len(m1) != 1 { t.Fatalf("len(m1)=%d, want 1", len(m1)) } m2 := map[T2]bool{} m2[T2{}] = true m2[T2{}] = false if len(m2) != 1 { t.Fatalf("len(m2)=%d, want 1", len(m2)) } type X2 struct { y byte z A2 } var x2 X2 cmpA2(&x2.z, &A2{}) type X4 struct { y byte z A4 } var x4 X4 cmpA4(&x4.z, &A4{}) type X8 struct { y byte z A8 } var x8 X8 cmpA8(&x8.z, &A8{}) }
go/src/cmd/compile/internal/test/align_test.go/0
{ "file_path": "go/src/cmd/compile/internal/test/align_test.go", "repo_id": "go", "token_count": 779 }
112
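The layouts the test exercises can be inspected directly. A minimal sketch — field offsets assume a conventional Go implementation — showing why z in X8 starts at an odd offset, which is what forces the equality code to handle unaligned data:

package main

import (
	"fmt"
	"unsafe"
)

type A8 [8]byte

// X8 mirrors the struct in TestAlignEqual: a byte followed by an
// 8-byte array. [8]byte has alignment 1, so z lands at offset 1, and
// an 8-byte-wide compare of z would be an unaligned load.
type X8 struct {
	y byte
	z A8
}

func main() {
	var x X8
	fmt.Println(unsafe.Alignof(x.z), unsafe.Offsetof(x.z)) // 1 1 on conventional implementations
}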
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package test import ( "internal/testenv" "os" "path/filepath" "testing" ) const aliasSrc = ` package x type T = int ` func TestInvalidLang(t *testing.T) { t.Parallel() testenv.MustHaveGoBuild(t) dir := t.TempDir() src := filepath.Join(dir, "alias.go") if err := os.WriteFile(src, []byte(aliasSrc), 0644); err != nil { t.Fatal(err) } outfile := filepath.Join(dir, "alias.o") if testLang(t, "go9.99", src, outfile) == nil { t.Error("compilation with -lang=go9.99 succeeded unexpectedly") } // This test will have to be adjusted if we ever reach 1.99 or 2.0. if testLang(t, "go1.99", src, outfile) == nil { t.Error("compilation with -lang=go1.99 succeeded unexpectedly") } if testLang(t, "go1.8", src, outfile) == nil { t.Error("compilation with -lang=go1.8 succeeded unexpectedly") } if err := testLang(t, "go1.9", src, outfile); err != nil { t.Errorf("compilation with -lang=go1.9 failed unexpectedly: %v", err) } } func testLang(t *testing.T, lang, src, outfile string) error { run := []string{testenv.GoToolPath(t), "tool", "compile", "-p=p", "-lang", lang, "-o", outfile, src} t.Log(run) out, err := testenv.Command(t, run[0], run[1:]...).CombinedOutput() t.Logf("%s", out) return err }
go/src/cmd/compile/internal/test/lang_test.go/0
{ "file_path": "go/src/cmd/compile/internal/test/lang_test.go", "repo_id": "go", "token_count": 544 }
113
// Code generated by gen/arithBoundaryGen.go. DO NOT EDIT. package main import "testing" type utd64 struct { a, b uint64 add, sub, mul, div, mod uint64 } type itd64 struct { a, b int64 add, sub, mul, div, mod int64 } type utd32 struct { a, b uint32 add, sub, mul, div, mod uint32 } type itd32 struct { a, b int32 add, sub, mul, div, mod int32 } type utd16 struct { a, b uint16 add, sub, mul, div, mod uint16 } type itd16 struct { a, b int16 add, sub, mul, div, mod int16 } type utd8 struct { a, b uint8 add, sub, mul, div, mod uint8 } type itd8 struct { a, b int8 add, sub, mul, div, mod int8 } //go:noinline func add_uint64_ssa(a, b uint64) uint64 { return a + b } //go:noinline func sub_uint64_ssa(a, b uint64) uint64 { return a - b } //go:noinline func div_uint64_ssa(a, b uint64) uint64 { return a / b } //go:noinline func mod_uint64_ssa(a, b uint64) uint64 { return a % b } //go:noinline func mul_uint64_ssa(a, b uint64) uint64 { return a * b } //go:noinline func add_int64_ssa(a, b int64) int64 { return a + b } //go:noinline func sub_int64_ssa(a, b int64) int64 { return a - b } //go:noinline func div_int64_ssa(a, b int64) int64 { return a / b } //go:noinline func mod_int64_ssa(a, b int64) int64 { return a % b } //go:noinline func mul_int64_ssa(a, b int64) int64 { return a * b } //go:noinline func add_uint32_ssa(a, b uint32) uint32 { return a + b } //go:noinline func sub_uint32_ssa(a, b uint32) uint32 { return a - b } //go:noinline func div_uint32_ssa(a, b uint32) uint32 { return a / b } //go:noinline func mod_uint32_ssa(a, b uint32) uint32 { return a % b } //go:noinline func mul_uint32_ssa(a, b uint32) uint32 { return a * b } //go:noinline func add_int32_ssa(a, b int32) int32 { return a + b } //go:noinline func sub_int32_ssa(a, b int32) int32 { return a - b } //go:noinline func div_int32_ssa(a, b int32) int32 { return a / b } //go:noinline func mod_int32_ssa(a, b int32) int32 { return a % b } //go:noinline func mul_int32_ssa(a, b int32) int32 { return a * b } //go:noinline func add_uint16_ssa(a, b uint16) uint16 { return a + b } //go:noinline func sub_uint16_ssa(a, b uint16) uint16 { return a - b } //go:noinline func div_uint16_ssa(a, b uint16) uint16 { return a / b } //go:noinline func mod_uint16_ssa(a, b uint16) uint16 { return a % b } //go:noinline func mul_uint16_ssa(a, b uint16) uint16 { return a * b } //go:noinline func add_int16_ssa(a, b int16) int16 { return a + b } //go:noinline func sub_int16_ssa(a, b int16) int16 { return a - b } //go:noinline func div_int16_ssa(a, b int16) int16 { return a / b } //go:noinline func mod_int16_ssa(a, b int16) int16 { return a % b } //go:noinline func mul_int16_ssa(a, b int16) int16 { return a * b } //go:noinline func add_uint8_ssa(a, b uint8) uint8 { return a + b } //go:noinline func sub_uint8_ssa(a, b uint8) uint8 { return a - b } //go:noinline func div_uint8_ssa(a, b uint8) uint8 { return a / b } //go:noinline func mod_uint8_ssa(a, b uint8) uint8 { return a % b } //go:noinline func mul_uint8_ssa(a, b uint8) uint8 { return a * b } //go:noinline func add_int8_ssa(a, b int8) int8 { return a + b } //go:noinline func sub_int8_ssa(a, b int8) int8 { return a - b } //go:noinline func div_int8_ssa(a, b int8) int8 { return a / b } //go:noinline func mod_int8_ssa(a, b int8) int8 { return a % b } //go:noinline func mul_int8_ssa(a, b int8) int8 { return a * b } var uint64_data []utd64 = []utd64{utd64{a: 0, b: 0, add: 0, sub: 0, mul: 0}, utd64{a: 0, b: 1, add: 1, sub: 18446744073709551615, mul: 0, div: 0, mod: 0}, utd64{a: 0, b: 4294967296, add: 4294967296, sub: 
18446744069414584320, mul: 0, div: 0, mod: 0}, utd64{a: 0, b: 18446744073709551615, add: 18446744073709551615, sub: 1, mul: 0, div: 0, mod: 0}, utd64{a: 1, b: 0, add: 1, sub: 1, mul: 0}, utd64{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, utd64{a: 1, b: 4294967296, add: 4294967297, sub: 18446744069414584321, mul: 4294967296, div: 0, mod: 1}, utd64{a: 1, b: 18446744073709551615, add: 0, sub: 2, mul: 18446744073709551615, div: 0, mod: 1}, utd64{a: 4294967296, b: 0, add: 4294967296, sub: 4294967296, mul: 0}, utd64{a: 4294967296, b: 1, add: 4294967297, sub: 4294967295, mul: 4294967296, div: 4294967296, mod: 0}, utd64{a: 4294967296, b: 4294967296, add: 8589934592, sub: 0, mul: 0, div: 1, mod: 0}, utd64{a: 4294967296, b: 18446744073709551615, add: 4294967295, sub: 4294967297, mul: 18446744069414584320, div: 0, mod: 4294967296}, utd64{a: 18446744073709551615, b: 0, add: 18446744073709551615, sub: 18446744073709551615, mul: 0}, utd64{a: 18446744073709551615, b: 1, add: 0, sub: 18446744073709551614, mul: 18446744073709551615, div: 18446744073709551615, mod: 0}, utd64{a: 18446744073709551615, b: 4294967296, add: 4294967295, sub: 18446744069414584319, mul: 18446744069414584320, div: 4294967295, mod: 4294967295}, utd64{a: 18446744073709551615, b: 18446744073709551615, add: 18446744073709551614, sub: 0, mul: 1, div: 1, mod: 0}, } var int64_data []itd64 = []itd64{itd64{a: -9223372036854775808, b: -9223372036854775808, add: 0, sub: 0, mul: 0, div: 1, mod: 0}, itd64{a: -9223372036854775808, b: -9223372036854775807, add: 1, sub: -1, mul: -9223372036854775808, div: 1, mod: -1}, itd64{a: -9223372036854775808, b: -4294967296, add: 9223372032559808512, sub: -9223372032559808512, mul: 0, div: 2147483648, mod: 0}, itd64{a: -9223372036854775808, b: -1, add: 9223372036854775807, sub: -9223372036854775807, mul: -9223372036854775808, div: -9223372036854775808, mod: 0}, itd64{a: -9223372036854775808, b: 0, add: -9223372036854775808, sub: -9223372036854775808, mul: 0}, itd64{a: -9223372036854775808, b: 1, add: -9223372036854775807, sub: 9223372036854775807, mul: -9223372036854775808, div: -9223372036854775808, mod: 0}, itd64{a: -9223372036854775808, b: 4294967296, add: -9223372032559808512, sub: 9223372032559808512, mul: 0, div: -2147483648, mod: 0}, itd64{a: -9223372036854775808, b: 9223372036854775806, add: -2, sub: 2, mul: 0, div: -1, mod: -2}, itd64{a: -9223372036854775808, b: 9223372036854775807, add: -1, sub: 1, mul: -9223372036854775808, div: -1, mod: -1}, itd64{a: -9223372036854775807, b: -9223372036854775808, add: 1, sub: 1, mul: -9223372036854775808, div: 0, mod: -9223372036854775807}, itd64{a: -9223372036854775807, b: -9223372036854775807, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, itd64{a: -9223372036854775807, b: -4294967296, add: 9223372032559808513, sub: -9223372032559808511, mul: -4294967296, div: 2147483647, mod: -4294967295}, itd64{a: -9223372036854775807, b: -1, add: -9223372036854775808, sub: -9223372036854775806, mul: 9223372036854775807, div: 9223372036854775807, mod: 0}, itd64{a: -9223372036854775807, b: 0, add: -9223372036854775807, sub: -9223372036854775807, mul: 0}, itd64{a: -9223372036854775807, b: 1, add: -9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: -9223372036854775807, mod: 0}, itd64{a: -9223372036854775807, b: 4294967296, add: -9223372032559808511, sub: 9223372032559808513, mul: 4294967296, div: -2147483647, mod: -4294967295}, itd64{a: -9223372036854775807, b: 9223372036854775806, add: -1, sub: 3, mul: 9223372036854775806, div: -1, mod: -1}, 
itd64{a: -9223372036854775807, b: 9223372036854775807, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, itd64{a: -4294967296, b: -9223372036854775808, add: 9223372032559808512, sub: 9223372032559808512, mul: 0, div: 0, mod: -4294967296}, itd64{a: -4294967296, b: -9223372036854775807, add: 9223372032559808513, sub: 9223372032559808511, mul: -4294967296, div: 0, mod: -4294967296}, itd64{a: -4294967296, b: -4294967296, add: -8589934592, sub: 0, mul: 0, div: 1, mod: 0}, itd64{a: -4294967296, b: -1, add: -4294967297, sub: -4294967295, mul: 4294967296, div: 4294967296, mod: 0}, itd64{a: -4294967296, b: 0, add: -4294967296, sub: -4294967296, mul: 0}, itd64{a: -4294967296, b: 1, add: -4294967295, sub: -4294967297, mul: -4294967296, div: -4294967296, mod: 0}, itd64{a: -4294967296, b: 4294967296, add: 0, sub: -8589934592, mul: 0, div: -1, mod: 0}, itd64{a: -4294967296, b: 9223372036854775806, add: 9223372032559808510, sub: 9223372032559808514, mul: 8589934592, div: 0, mod: -4294967296}, itd64{a: -4294967296, b: 9223372036854775807, add: 9223372032559808511, sub: 9223372032559808513, mul: 4294967296, div: 0, mod: -4294967296}, itd64{a: -1, b: -9223372036854775808, add: 9223372036854775807, sub: 9223372036854775807, mul: -9223372036854775808, div: 0, mod: -1}, itd64{a: -1, b: -9223372036854775807, add: -9223372036854775808, sub: 9223372036854775806, mul: 9223372036854775807, div: 0, mod: -1}, itd64{a: -1, b: -4294967296, add: -4294967297, sub: 4294967295, mul: 4294967296, div: 0, mod: -1}, itd64{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, itd64{a: -1, b: 0, add: -1, sub: -1, mul: 0}, itd64{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, itd64{a: -1, b: 4294967296, add: 4294967295, sub: -4294967297, mul: -4294967296, div: 0, mod: -1}, itd64{a: -1, b: 9223372036854775806, add: 9223372036854775805, sub: -9223372036854775807, mul: -9223372036854775806, div: 0, mod: -1}, itd64{a: -1, b: 9223372036854775807, add: 9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: 0, mod: -1}, itd64{a: 0, b: -9223372036854775808, add: -9223372036854775808, sub: -9223372036854775808, mul: 0, div: 0, mod: 0}, itd64{a: 0, b: -9223372036854775807, add: -9223372036854775807, sub: 9223372036854775807, mul: 0, div: 0, mod: 0}, itd64{a: 0, b: -4294967296, add: -4294967296, sub: 4294967296, mul: 0, div: 0, mod: 0}, itd64{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0}, itd64{a: 0, b: 0, add: 0, sub: 0, mul: 0}, itd64{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0}, itd64{a: 0, b: 4294967296, add: 4294967296, sub: -4294967296, mul: 0, div: 0, mod: 0}, itd64{a: 0, b: 9223372036854775806, add: 9223372036854775806, sub: -9223372036854775806, mul: 0, div: 0, mod: 0}, itd64{a: 0, b: 9223372036854775807, add: 9223372036854775807, sub: -9223372036854775807, mul: 0, div: 0, mod: 0}, itd64{a: 1, b: -9223372036854775808, add: -9223372036854775807, sub: -9223372036854775807, mul: -9223372036854775808, div: 0, mod: 1}, itd64{a: 1, b: -9223372036854775807, add: -9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: 0, mod: 1}, itd64{a: 1, b: -4294967296, add: -4294967295, sub: 4294967297, mul: -4294967296, div: 0, mod: 1}, itd64{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, itd64{a: 1, b: 0, add: 1, sub: 1, mul: 0}, itd64{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, itd64{a: 1, b: 4294967296, add: 4294967297, sub: -4294967295, mul: 4294967296, div: 0, mod: 1}, itd64{a: 1, b: 9223372036854775806, add: 9223372036854775807, sub: -9223372036854775805, 
mul: 9223372036854775806, div: 0, mod: 1}, itd64{a: 1, b: 9223372036854775807, add: -9223372036854775808, sub: -9223372036854775806, mul: 9223372036854775807, div: 0, mod: 1}, itd64{a: 4294967296, b: -9223372036854775808, add: -9223372032559808512, sub: -9223372032559808512, mul: 0, div: 0, mod: 4294967296}, itd64{a: 4294967296, b: -9223372036854775807, add: -9223372032559808511, sub: -9223372032559808513, mul: 4294967296, div: 0, mod: 4294967296}, itd64{a: 4294967296, b: -4294967296, add: 0, sub: 8589934592, mul: 0, div: -1, mod: 0}, itd64{a: 4294967296, b: -1, add: 4294967295, sub: 4294967297, mul: -4294967296, div: -4294967296, mod: 0}, itd64{a: 4294967296, b: 0, add: 4294967296, sub: 4294967296, mul: 0}, itd64{a: 4294967296, b: 1, add: 4294967297, sub: 4294967295, mul: 4294967296, div: 4294967296, mod: 0}, itd64{a: 4294967296, b: 4294967296, add: 8589934592, sub: 0, mul: 0, div: 1, mod: 0}, itd64{a: 4294967296, b: 9223372036854775806, add: -9223372032559808514, sub: -9223372032559808510, mul: -8589934592, div: 0, mod: 4294967296}, itd64{a: 4294967296, b: 9223372036854775807, add: -9223372032559808513, sub: -9223372032559808511, mul: -4294967296, div: 0, mod: 4294967296}, itd64{a: 9223372036854775806, b: -9223372036854775808, add: -2, sub: -2, mul: 0, div: 0, mod: 9223372036854775806}, itd64{a: 9223372036854775806, b: -9223372036854775807, add: -1, sub: -3, mul: 9223372036854775806, div: 0, mod: 9223372036854775806}, itd64{a: 9223372036854775806, b: -4294967296, add: 9223372032559808510, sub: -9223372032559808514, mul: 8589934592, div: -2147483647, mod: 4294967294}, itd64{a: 9223372036854775806, b: -1, add: 9223372036854775805, sub: 9223372036854775807, mul: -9223372036854775806, div: -9223372036854775806, mod: 0}, itd64{a: 9223372036854775806, b: 0, add: 9223372036854775806, sub: 9223372036854775806, mul: 0}, itd64{a: 9223372036854775806, b: 1, add: 9223372036854775807, sub: 9223372036854775805, mul: 9223372036854775806, div: 9223372036854775806, mod: 0}, itd64{a: 9223372036854775806, b: 4294967296, add: -9223372032559808514, sub: 9223372032559808510, mul: -8589934592, div: 2147483647, mod: 4294967294}, itd64{a: 9223372036854775806, b: 9223372036854775806, add: -4, sub: 0, mul: 4, div: 1, mod: 0}, itd64{a: 9223372036854775806, b: 9223372036854775807, add: -3, sub: -1, mul: -9223372036854775806, div: 0, mod: 9223372036854775806}, itd64{a: 9223372036854775807, b: -9223372036854775808, add: -1, sub: -1, mul: -9223372036854775808, div: 0, mod: 9223372036854775807}, itd64{a: 9223372036854775807, b: -9223372036854775807, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, itd64{a: 9223372036854775807, b: -4294967296, add: 9223372032559808511, sub: -9223372032559808513, mul: 4294967296, div: -2147483647, mod: 4294967295}, itd64{a: 9223372036854775807, b: -1, add: 9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: -9223372036854775807, mod: 0}, itd64{a: 9223372036854775807, b: 0, add: 9223372036854775807, sub: 9223372036854775807, mul: 0}, itd64{a: 9223372036854775807, b: 1, add: -9223372036854775808, sub: 9223372036854775806, mul: 9223372036854775807, div: 9223372036854775807, mod: 0}, itd64{a: 9223372036854775807, b: 4294967296, add: -9223372032559808513, sub: 9223372032559808511, mul: -4294967296, div: 2147483647, mod: 4294967295}, itd64{a: 9223372036854775807, b: 9223372036854775806, add: -3, sub: 1, mul: -9223372036854775806, div: 1, mod: 1}, itd64{a: 9223372036854775807, b: 9223372036854775807, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, } var uint32_data []utd32 = 
[]utd32{utd32{a: 0, b: 0, add: 0, sub: 0, mul: 0}, utd32{a: 0, b: 1, add: 1, sub: 4294967295, mul: 0, div: 0, mod: 0}, utd32{a: 0, b: 4294967295, add: 4294967295, sub: 1, mul: 0, div: 0, mod: 0}, utd32{a: 1, b: 0, add: 1, sub: 1, mul: 0}, utd32{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, utd32{a: 1, b: 4294967295, add: 0, sub: 2, mul: 4294967295, div: 0, mod: 1}, utd32{a: 4294967295, b: 0, add: 4294967295, sub: 4294967295, mul: 0}, utd32{a: 4294967295, b: 1, add: 0, sub: 4294967294, mul: 4294967295, div: 4294967295, mod: 0}, utd32{a: 4294967295, b: 4294967295, add: 4294967294, sub: 0, mul: 1, div: 1, mod: 0}, } var int32_data []itd32 = []itd32{itd32{a: -2147483648, b: -2147483648, add: 0, sub: 0, mul: 0, div: 1, mod: 0}, itd32{a: -2147483648, b: -2147483647, add: 1, sub: -1, mul: -2147483648, div: 1, mod: -1}, itd32{a: -2147483648, b: -1, add: 2147483647, sub: -2147483647, mul: -2147483648, div: -2147483648, mod: 0}, itd32{a: -2147483648, b: 0, add: -2147483648, sub: -2147483648, mul: 0}, itd32{a: -2147483648, b: 1, add: -2147483647, sub: 2147483647, mul: -2147483648, div: -2147483648, mod: 0}, itd32{a: -2147483648, b: 2147483647, add: -1, sub: 1, mul: -2147483648, div: -1, mod: -1}, itd32{a: -2147483647, b: -2147483648, add: 1, sub: 1, mul: -2147483648, div: 0, mod: -2147483647}, itd32{a: -2147483647, b: -2147483647, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, itd32{a: -2147483647, b: -1, add: -2147483648, sub: -2147483646, mul: 2147483647, div: 2147483647, mod: 0}, itd32{a: -2147483647, b: 0, add: -2147483647, sub: -2147483647, mul: 0}, itd32{a: -2147483647, b: 1, add: -2147483646, sub: -2147483648, mul: -2147483647, div: -2147483647, mod: 0}, itd32{a: -2147483647, b: 2147483647, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, itd32{a: -1, b: -2147483648, add: 2147483647, sub: 2147483647, mul: -2147483648, div: 0, mod: -1}, itd32{a: -1, b: -2147483647, add: -2147483648, sub: 2147483646, mul: 2147483647, div: 0, mod: -1}, itd32{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, itd32{a: -1, b: 0, add: -1, sub: -1, mul: 0}, itd32{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, itd32{a: -1, b: 2147483647, add: 2147483646, sub: -2147483648, mul: -2147483647, div: 0, mod: -1}, itd32{a: 0, b: -2147483648, add: -2147483648, sub: -2147483648, mul: 0, div: 0, mod: 0}, itd32{a: 0, b: -2147483647, add: -2147483647, sub: 2147483647, mul: 0, div: 0, mod: 0}, itd32{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0}, itd32{a: 0, b: 0, add: 0, sub: 0, mul: 0}, itd32{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0}, itd32{a: 0, b: 2147483647, add: 2147483647, sub: -2147483647, mul: 0, div: 0, mod: 0}, itd32{a: 1, b: -2147483648, add: -2147483647, sub: -2147483647, mul: -2147483648, div: 0, mod: 1}, itd32{a: 1, b: -2147483647, add: -2147483646, sub: -2147483648, mul: -2147483647, div: 0, mod: 1}, itd32{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, itd32{a: 1, b: 0, add: 1, sub: 1, mul: 0}, itd32{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, itd32{a: 1, b: 2147483647, add: -2147483648, sub: -2147483646, mul: 2147483647, div: 0, mod: 1}, itd32{a: 2147483647, b: -2147483648, add: -1, sub: -1, mul: -2147483648, div: 0, mod: 2147483647}, itd32{a: 2147483647, b: -2147483647, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, itd32{a: 2147483647, b: -1, add: 2147483646, sub: -2147483648, mul: -2147483647, div: -2147483647, mod: 0}, itd32{a: 2147483647, b: 0, add: 2147483647, sub: 2147483647, mul: 0}, itd32{a: 2147483647, b: 1, add: -2147483648, sub: 2147483646, mul: 2147483647, 
div: 2147483647, mod: 0}, itd32{a: 2147483647, b: 2147483647, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, } var uint16_data []utd16 = []utd16{utd16{a: 0, b: 0, add: 0, sub: 0, mul: 0}, utd16{a: 0, b: 1, add: 1, sub: 65535, mul: 0, div: 0, mod: 0}, utd16{a: 0, b: 65535, add: 65535, sub: 1, mul: 0, div: 0, mod: 0}, utd16{a: 1, b: 0, add: 1, sub: 1, mul: 0}, utd16{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, utd16{a: 1, b: 65535, add: 0, sub: 2, mul: 65535, div: 0, mod: 1}, utd16{a: 65535, b: 0, add: 65535, sub: 65535, mul: 0}, utd16{a: 65535, b: 1, add: 0, sub: 65534, mul: 65535, div: 65535, mod: 0}, utd16{a: 65535, b: 65535, add: 65534, sub: 0, mul: 1, div: 1, mod: 0}, } var int16_data []itd16 = []itd16{itd16{a: -32768, b: -32768, add: 0, sub: 0, mul: 0, div: 1, mod: 0}, itd16{a: -32768, b: -32767, add: 1, sub: -1, mul: -32768, div: 1, mod: -1}, itd16{a: -32768, b: -1, add: 32767, sub: -32767, mul: -32768, div: -32768, mod: 0}, itd16{a: -32768, b: 0, add: -32768, sub: -32768, mul: 0}, itd16{a: -32768, b: 1, add: -32767, sub: 32767, mul: -32768, div: -32768, mod: 0}, itd16{a: -32768, b: 32766, add: -2, sub: 2, mul: 0, div: -1, mod: -2}, itd16{a: -32768, b: 32767, add: -1, sub: 1, mul: -32768, div: -1, mod: -1}, itd16{a: -32767, b: -32768, add: 1, sub: 1, mul: -32768, div: 0, mod: -32767}, itd16{a: -32767, b: -32767, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, itd16{a: -32767, b: -1, add: -32768, sub: -32766, mul: 32767, div: 32767, mod: 0}, itd16{a: -32767, b: 0, add: -32767, sub: -32767, mul: 0}, itd16{a: -32767, b: 1, add: -32766, sub: -32768, mul: -32767, div: -32767, mod: 0}, itd16{a: -32767, b: 32766, add: -1, sub: 3, mul: 32766, div: -1, mod: -1}, itd16{a: -32767, b: 32767, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, itd16{a: -1, b: -32768, add: 32767, sub: 32767, mul: -32768, div: 0, mod: -1}, itd16{a: -1, b: -32767, add: -32768, sub: 32766, mul: 32767, div: 0, mod: -1}, itd16{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, itd16{a: -1, b: 0, add: -1, sub: -1, mul: 0}, itd16{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, itd16{a: -1, b: 32766, add: 32765, sub: -32767, mul: -32766, div: 0, mod: -1}, itd16{a: -1, b: 32767, add: 32766, sub: -32768, mul: -32767, div: 0, mod: -1}, itd16{a: 0, b: -32768, add: -32768, sub: -32768, mul: 0, div: 0, mod: 0}, itd16{a: 0, b: -32767, add: -32767, sub: 32767, mul: 0, div: 0, mod: 0}, itd16{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0}, itd16{a: 0, b: 0, add: 0, sub: 0, mul: 0}, itd16{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0}, itd16{a: 0, b: 32766, add: 32766, sub: -32766, mul: 0, div: 0, mod: 0}, itd16{a: 0, b: 32767, add: 32767, sub: -32767, mul: 0, div: 0, mod: 0}, itd16{a: 1, b: -32768, add: -32767, sub: -32767, mul: -32768, div: 0, mod: 1}, itd16{a: 1, b: -32767, add: -32766, sub: -32768, mul: -32767, div: 0, mod: 1}, itd16{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, itd16{a: 1, b: 0, add: 1, sub: 1, mul: 0}, itd16{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, itd16{a: 1, b: 32766, add: 32767, sub: -32765, mul: 32766, div: 0, mod: 1}, itd16{a: 1, b: 32767, add: -32768, sub: -32766, mul: 32767, div: 0, mod: 1}, itd16{a: 32766, b: -32768, add: -2, sub: -2, mul: 0, div: 0, mod: 32766}, itd16{a: 32766, b: -32767, add: -1, sub: -3, mul: 32766, div: 0, mod: 32766}, itd16{a: 32766, b: -1, add: 32765, sub: 32767, mul: -32766, div: -32766, mod: 0}, itd16{a: 32766, b: 0, add: 32766, sub: 32766, mul: 0}, itd16{a: 32766, b: 1, add: 32767, sub: 32765, mul: 32766, div: 32766, mod: 0}, itd16{a: 32766, 
b: 32766, add: -4, sub: 0, mul: 4, div: 1, mod: 0}, itd16{a: 32766, b: 32767, add: -3, sub: -1, mul: -32766, div: 0, mod: 32766}, itd16{a: 32767, b: -32768, add: -1, sub: -1, mul: -32768, div: 0, mod: 32767}, itd16{a: 32767, b: -32767, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, itd16{a: 32767, b: -1, add: 32766, sub: -32768, mul: -32767, div: -32767, mod: 0}, itd16{a: 32767, b: 0, add: 32767, sub: 32767, mul: 0}, itd16{a: 32767, b: 1, add: -32768, sub: 32766, mul: 32767, div: 32767, mod: 0}, itd16{a: 32767, b: 32766, add: -3, sub: 1, mul: -32766, div: 1, mod: 1}, itd16{a: 32767, b: 32767, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, } var uint8_data []utd8 = []utd8{utd8{a: 0, b: 0, add: 0, sub: 0, mul: 0}, utd8{a: 0, b: 1, add: 1, sub: 255, mul: 0, div: 0, mod: 0}, utd8{a: 0, b: 255, add: 255, sub: 1, mul: 0, div: 0, mod: 0}, utd8{a: 1, b: 0, add: 1, sub: 1, mul: 0}, utd8{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, utd8{a: 1, b: 255, add: 0, sub: 2, mul: 255, div: 0, mod: 1}, utd8{a: 255, b: 0, add: 255, sub: 255, mul: 0}, utd8{a: 255, b: 1, add: 0, sub: 254, mul: 255, div: 255, mod: 0}, utd8{a: 255, b: 255, add: 254, sub: 0, mul: 1, div: 1, mod: 0}, } var int8_data []itd8 = []itd8{itd8{a: -128, b: -128, add: 0, sub: 0, mul: 0, div: 1, mod: 0}, itd8{a: -128, b: -127, add: 1, sub: -1, mul: -128, div: 1, mod: -1}, itd8{a: -128, b: -1, add: 127, sub: -127, mul: -128, div: -128, mod: 0}, itd8{a: -128, b: 0, add: -128, sub: -128, mul: 0}, itd8{a: -128, b: 1, add: -127, sub: 127, mul: -128, div: -128, mod: 0}, itd8{a: -128, b: 126, add: -2, sub: 2, mul: 0, div: -1, mod: -2}, itd8{a: -128, b: 127, add: -1, sub: 1, mul: -128, div: -1, mod: -1}, itd8{a: -127, b: -128, add: 1, sub: 1, mul: -128, div: 0, mod: -127}, itd8{a: -127, b: -127, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, itd8{a: -127, b: -1, add: -128, sub: -126, mul: 127, div: 127, mod: 0}, itd8{a: -127, b: 0, add: -127, sub: -127, mul: 0}, itd8{a: -127, b: 1, add: -126, sub: -128, mul: -127, div: -127, mod: 0}, itd8{a: -127, b: 126, add: -1, sub: 3, mul: 126, div: -1, mod: -1}, itd8{a: -127, b: 127, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, itd8{a: -1, b: -128, add: 127, sub: 127, mul: -128, div: 0, mod: -1}, itd8{a: -1, b: -127, add: -128, sub: 126, mul: 127, div: 0, mod: -1}, itd8{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, itd8{a: -1, b: 0, add: -1, sub: -1, mul: 0}, itd8{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, itd8{a: -1, b: 126, add: 125, sub: -127, mul: -126, div: 0, mod: -1}, itd8{a: -1, b: 127, add: 126, sub: -128, mul: -127, div: 0, mod: -1}, itd8{a: 0, b: -128, add: -128, sub: -128, mul: 0, div: 0, mod: 0}, itd8{a: 0, b: -127, add: -127, sub: 127, mul: 0, div: 0, mod: 0}, itd8{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0}, itd8{a: 0, b: 0, add: 0, sub: 0, mul: 0}, itd8{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0}, itd8{a: 0, b: 126, add: 126, sub: -126, mul: 0, div: 0, mod: 0}, itd8{a: 0, b: 127, add: 127, sub: -127, mul: 0, div: 0, mod: 0}, itd8{a: 1, b: -128, add: -127, sub: -127, mul: -128, div: 0, mod: 1}, itd8{a: 1, b: -127, add: -126, sub: -128, mul: -127, div: 0, mod: 1}, itd8{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, itd8{a: 1, b: 0, add: 1, sub: 1, mul: 0}, itd8{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, itd8{a: 1, b: 126, add: 127, sub: -125, mul: 126, div: 0, mod: 1}, itd8{a: 1, b: 127, add: -128, sub: -126, mul: 127, div: 0, mod: 1}, itd8{a: 126, b: -128, add: -2, sub: -2, mul: 0, div: 0, mod: 126}, itd8{a: 126, b: -127, add: -1, sub: -3, mul: 
126, div: 0, mod: 126}, itd8{a: 126, b: -1, add: 125, sub: 127, mul: -126, div: -126, mod: 0}, itd8{a: 126, b: 0, add: 126, sub: 126, mul: 0}, itd8{a: 126, b: 1, add: 127, sub: 125, mul: 126, div: 126, mod: 0}, itd8{a: 126, b: 126, add: -4, sub: 0, mul: 4, div: 1, mod: 0}, itd8{a: 126, b: 127, add: -3, sub: -1, mul: -126, div: 0, mod: 126}, itd8{a: 127, b: -128, add: -1, sub: -1, mul: -128, div: 0, mod: 127}, itd8{a: 127, b: -127, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, itd8{a: 127, b: -1, add: 126, sub: -128, mul: -127, div: -127, mod: 0}, itd8{a: 127, b: 0, add: 127, sub: 127, mul: 0}, itd8{a: 127, b: 1, add: -128, sub: 126, mul: 127, div: 127, mod: 0}, itd8{a: 127, b: 126, add: -3, sub: 1, mul: -126, div: 1, mod: 1}, itd8{a: 127, b: 127, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, } //TestArithmeticBoundary tests boundary results for arithmetic operations. func TestArithmeticBoundary(t *testing.T) { for _, v := range uint64_data { if got := add_uint64_ssa(v.a, v.b); got != v.add { t.Errorf("add_uint64 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) } if got := sub_uint64_ssa(v.a, v.b); got != v.sub { t.Errorf("sub_uint64 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) } if v.b != 0 { if got := div_uint64_ssa(v.a, v.b); got != v.div { t.Errorf("div_uint64 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) } } if v.b != 0 { if got := mod_uint64_ssa(v.a, v.b); got != v.mod { t.Errorf("mod_uint64 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) } } if got := mul_uint64_ssa(v.a, v.b); got != v.mul { t.Errorf("mul_uint64 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) } } for _, v := range int64_data { if got := add_int64_ssa(v.a, v.b); got != v.add { t.Errorf("add_int64 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) } if got := sub_int64_ssa(v.a, v.b); got != v.sub { t.Errorf("sub_int64 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) } if v.b != 0 { if got := div_int64_ssa(v.a, v.b); got != v.div { t.Errorf("div_int64 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) } } if v.b != 0 { if got := mod_int64_ssa(v.a, v.b); got != v.mod { t.Errorf("mod_int64 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) } } if got := mul_int64_ssa(v.a, v.b); got != v.mul { t.Errorf("mul_int64 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) } } for _, v := range uint32_data { if got := add_uint32_ssa(v.a, v.b); got != v.add { t.Errorf("add_uint32 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) } if got := sub_uint32_ssa(v.a, v.b); got != v.sub { t.Errorf("sub_uint32 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) } if v.b != 0 { if got := div_uint32_ssa(v.a, v.b); got != v.div { t.Errorf("div_uint32 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) } } if v.b != 0 { if got := mod_uint32_ssa(v.a, v.b); got != v.mod { t.Errorf("mod_uint32 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) } } if got := mul_uint32_ssa(v.a, v.b); got != v.mul { t.Errorf("mul_uint32 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) } } for _, v := range int32_data { if got := add_int32_ssa(v.a, v.b); got != v.add { t.Errorf("add_int32 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) } if got := sub_int32_ssa(v.a, v.b); got != v.sub { t.Errorf("sub_int32 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) } if v.b != 0 { if got := div_int32_ssa(v.a, v.b); got != v.div { t.Errorf("div_int32 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) } } if v.b != 0 { if got := mod_int32_ssa(v.a, v.b); got != v.mod { t.Errorf("mod_int32 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) } } if got := mul_int32_ssa(v.a, v.b); got != v.mul { t.Errorf("mul_int32 
%d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) } } for _, v := range uint16_data { if got := add_uint16_ssa(v.a, v.b); got != v.add { t.Errorf("add_uint16 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) } if got := sub_uint16_ssa(v.a, v.b); got != v.sub { t.Errorf("sub_uint16 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) } if v.b != 0 { if got := div_uint16_ssa(v.a, v.b); got != v.div { t.Errorf("div_uint16 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) } } if v.b != 0 { if got := mod_uint16_ssa(v.a, v.b); got != v.mod { t.Errorf("mod_uint16 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) } } if got := mul_uint16_ssa(v.a, v.b); got != v.mul { t.Errorf("mul_uint16 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) } } for _, v := range int16_data { if got := add_int16_ssa(v.a, v.b); got != v.add { t.Errorf("add_int16 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) } if got := sub_int16_ssa(v.a, v.b); got != v.sub { t.Errorf("sub_int16 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) } if v.b != 0 { if got := div_int16_ssa(v.a, v.b); got != v.div { t.Errorf("div_int16 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) } } if v.b != 0 { if got := mod_int16_ssa(v.a, v.b); got != v.mod { t.Errorf("mod_int16 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) } } if got := mul_int16_ssa(v.a, v.b); got != v.mul { t.Errorf("mul_int16 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) } } for _, v := range uint8_data { if got := add_uint8_ssa(v.a, v.b); got != v.add { t.Errorf("add_uint8 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) } if got := sub_uint8_ssa(v.a, v.b); got != v.sub { t.Errorf("sub_uint8 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) } if v.b != 0 { if got := div_uint8_ssa(v.a, v.b); got != v.div { t.Errorf("div_uint8 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) } } if v.b != 0 { if got := mod_uint8_ssa(v.a, v.b); got != v.mod { t.Errorf("mod_uint8 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) } } if got := mul_uint8_ssa(v.a, v.b); got != v.mul { t.Errorf("mul_uint8 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) } } for _, v := range int8_data { if got := add_int8_ssa(v.a, v.b); got != v.add { t.Errorf("add_int8 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) } if got := sub_int8_ssa(v.a, v.b); got != v.sub { t.Errorf("sub_int8 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) } if v.b != 0 { if got := div_int8_ssa(v.a, v.b); got != v.div { t.Errorf("div_int8 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) } } if v.b != 0 { if got := mod_int8_ssa(v.a, v.b); got != v.mod { t.Errorf("mod_int8 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) } } if got := mul_int8_ssa(v.a, v.b); got != v.mul { t.Errorf("mul_int8 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) } } }
go/src/cmd/compile/internal/test/testdata/arithBoundary_test.go/0
{ "file_path": "go/src/cmd/compile/internal/test/testdata/arithBoundary_test.go", "repo_id": "go", "token_count": 17032 }
114
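The tables above encode Go's defined two's-complement wraparound. A minimal standalone check of two of the int8 rows; the helper below merely mirrors the shape of the generated _ssa functions:

package main

import "fmt"

//go:noinline
func add_int8(a, b int8) int8 { return a + b }

func main() {
	// Matches itd8{a: 127, b: 1, add: -128, ...} and
	// itd8{a: -128, b: -1, add: 127, ...} in the table above:
	// signed overflow wraps around in Go rather than trapping.
	fmt.Println(add_int8(127, 1))   // -128
	fmt.Println(add_int8(-128, -1)) // 127
}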
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "fmt" "strings" ) // make fake flow graph. // The blocks of the flow graph are designated with letters A // through Z, always including A (start block) and Z (exit // block) The specification of a flow graph is a comma- // separated list of block successor words, for blocks ordered // A, B, C etc, where each block except Z has one or two // successors, and any block except A can be a target. Within // the generated code, each block with two successors includes // a conditional testing x & 1 != 0 (x is the input parameter // to the generated function) and also unconditionally shifts x // right by one, so that different inputs generate different // execution paths, including loops. Every block inverts a // global binary to ensure it is not empty. For a flow graph // with J words (J+1 blocks), a J-1 bit serial number specifies // which blocks (not including A and Z) include an increment of // the return variable y by increasing powers of 10, and a // different version of the test function is created for each // of the 2-to-the-(J-1) serial numbers. // For each generated function a compact summary is also // created so that the generated function can be simulated // with a simple interpreter to sanity check the behavior of // the compiled code. // For example: // func BC_CD_BE_BZ_CZ101(x int64) int64 { // y := int64(0) // var b int64 // _ = b // b = x & 1 // x = x >> 1 // if b != 0 { // goto C // } // goto B // B: // glob_ = !glob_ // y += 1 // b = x & 1 // x = x >> 1 // if b != 0 { // goto D // } // goto C // C: // glob_ = !glob_ // // no y increment // b = x & 1 // x = x >> 1 // if b != 0 { // goto E // } // goto B // D: // glob_ = !glob_ // y += 10 // b = x & 1 // x = x >> 1 // if b != 0 { // goto Z // } // goto B // E: // glob_ = !glob_ // // no y increment // b = x & 1 // x = x >> 1 // if b != 0 { // goto Z // } // goto C // Z: // return y // } // {f:BC_CD_BE_BZ_CZ101, // maxin:32, blocks:[]blo{ // blo{inc:0, cond:true, succs:[2]int64{1, 2}}, // blo{inc:1, cond:true, succs:[2]int64{2, 3}}, // blo{inc:0, cond:true, succs:[2]int64{1, 4}}, // blo{inc:10, cond:true, succs:[2]int64{1, 25}}, // blo{inc:0, cond:true, succs:[2]int64{2, 25}},}}, var labels string = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" func blocks(spec string) (blocks []string, fnameBase string) { spec = strings.ToUpper(spec) blocks = strings.Split(spec, ",") fnameBase = strings.Replace(spec, ",", "_", -1) return } func makeFunctionFromFlowGraph(blocks []blo, fname string) string { s := "" for j := range blocks { // begin block if j == 0 { // block A, implicit label s += ` func ` + fname + `(x int64) int64 { y := int64(0) var b int64 _ = b` } else { // block B,C, etc, explicit label w/ conditional increment l := labels[j : j+1] yeq := ` // no y increment` if blocks[j].inc != 0 { yeq = ` y += ` + fmt.Sprintf("%d", blocks[j].inc) } s += ` ` + l + `: glob = !glob` + yeq } // edges to successors if blocks[j].cond { // conditionally branch to second successor s += ` b = x & 1 x = x >> 1 if b != 0 {` + ` goto ` + string(labels[blocks[j].succs[1]]) + ` }` } // branch to first successor s += ` goto ` + string(labels[blocks[j].succs[0]]) } // end block (Z) s += ` Z: return y } ` return s } var graphs []string = []string{ "Z", "BZ,Z", "B,BZ", "BZ,BZ", "ZB,Z", "B,ZB", "ZB,BZ", "ZB,ZB", "BC,C,Z", "BC,BC,Z", "BC,BC,BZ", "BC,Z,Z", "BC,ZC,Z", "BC,ZC,BZ", "BZ,C,Z", "BZ,BC,Z", 
"BZ,CZ,Z", "BZ,C,BZ", "BZ,BC,BZ", "BZ,CZ,BZ", "BZ,C,CZ", "BZ,BC,CZ", "BZ,CZ,CZ", "BC,CD,BE,BZ,CZ", "BC,BD,CE,CZ,BZ", "BC,BD,CE,FZ,GZ,F,G", "BC,BD,CE,FZ,GZ,G,F", "BC,DE,BE,FZ,FZ,Z", "BC,DE,BE,FZ,ZF,Z", "BC,DE,BE,ZF,FZ,Z", "BC,DE,EB,FZ,FZ,Z", "BC,ED,BE,FZ,FZ,Z", "CB,DE,BE,FZ,FZ,Z", "CB,ED,BE,FZ,FZ,Z", "BC,ED,EB,FZ,ZF,Z", "CB,DE,EB,ZF,FZ,Z", "CB,ED,EB,FZ,FZ,Z", "BZ,CD,CD,CE,BZ", "EC,DF,FG,ZC,GB,BE,FD", "BH,CF,DG,HE,BF,CG,DH,BZ", } // blo describes a block in the generated/interpreted code type blo struct { inc int64 // increment amount cond bool // block ends in conditional succs [2]int64 } // strings2blocks converts a slice of strings specifying // successors into a slice of blo encoding the blocks in a // common form easy to execute or interpret. func strings2blocks(blocks []string, fname string, i int) (bs []blo, cond uint) { bs = make([]blo, len(blocks)) edge := int64(1) cond = 0 k := uint(0) for j, s := range blocks { if j == 0 { } else { if (i>>k)&1 != 0 { bs[j].inc = edge edge *= 10 } k++ } if len(s) > 1 { bs[j].succs[1] = int64(blocks[j][1] - 'A') bs[j].cond = true cond++ } bs[j].succs[0] = int64(blocks[j][0] - 'A') } return bs, cond } // fmtBlocks writes out the blocks for consumption in the generated test func fmtBlocks(bs []blo) string { s := "[]blo{" for _, b := range bs { s += fmt.Sprintf("blo{inc:%d, cond:%v, succs:[2]int64{%d, %d}},", b.inc, b.cond, b.succs[0], b.succs[1]) } s += "}" return s } func main() { fmt.Printf(`// This is a machine-generated test file from flowgraph_generator1.go. package main import "fmt" var glob bool `) s := "var funs []fun = []fun{" for _, g := range graphs { split, fnameBase := blocks(g) nconfigs := 1 << uint(len(split)-1) for i := 0; i < nconfigs; i++ { fname := fnameBase + fmt.Sprintf("%b", i) bs, k := strings2blocks(split, fname, i) fmt.Printf("%s", makeFunctionFromFlowGraph(bs, fname)) s += ` {f:` + fname + `, maxin:` + fmt.Sprintf("%d", 1<<k) + `, blocks:` + fmtBlocks(bs) + `},` } } s += `} ` // write types for name+array tables. fmt.Printf("%s", ` type blo struct { inc int64 cond bool succs [2]int64 } type fun struct { f func(int64) int64 maxin int64 blocks []blo } `) // write table of function names and blo arrays. fmt.Printf("%s", s) // write interpreter and main/test fmt.Printf("%s", ` func interpret(blocks []blo, x int64) (int64, bool) { y := int64(0) last := int64(25) // 'Z'-'A' j := int64(0) for i := 0; i < 4*len(blocks); i++ { b := blocks[j] y += b.inc next := b.succs[0] if b.cond { c := x&1 != 0 x = x>>1 if c { next = b.succs[1] } } if next == last { return y, true } j = next } return -1, false } func main() { sum := int64(0) for i, f := range funs { for x := int64(0); x < 16*f.maxin; x++ { y, ok := interpret(f.blocks, x) if ok { yy := f.f(x) if y != yy { fmt.Printf("y(%d) != yy(%d), x=%b, i=%d, blocks=%v\n", y, yy, x, i, f.blocks) return } sum += y } } } // fmt.Printf("Sum of all returns over all terminating inputs is %d\n", sum) } `) }
go/src/cmd/compile/internal/test/testdata/flowgraph_generator1.go/0
{ "file_path": "go/src/cmd/compile/internal/test/testdata/flowgraph_generator1.go", "repo_id": "go", "token_count": 3165 }
115
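Decoding a successor spec like "BC,CD,BE,BZ,CZ" can be shown without the full generator. This sketch follows the same conventions described in the comment above (word j describes block j; a two-letter word means the block ends in a conditional on the next bit of x):

package main

import (
	"fmt"
	"strings"
)

func main() {
	spec := "BC,CD,BE,BZ,CZ" // same example spec as in the generator's comment
	for j, w := range strings.Split(spec, ",") {
		name := rune('A' + j)
		if len(w) > 1 {
			// conditional block: a bit of x picks the second successor
			fmt.Printf("block %c: if b != 0 goto %c else goto %c\n", name, w[1], w[0])
		} else {
			fmt.Printf("block %c: goto %c\n", name, w[0])
		}
	}
}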
// Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // WARNING: Please avoid updating this file. If this file needs to be updated, // then a new devirt.pprof file should be generated: // // $ cd $GOROOT/src/cmd/compile/internal/test/testdata/pgo/devirtualize/ // $ go mod init example.com/pgo/devirtualize // $ go test -bench=. -cpuprofile ./devirt.pprof package devirt import ( "testing" "example.com/pgo/devirtualize/mult.pkg" ) func BenchmarkDevirtIface(b *testing.B) { var ( a1 Add a2 Sub m1 mult.Mult m2 mult.NegMult ) ExerciseIface(b.N, a1, a2, m1, m2) } // Verify that devirtualization doesn't result in calls or side effects applying more than once. func TestDevirtIface(t *testing.T) { var ( a1 Add a2 Sub m1 mult.Mult m2 mult.NegMult ) if v := ExerciseIface(10, a1, a2, m1, m2); v != 1176 { t.Errorf("ExerciseIface(10) got %d want 1176", v) } } func BenchmarkDevirtFuncConcrete(b *testing.B) { ExerciseFuncConcrete(b.N, AddFn, SubFn, mult.MultFn, mult.NegMultFn) } func TestDevirtFuncConcrete(t *testing.T) { if v := ExerciseFuncConcrete(10, AddFn, SubFn, mult.MultFn, mult.NegMultFn); v != 1176 { t.Errorf("ExerciseFuncConcrete(10) got %d want 1176", v) } } func BenchmarkDevirtFuncField(b *testing.B) { ExerciseFuncField(b.N, AddFn, SubFn, mult.MultFn, mult.NegMultFn) } func TestDevirtFuncField(t *testing.T) { if v := ExerciseFuncField(10, AddFn, SubFn, mult.MultFn, mult.NegMultFn); v != 1176 { t.Errorf("ExerciseFuncField(10) got %d want 1176", v) } } func BenchmarkDevirtFuncClosure(b *testing.B) { ExerciseFuncClosure(b.N, AddClosure(), SubClosure(), mult.MultClosure(), mult.NegMultClosure()) } func TestDevirtFuncClosure(t *testing.T) { if v := ExerciseFuncClosure(10, AddClosure(), SubClosure(), mult.MultClosure(), mult.NegMultClosure()); v != 1176 { t.Errorf("ExerciseFuncClosure(10) got %d want 1176", v) } }
go/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go/0
{ "file_path": "go/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go", "repo_id": "go", "token_count": 801 }
116
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "math" "testing" ) var tests = [...]struct { name string in float64 // used for error messages, not an input got float64 want float64 }{ {"sqrt0", 0, math.Sqrt(0), 0}, {"sqrt1", 1, math.Sqrt(1), 1}, {"sqrt2", 2, math.Sqrt(2), math.Sqrt2}, {"sqrt4", 4, math.Sqrt(4), 2}, {"sqrt100", 100, math.Sqrt(100), 10}, {"sqrt101", 101, math.Sqrt(101), 10.04987562112089}, } var nanTests = [...]struct { name string in float64 // used for error messages, not an input got float64 }{ {"sqrtNaN", math.NaN(), math.Sqrt(math.NaN())}, {"sqrtNegative", -1, math.Sqrt(-1)}, {"sqrtNegInf", math.Inf(-1), math.Sqrt(math.Inf(-1))}, } func TestSqrtConst(t *testing.T) { for _, test := range tests { if test.got != test.want { t.Errorf("%s: math.Sqrt(%f): got %f, want %f\n", test.name, test.in, test.got, test.want) } } for _, test := range nanTests { if math.IsNaN(test.got) != true { t.Errorf("%s: math.Sqrt(%f): got %f, want NaN\n", test.name, test.in, test.got) } } if got := math.Sqrt(math.Inf(1)); !math.IsInf(got, 1) { t.Errorf("math.Sqrt(+Inf), got %f, want +Inf\n", got) } }
go/src/cmd/compile/internal/test/testdata/sqrtConst_test.go/0
{ "file_path": "go/src/cmd/compile/internal/test/testdata/sqrtConst_test.go", "repo_id": "go", "token_count": 575 }
117
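The point of the table above is that math.Sqrt applied to a constant argument can be folded at compile time. A small sketch — assuming the compiler folds the constant call, which is the behavior the test asserts — comparing the folded and runtime paths:

package main

import (
	"fmt"
	"math"
)

func main() {
	// math.Sqrt(101) has a constant argument and may be folded by the
	// compiler; routing the operand through a variable keeps the second
	// call at runtime. The test above asserts both agree.
	x := 101.0
	fmt.Println(math.Sqrt(101) == math.Sqrt(x)) // true when folding matches the runtime result
}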
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package typecheck import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/types" "cmd/internal/src" "fmt" "go/constant" "go/token" ) // MakeDotArgs packages all the arguments that match a ... T parameter into a []T. func MakeDotArgs(pos src.XPos, typ *types.Type, args []ir.Node) ir.Node { if len(args) == 0 { return ir.NewNilExpr(pos, typ) } args = append([]ir.Node(nil), args...) lit := ir.NewCompLitExpr(pos, ir.OCOMPLIT, typ, args) lit.SetImplicit(true) n := Expr(lit) if n.Type() == nil { base.FatalfAt(pos, "mkdotargslice: typecheck failed") } return n } // FixVariadicCall rewrites calls to variadic functions to use an // explicit ... argument if one is not already present. func FixVariadicCall(call *ir.CallExpr) { fntype := call.Fun.Type() if !fntype.IsVariadic() || call.IsDDD { return } vi := fntype.NumParams() - 1 vt := fntype.Param(vi).Type args := call.Args extra := args[vi:] slice := MakeDotArgs(call.Pos(), vt, extra) for i := range extra { extra[i] = nil // allow GC } call.Args = append(args[:vi], slice) call.IsDDD = true } // FixMethodCall rewrites a method call t.M(...) into a function call T.M(t, ...). func FixMethodCall(call *ir.CallExpr) { if call.Fun.Op() != ir.ODOTMETH { return } dot := call.Fun.(*ir.SelectorExpr) fn := NewMethodExpr(dot.Pos(), dot.X.Type(), dot.Selection.Sym) args := make([]ir.Node, 1+len(call.Args)) args[0] = dot.X copy(args[1:], call.Args) call.SetOp(ir.OCALLFUNC) call.Fun = fn call.Args = args } func AssertFixedCall(call *ir.CallExpr) { if call.Fun.Type().IsVariadic() && !call.IsDDD { base.FatalfAt(call.Pos(), "missed FixVariadicCall") } if call.Op() == ir.OCALLMETH { base.FatalfAt(call.Pos(), "missed FixMethodCall") } } // ClosureType returns the struct type used to hold all the information // needed in the closure for clo (clo must be an OCLOSURE node). // The address of a variable of the returned type can be cast to a func. func ClosureType(clo *ir.ClosureExpr) *types.Type { // Create the closure in the form of a composite literal. // Supposing the closure captures an int i and a string s // and has one float64 argument and no results, // the generated code looks like: // // clos = &struct{F uintptr; X0 *int; X1 *string}{func.1, &i, &s} // // The use of the struct provides type information to the garbage // collector so that it can walk the closure. We could use (in this // case) [3]unsafe.Pointer instead, but that would leave the gc in // the dark. The information appears in the binary in the form of // type descriptors; the struct is unnamed and uses exported field // names so that closures in multiple packages with the same struct // type can share the descriptor. fields := make([]*types.Field, 1+len(clo.Func.ClosureVars)) fields[0] = types.NewField(base.AutogeneratedPos, types.LocalPkg.Lookup("F"), types.Types[types.TUINTPTR]) it := NewClosureStructIter(clo.Func.ClosureVars) i := 0 for { n, typ, _ := it.Next() if n == nil { break } fields[1+i] = types.NewField(base.AutogeneratedPos, types.LocalPkg.LookupNum("X", i), typ) i++ } typ := types.NewStruct(fields) typ.SetNoalg(true) return typ } // MethodValueType returns the struct type used to hold all the information // needed in the closure for an OMETHVALUE node. The address of a variable of // the returned type can be cast to a func.
func MethodValueType(n *ir.SelectorExpr) *types.Type { t := types.NewStruct([]*types.Field{ types.NewField(base.Pos, Lookup("F"), types.Types[types.TUINTPTR]), types.NewField(base.Pos, Lookup("R"), n.X.Type()), }) t.SetNoalg(true) return t } // type check function definition // To be called by typecheck, not directly. // (Call typecheck.Func instead.) func tcFunc(n *ir.Func) { if base.EnableTrace && base.Flag.LowerT { defer tracePrint("tcFunc", n)(nil) } if name := n.Nname; name.Typecheck() == 0 { base.AssertfAt(name.Type() != nil, n.Pos(), "missing type: %v", name) name.SetTypecheck(1) } } // tcCall typechecks an OCALL node. func tcCall(n *ir.CallExpr, top int) ir.Node { Stmts(n.Init()) // imported rewritten f(g()) calls (#30907) n.Fun = typecheck(n.Fun, ctxExpr|ctxType|ctxCallee) l := n.Fun if l.Op() == ir.ONAME && l.(*ir.Name).BuiltinOp != 0 { l := l.(*ir.Name) if n.IsDDD && l.BuiltinOp != ir.OAPPEND { base.Errorf("invalid use of ... with builtin %v", l) } // builtin: OLEN, OCAP, etc. switch l.BuiltinOp { default: base.Fatalf("unknown builtin %v", l) case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OMAX, ir.OMIN, ir.OPRINT, ir.OPRINTLN, ir.ORECOVER: n.SetOp(l.BuiltinOp) n.Fun = nil n.SetTypecheck(0) // re-typechecking new op is OK, not a loop return typecheck(n, top) case ir.OCAP, ir.OCLEAR, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL, ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA: typecheckargs(n) fallthrough case ir.ONEW: arg, ok := needOneArg(n, "%v", n.Op()) if !ok { n.SetType(nil) return n } u := ir.NewUnaryExpr(n.Pos(), l.BuiltinOp, arg) return typecheck(ir.InitExpr(n.Init(), u), top) // typecheckargs can add to old.Init case ir.OCOMPLEX, ir.OCOPY, ir.OUNSAFEADD, ir.OUNSAFESLICE, ir.OUNSAFESTRING: typecheckargs(n) arg1, arg2, ok := needTwoArgs(n) if !ok { n.SetType(nil) return n } b := ir.NewBinaryExpr(n.Pos(), l.BuiltinOp, arg1, arg2) return typecheck(ir.InitExpr(n.Init(), b), top) // typecheckargs can add to old.Init } panic("unreachable") } n.Fun = DefaultLit(n.Fun, nil) l = n.Fun if l.Op() == ir.OTYPE { if n.IsDDD { base.Fatalf("invalid use of ... in type conversion to %v", l.Type()) } // pick off before type-checking arguments arg, ok := needOneArg(n, "conversion to %v", l.Type()) if !ok { n.SetType(nil) return n } n := ir.NewConvExpr(n.Pos(), ir.OCONV, nil, arg) n.SetType(l.Type()) return tcConv(n) } RewriteNonNameCall(n) typecheckargs(n) t := l.Type() if t == nil { n.SetType(nil) return n } types.CheckSize(t) switch l.Op() { case ir.ODOTINTER: n.SetOp(ir.OCALLINTER) case ir.ODOTMETH: l := l.(*ir.SelectorExpr) n.SetOp(ir.OCALLMETH) // typecheckaste was used here but there wasn't enough // information further down the call chain to know if we // were testing a method receiver for unexported fields. // It isn't necessary, so just do a sanity check. 
tp := t.Recv().Type if l.X == nil || !types.Identical(l.X.Type(), tp) { base.Fatalf("method receiver") } default: n.SetOp(ir.OCALLFUNC) if t.Kind() != types.TFUNC { if o := l; o.Name() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil { // be more specific when the non-function // name matches a predeclared function base.Errorf("cannot call non-function %L, declared at %s", l, base.FmtPos(o.Name().Pos())) } else { base.Errorf("cannot call non-function %L", l) } n.SetType(nil) return n } } typecheckaste(ir.OCALL, n.Fun, n.IsDDD, t.Params(), n.Args, func() string { return fmt.Sprintf("argument to %v", n.Fun) }) FixVariadicCall(n) FixMethodCall(n) if t.NumResults() == 0 { return n } if t.NumResults() == 1 { n.SetType(l.Type().Result(0).Type) if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.ONAME { if sym := n.Fun.(*ir.Name).Sym(); types.RuntimeSymName(sym) == "getg" { // Emit code for runtime.getg() directly instead of calling function. // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk, // so that the ordering pass can make sure to preserve the semantics of the original code // (in particular, the exact time of the function call) by introducing temporaries. // In this case, we know getg() always returns the same result within a given function // and we want to avoid the temporaries, so we do the rewrite earlier than is typical. n.SetOp(ir.OGETG) } } return n } // multiple return if top&(ctxMultiOK|ctxStmt) == 0 { base.Errorf("multiple-value %v() in single-value context", l) return n } n.SetType(l.Type().ResultsTuple()) return n } // tcAppend typechecks an OAPPEND node. func tcAppend(n *ir.CallExpr) ir.Node { typecheckargs(n) args := n.Args if len(args) == 0 { base.Errorf("missing arguments to append") n.SetType(nil) return n } t := args[0].Type() if t == nil { n.SetType(nil) return n } n.SetType(t) if !t.IsSlice() { if ir.IsNil(args[0]) { base.Errorf("first argument to append must be typed slice; have untyped nil") n.SetType(nil) return n } base.Errorf("first argument to append must be slice; have %L", t) n.SetType(nil) return n } if n.IsDDD { if len(args) == 1 { base.Errorf("cannot use ... on first argument to append") n.SetType(nil) return n } if len(args) != 2 { base.Errorf("too many arguments to append") n.SetType(nil) return n } // AssignConv is of args[1] not required here, as the // types of args[0] and args[1] don't need to match // (They will both have an underlying type which are // slices of identical base types, or be []byte and string.) // See issue 53888. return n } as := args[1:] for i, n := range as { if n.Type() == nil { continue } as[i] = AssignConv(n, t.Elem(), "append") types.CheckSize(as[i].Type()) // ensure width is calculated for backend } return n } // tcClear typechecks an OCLEAR node. func tcClear(n *ir.UnaryExpr) ir.Node { n.X = Expr(n.X) n.X = DefaultLit(n.X, nil) l := n.X t := l.Type() if t == nil { n.SetType(nil) return n } switch { case t.IsMap(), t.IsSlice(): default: base.Errorf("invalid operation: %v (argument must be a map or slice)", n) n.SetType(nil) return n } return n } // tcClose typechecks an OCLOSE node. 
func tcClose(n *ir.UnaryExpr) ir.Node { n.X = Expr(n.X) n.X = DefaultLit(n.X, nil) l := n.X t := l.Type() if t == nil { n.SetType(nil) return n } if !t.IsChan() { base.Errorf("invalid operation: %v (non-chan type %v)", n, t) n.SetType(nil) return n } if !t.ChanDir().CanSend() { base.Errorf("invalid operation: %v (cannot close receive-only channel)", n) n.SetType(nil) return n } return n } // tcComplex typechecks an OCOMPLEX node. func tcComplex(n *ir.BinaryExpr) ir.Node { l := Expr(n.X) r := Expr(n.Y) if l.Type() == nil || r.Type() == nil { n.SetType(nil) return n } l, r = defaultlit2(l, r, false) if l.Type() == nil || r.Type() == nil { n.SetType(nil) return n } n.X = l n.Y = r if !types.Identical(l.Type(), r.Type()) { base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type()) n.SetType(nil) return n } var t *types.Type switch l.Type().Kind() { default: base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type()) n.SetType(nil) return n case types.TIDEAL: t = types.UntypedComplex case types.TFLOAT32: t = types.Types[types.TCOMPLEX64] case types.TFLOAT64: t = types.Types[types.TCOMPLEX128] } n.SetType(t) return n } // tcCopy typechecks an OCOPY node. func tcCopy(n *ir.BinaryExpr) ir.Node { n.SetType(types.Types[types.TINT]) n.X = Expr(n.X) n.X = DefaultLit(n.X, nil) n.Y = Expr(n.Y) n.Y = DefaultLit(n.Y, nil) if n.X.Type() == nil || n.Y.Type() == nil { n.SetType(nil) return n } // copy([]byte, string) if n.X.Type().IsSlice() && n.Y.Type().IsString() { if types.Identical(n.X.Type().Elem(), types.ByteType) { return n } base.Errorf("arguments to copy have different element types: %L and string", n.X.Type()) n.SetType(nil) return n } if !n.X.Type().IsSlice() || !n.Y.Type().IsSlice() { if !n.X.Type().IsSlice() && !n.Y.Type().IsSlice() { base.Errorf("arguments to copy must be slices; have %L, %L", n.X.Type(), n.Y.Type()) } else if !n.X.Type().IsSlice() { base.Errorf("first argument to copy should be slice; have %L", n.X.Type()) } else { base.Errorf("second argument to copy should be slice or string; have %L", n.Y.Type()) } n.SetType(nil) return n } if !types.Identical(n.X.Type().Elem(), n.Y.Type().Elem()) { base.Errorf("arguments to copy have different element types: %L and %L", n.X.Type(), n.Y.Type()) n.SetType(nil) return n } return n } // tcDelete typechecks an ODELETE node. func tcDelete(n *ir.CallExpr) ir.Node { typecheckargs(n) args := n.Args if len(args) == 0 { base.Errorf("missing arguments to delete") n.SetType(nil) return n } if len(args) == 1 { base.Errorf("missing second (key) argument to delete") n.SetType(nil) return n } if len(args) != 2 { base.Errorf("too many arguments to delete") n.SetType(nil) return n } l := args[0] r := args[1] if l.Type() != nil && !l.Type().IsMap() { base.Errorf("first argument to delete must be map; have %L", l.Type()) n.SetType(nil) return n } args[1] = AssignConv(r, l.Type().Key(), "delete") return n } // tcMake typechecks an OMAKE node. 
func tcMake(n *ir.CallExpr) ir.Node { args := n.Args if len(args) == 0 { base.Errorf("missing argument to make") n.SetType(nil) return n } n.Args = nil l := args[0] l = typecheck(l, ctxType) t := l.Type() if t == nil { n.SetType(nil) return n } i := 1 var nn ir.Node switch t.Kind() { default: base.Errorf("cannot make type %v", t) n.SetType(nil) return n case types.TSLICE: if i >= len(args) { base.Errorf("missing len argument to make(%v)", t) n.SetType(nil) return n } l = args[i] i++ l = Expr(l) var r ir.Node if i < len(args) { r = args[i] i++ r = Expr(r) } if l.Type() == nil || (r != nil && r.Type() == nil) { n.SetType(nil) return n } if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) { n.SetType(nil) return n } if ir.IsConst(l, constant.Int) && r != nil && ir.IsConst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) { base.Errorf("len larger than cap in make(%v)", t) n.SetType(nil) return n } nn = ir.NewMakeExpr(n.Pos(), ir.OMAKESLICE, l, r) case types.TMAP: if i < len(args) { l = args[i] i++ l = Expr(l) l = DefaultLit(l, types.Types[types.TINT]) if l.Type() == nil { n.SetType(nil) return n } if !checkmake(t, "size", &l) { n.SetType(nil) return n } } else { l = ir.NewInt(base.Pos, 0) } nn = ir.NewMakeExpr(n.Pos(), ir.OMAKEMAP, l, nil) nn.SetEsc(n.Esc()) case types.TCHAN: l = nil if i < len(args) { l = args[i] i++ l = Expr(l) l = DefaultLit(l, types.Types[types.TINT]) if l.Type() == nil { n.SetType(nil) return n } if !checkmake(t, "buffer", &l) { n.SetType(nil) return n } } else { l = ir.NewInt(base.Pos, 0) } nn = ir.NewMakeExpr(n.Pos(), ir.OMAKECHAN, l, nil) } if i < len(args) { base.Errorf("too many arguments to make(%v)", t) n.SetType(nil) return n } nn.SetType(t) return nn } // tcMakeSliceCopy typechecks an OMAKESLICECOPY node. func tcMakeSliceCopy(n *ir.MakeExpr) ir.Node { // Errors here are Fatalf instead of Errorf because only the compiler // can construct an OMAKESLICECOPY node. // Components used in OMAKESCLICECOPY that are supplied by parsed source code // have already been typechecked in OMAKE and OCOPY earlier. t := n.Type() if t == nil { base.Fatalf("no type specified for OMAKESLICECOPY") } if !t.IsSlice() { base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type()) } if n.Len == nil { base.Fatalf("missing len argument for OMAKESLICECOPY") } if n.Cap == nil { base.Fatalf("missing slice argument to copy for OMAKESLICECOPY") } n.Len = Expr(n.Len) n.Cap = Expr(n.Cap) n.Len = DefaultLit(n.Len, types.Types[types.TINT]) if !n.Len.Type().IsInteger() && n.Type().Kind() != types.TIDEAL { base.Errorf("non-integer len argument in OMAKESLICECOPY") } if ir.IsConst(n.Len, constant.Int) { if ir.ConstOverflow(n.Len.Val(), types.Types[types.TINT]) { base.Fatalf("len for OMAKESLICECOPY too large") } if constant.Sign(n.Len.Val()) < 0 { base.Fatalf("len for OMAKESLICECOPY must be non-negative") } } return n } // tcNew typechecks an ONEW node. func tcNew(n *ir.UnaryExpr) ir.Node { if n.X == nil { // Fatalf because the OCALL above checked for us, // so this must be an internally-generated mistake. base.Fatalf("missing argument to new") } l := n.X l = typecheck(l, ctxType) t := l.Type() if t == nil { n.SetType(nil) return n } n.X = l n.SetType(types.NewPtr(t)) return n } // tcPanic typechecks an OPANIC node. func tcPanic(n *ir.UnaryExpr) ir.Node { n.X = Expr(n.X) n.X = AssignConv(n.X, types.Types[types.TINTER], "argument to panic") if n.X.Type() == nil { n.SetType(nil) return n } return n } // tcPrint typechecks an OPRINT or OPRINTN node. 
func tcPrint(n *ir.CallExpr) ir.Node { typecheckargs(n) ls := n.Args for i1, n1 := range ls { // Special case for print: int constant is int64, not int. if ir.IsConst(n1, constant.Int) { ls[i1] = DefaultLit(ls[i1], types.Types[types.TINT64]) } else { ls[i1] = DefaultLit(ls[i1], nil) } } return n } // tcMinMax typechecks an OMIN or OMAX node. func tcMinMax(n *ir.CallExpr) ir.Node { typecheckargs(n) arg0 := n.Args[0] for _, arg := range n.Args[1:] { if !types.Identical(arg.Type(), arg0.Type()) { base.FatalfAt(n.Pos(), "mismatched arguments: %L and %L", arg0, arg) } } n.SetType(arg0.Type()) return n } // tcRealImag typechecks an OREAL or OIMAG node. func tcRealImag(n *ir.UnaryExpr) ir.Node { n.X = Expr(n.X) l := n.X t := l.Type() if t == nil { n.SetType(nil) return n } // Determine result type. switch t.Kind() { case types.TIDEAL: n.SetType(types.UntypedFloat) case types.TCOMPLEX64: n.SetType(types.Types[types.TFLOAT32]) case types.TCOMPLEX128: n.SetType(types.Types[types.TFLOAT64]) default: base.Errorf("invalid argument %L for %v", l, n.Op()) n.SetType(nil) return n } return n } // tcRecover typechecks an ORECOVER node. func tcRecover(n *ir.CallExpr) ir.Node { if len(n.Args) != 0 { base.Errorf("too many arguments to recover") n.SetType(nil) return n } // FP is equal to caller's SP plus FixedFrameSize. var fp ir.Node = ir.NewCallExpr(n.Pos(), ir.OGETCALLERSP, nil, nil) if off := base.Ctxt.Arch.FixedFrameSize; off != 0 { fp = ir.NewBinaryExpr(n.Pos(), ir.OADD, fp, ir.NewInt(base.Pos, off)) } // TODO(mdempsky): Replace *int32 with unsafe.Pointer, without upsetting checkptr. fp = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp) n.SetOp(ir.ORECOVERFP) n.SetType(types.Types[types.TINTER]) n.Args = []ir.Node{Expr(fp)} return n } // tcUnsafeAdd typechecks an OUNSAFEADD node. func tcUnsafeAdd(n *ir.BinaryExpr) *ir.BinaryExpr { n.X = AssignConv(Expr(n.X), types.Types[types.TUNSAFEPTR], "argument to unsafe.Add") n.Y = DefaultLit(Expr(n.Y), types.Types[types.TINT]) if n.X.Type() == nil || n.Y.Type() == nil { n.SetType(nil) return n } if !n.Y.Type().IsInteger() { n.SetType(nil) return n } n.SetType(n.X.Type()) return n } // tcUnsafeSlice typechecks an OUNSAFESLICE node. func tcUnsafeSlice(n *ir.BinaryExpr) *ir.BinaryExpr { n.X = Expr(n.X) n.Y = Expr(n.Y) if n.X.Type() == nil || n.Y.Type() == nil { n.SetType(nil) return n } t := n.X.Type() if !t.IsPtr() { base.Errorf("first argument to unsafe.Slice must be pointer; have %L", t) } else if t.Elem().NotInHeap() { // TODO(mdempsky): This can be relaxed, but should only affect the // Go runtime itself. End users should only see not-in-heap // types due to incomplete C structs in cgo, and those types don't // have a meaningful size anyway. base.Errorf("unsafe.Slice of incomplete (or unallocatable) type not allowed") } if !checkunsafesliceorstring(n.Op(), &n.Y) { n.SetType(nil) return n } n.SetType(types.NewSlice(t.Elem())) return n } // tcUnsafeString typechecks an OUNSAFESTRING node. 
func tcUnsafeString(n *ir.BinaryExpr) *ir.BinaryExpr { n.X = Expr(n.X) n.Y = Expr(n.Y) if n.X.Type() == nil || n.Y.Type() == nil { n.SetType(nil) return n } t := n.X.Type() if !t.IsPtr() || !types.Identical(t.Elem(), types.Types[types.TUINT8]) { base.Errorf("first argument to unsafe.String must be *byte; have %L", t) } if !checkunsafesliceorstring(n.Op(), &n.Y) { n.SetType(nil) return n } n.SetType(types.Types[types.TSTRING]) return n } // ClosureStructIter iterates through a slice of closure variables returning // their type and offset in the closure struct. type ClosureStructIter struct { closureVars []*ir.Name offset int64 next int } // NewClosureStructIter creates a new ClosureStructIter for closureVars. func NewClosureStructIter(closureVars []*ir.Name) *ClosureStructIter { return &ClosureStructIter{ closureVars: closureVars, offset: int64(types.PtrSize), // PtrSize to skip past function entry PC field next: 0, } } // Next returns the next name, type and offset of the next closure variable. // A nil name is returned after the last closure variable. func (iter *ClosureStructIter) Next() (n *ir.Name, typ *types.Type, offset int64) { if iter.next >= len(iter.closureVars) { return nil, nil, 0 } n = iter.closureVars[iter.next] typ = n.Type() if !n.Byval() { typ = types.NewPtr(typ) } iter.next++ offset = types.RoundUp(iter.offset, typ.Alignment()) iter.offset = offset + typ.Size() return n, typ, offset }
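FixVariadicCall above rewrites a call to a variadic function into one with an explicit ... argument, using MakeDotArgs to pack the trailing arguments into a slice literal. The same transformation at the source level, as a minimal runnable sketch (the sum function is hypothetical, standing in for any variadic callee; this is not compiler IR):

package main

import "fmt"

func sum(base int, xs ...int) int {
	for _, x := range xs {
		base += x
	}
	return base
}

func main() {
	// what the source says: extra arguments are packed implicitly
	a := sum(1, 2, 3)

	// what FixVariadicCall makes explicit: pack the tail into a slice
	// (MakeDotArgs builds the []int literal) and pass it with ...
	b := sum(1, []int{2, 3}...)

	fmt.Println(a, b) // 6 6
}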
go/src/cmd/compile/internal/typecheck/func.go/0
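ClosureType and ClosureStructIter above lay captured variables out after a leading uintptr field holding the function entry PC, rounding each offset up to the field type's alignment via types.RoundUp. A standalone sketch of that offset arithmetic, assuming 8-byte pointers and hand-picked field sizes (the field type here is hypothetical, not the compiler's types.Type):

package main

import "fmt"

type field struct {
	name  string
	size  int64
	align int64 // must be a power of two, as Go type alignments are
}

// roundUp mirrors the effect of types.RoundUp used by ClosureStructIter.Next.
func roundUp(x, a int64) int64 { return (x + a - 1) &^ (a - 1) }

func main() {
	const ptrSize = 8 // skip the function entry PC field, as in NewClosureStructIter
	offset := int64(ptrSize)
	for _, f := range []field{{"X0", 1, 1}, {"X1", 8, 8}, {"X2", 4, 4}} {
		off := roundUp(offset, f.align)
		offset = off + f.size
		fmt.Printf("%s: offset %d\n", f.name, off) // X0: 8, X1: 16, X2: 24
	}
}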
{ "file_path": "go/src/cmd/compile/internal/typecheck/func.go", "repo_id": "go", "token_count": 9237 }
118
// Code generated by "stringer -type Kind -trimprefix T type.go"; DO NOT EDIT. package types import "strconv" func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. var x [1]struct{} _ = x[Txxx-0] _ = x[TINT8-1] _ = x[TUINT8-2] _ = x[TINT16-3] _ = x[TUINT16-4] _ = x[TINT32-5] _ = x[TUINT32-6] _ = x[TINT64-7] _ = x[TUINT64-8] _ = x[TINT-9] _ = x[TUINT-10] _ = x[TUINTPTR-11] _ = x[TCOMPLEX64-12] _ = x[TCOMPLEX128-13] _ = x[TFLOAT32-14] _ = x[TFLOAT64-15] _ = x[TBOOL-16] _ = x[TPTR-17] _ = x[TFUNC-18] _ = x[TSLICE-19] _ = x[TARRAY-20] _ = x[TSTRUCT-21] _ = x[TCHAN-22] _ = x[TMAP-23] _ = x[TINTER-24] _ = x[TFORW-25] _ = x[TANY-26] _ = x[TSTRING-27] _ = x[TUNSAFEPTR-28] _ = x[TIDEAL-29] _ = x[TNIL-30] _ = x[TBLANK-31] _ = x[TFUNCARGS-32] _ = x[TCHANARGS-33] _ = x[TSSA-34] _ = x[TTUPLE-35] _ = x[TRESULTS-36] _ = x[NTYPE-37] } const _Kind_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRIDEALNILBLANKFUNCARGSCHANARGSSSATUPLERESULTSNTYPE" var _Kind_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 158, 161, 166, 174, 182, 185, 190, 197, 202} func (i Kind) String() string { if i >= Kind(len(_Kind_index)-1) { return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" } return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] }
go/src/cmd/compile/internal/types/kind_string.go/0
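kind_string.go illustrates the standard stringer layout: one concatenated name string plus an index table of cumulative offsets, so String is just a bounds check and a slice, with no per-value string allocations. The same technique hand-written for a small hypothetical enum (Color is invented for illustration):

package main

import "fmt"

type Color int

const (
	Red Color = iota
	Green
	Blue
)

const _Color_name = "RedGreenBlue"

var _Color_index = [...]uint8{0, 3, 8, 12}

func (c Color) String() string {
	if c < 0 || int(c) >= len(_Color_index)-1 {
		return fmt.Sprintf("Color(%d)", int(c))
	}
	return _Color_name[_Color_index[c]:_Color_index[c+1]]
}

func main() {
	fmt.Println(Red, Green, Blue) // Red Green Blue
}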
{ "file_path": "go/src/cmd/compile/internal/types/kind_string.go", "repo_id": "go", "token_count": 823 }
119
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // This file implements initialization and assignment checks. package types2 import ( "cmd/compile/internal/syntax" "fmt" . "internal/types/errors" "strings" ) // assignment reports whether x can be assigned to a variable of type T, // if necessary by attempting to convert untyped values to the appropriate // type. context describes the context in which the assignment takes place. // Use T == nil to indicate assignment to an untyped blank identifier. // If the assignment check fails, x.mode is set to invalid. func (check *Checker) assignment(x *operand, T Type, context string) { check.singleValue(x) switch x.mode { case invalid: return // error reported before case nilvalue: assert(isTypes2) // ok case constant_, variable, mapindex, value, commaok, commaerr: // ok default: // we may get here because of other problems (go.dev/issue/39634, crash 12) // TODO(gri) do we need a new "generic" error code here? check.errorf(x, IncompatibleAssign, "cannot assign %s to %s in %s", x, T, context) x.mode = invalid return } if isUntyped(x.typ) { target := T // spec: "If an untyped constant is assigned to a variable of interface // type or the blank identifier, the constant is first converted to type // bool, rune, int, float64, complex128 or string respectively, depending // on whether the value is a boolean, rune, integer, floating-point, // complex, or string constant." if isTypes2 { if x.isNil() { if T == nil { check.errorf(x, UntypedNilUse, "use of untyped nil in %s", context) x.mode = invalid return } } else if T == nil || isNonTypeParamInterface(T) { target = Default(x.typ) } } else { // go/types if T == nil || isNonTypeParamInterface(T) { if T == nil && x.typ == Typ[UntypedNil] { check.errorf(x, UntypedNilUse, "use of untyped nil in %s", context) x.mode = invalid return } target = Default(x.typ) } } newType, val, code := check.implicitTypeAndValue(x, target) if code != 0 { msg := check.sprintf("cannot use %s as %s value in %s", x, target, context) switch code { case TruncatedFloat: msg += " (truncated)" case NumericOverflow: msg += " (overflows)" default: code = IncompatibleAssign } check.error(x, code, msg) x.mode = invalid return } if val != nil { x.val = val check.updateExprVal(x.expr, val) } if newType != x.typ { x.typ = newType check.updateExprType(x.expr, newType, false) } } // x.typ is typed // A generic (non-instantiated) function value cannot be assigned to a variable. if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 { check.errorf(x, WrongTypeArgCount, "cannot use generic function %s without instantiation in %s", x, context) x.mode = invalid return } // spec: "If a left-hand side is the blank identifier, any typed or // non-constant value except for the predeclared identifier nil may // be assigned to it." 
if T == nil { return } cause := "" if ok, code := x.assignableTo(check, T, &cause); !ok { if cause != "" { check.errorf(x, code, "cannot use %s as %s value in %s: %s", x, T, context, cause) } else { check.errorf(x, code, "cannot use %s as %s value in %s", x, T, context) } x.mode = invalid } } func (check *Checker) initConst(lhs *Const, x *operand) { if x.mode == invalid || !isValid(x.typ) || !isValid(lhs.typ) { if lhs.typ == nil { lhs.typ = Typ[Invalid] } return } // rhs must be a constant if x.mode != constant_ { check.errorf(x, InvalidConstInit, "%s is not constant", x) if lhs.typ == nil { lhs.typ = Typ[Invalid] } return } assert(isConstType(x.typ)) // If the lhs doesn't have a type yet, use the type of x. if lhs.typ == nil { lhs.typ = x.typ } check.assignment(x, lhs.typ, "constant declaration") if x.mode == invalid { return } lhs.val = x.val } // initVar checks the initialization lhs = x in a variable declaration. // If lhs doesn't have a type yet, it is given the type of x, // or Typ[Invalid] in case of an error. // If the initialization check fails, x.mode is set to invalid. func (check *Checker) initVar(lhs *Var, x *operand, context string) { if x.mode == invalid || !isValid(x.typ) || !isValid(lhs.typ) { if lhs.typ == nil { lhs.typ = Typ[Invalid] } x.mode = invalid return } // If lhs doesn't have a type yet, use the type of x. if lhs.typ == nil { typ := x.typ if isUntyped(typ) { // convert untyped types to default types if typ == Typ[UntypedNil] { check.errorf(x, UntypedNilUse, "use of untyped nil in %s", context) lhs.typ = Typ[Invalid] x.mode = invalid return } typ = Default(typ) } lhs.typ = typ } check.assignment(x, lhs.typ, context) } // lhsVar checks a lhs variable in an assignment and returns its type. // lhsVar takes care of not counting a lhs identifier as a "use" of // that identifier. The result is nil if it is the blank identifier, // and Typ[Invalid] if it is an invalid lhs expression. func (check *Checker) lhsVar(lhs syntax.Expr) Type { // Determine if the lhs is a (possibly parenthesized) identifier. ident, _ := syntax.Unparen(lhs).(*syntax.Name) // Don't evaluate lhs if it is the blank identifier. if ident != nil && ident.Value == "_" { check.recordDef(ident, nil) return nil } // If the lhs is an identifier denoting a variable v, this reference // is not a 'use' of v. Remember current value of v.used and restore // after evaluating the lhs via check.expr. var v *Var var v_used bool if ident != nil { if obj := check.lookup(ident.Value); obj != nil { // It's ok to mark non-local variables, but ignore variables // from other packages to avoid potential race conditions with // dot-imported variables. if w, _ := obj.(*Var); w != nil && w.pkg == check.pkg { v = w v_used = v.used } } } var x operand check.expr(nil, &x, lhs) if v != nil { v.used = v_used // restore v.used } if x.mode == invalid || !isValid(x.typ) { return Typ[Invalid] } // spec: "Each left-hand side operand must be addressable, a map index // expression, or the blank identifier. Operands may be parenthesized." 
switch x.mode { case invalid: return Typ[Invalid] case variable, mapindex: // ok default: if sel, ok := x.expr.(*syntax.SelectorExpr); ok { var op operand check.expr(nil, &op, sel.X) if op.mode == mapindex { check.errorf(&x, UnaddressableFieldAssign, "cannot assign to struct field %s in map", ExprString(x.expr)) return Typ[Invalid] } } check.errorf(&x, UnassignableOperand, "cannot assign to %s (neither addressable nor a map index expression)", x.expr) return Typ[Invalid] } return x.typ } // assignVar checks the assignment lhs = rhs (if x == nil), or lhs = x (if x != nil). // If x != nil, it must be the evaluation of rhs (and rhs will be ignored). // If the assignment check fails and x != nil, x.mode is set to invalid. func (check *Checker) assignVar(lhs, rhs syntax.Expr, x *operand, context string) { T := check.lhsVar(lhs) // nil if lhs is _ if !isValid(T) { if x != nil { x.mode = invalid } else { check.use(rhs) } return } if x == nil { var target *target // avoid calling ExprString if not needed if T != nil { if _, ok := under(T).(*Signature); ok { target = newTarget(T, ExprString(lhs)) } } x = new(operand) check.expr(target, x, rhs) } if T == nil && context == "assignment" { context = "assignment to _ identifier" } check.assignment(x, T, context) } // operandTypes returns the list of types for the given operands. func operandTypes(list []*operand) (res []Type) { for _, x := range list { res = append(res, x.typ) } return res } // varTypes returns the list of types for the given variables. func varTypes(list []*Var) (res []Type) { for _, x := range list { res = append(res, x.typ) } return res } // typesSummary returns a string of the form "(t1, t2, ...)" where the // ti's are user-friendly string representations for the given types. // If variadic is set and the last type is a slice, its string is of // the form "...E" where E is the slice's element type. func (check *Checker) typesSummary(list []Type, variadic bool) string { var res []string for i, t := range list { var s string switch { case t == nil: fallthrough // should not happen but be cautious case !isValid(t): s = "unknown type" case isUntyped(t): if isNumeric(t) { // Do not imply a specific type requirement: // "have number, want float64" is better than // "have untyped int, want float64" or // "have int, want float64". s = "number" } else { // If we don't have a number, omit the "untyped" qualifier // for compactness. 
s = strings.Replace(t.(*Basic).name, "untyped ", "", -1) } case variadic && i == len(list)-1: s = check.sprintf("...%s", t.(*Slice).elem) } if s == "" { s = check.sprintf("%s", t) } res = append(res, s) } return "(" + strings.Join(res, ", ") + ")" } func measure(x int, unit string) string { if x != 1 { unit += "s" } return fmt.Sprintf("%d %s", x, unit) } func (check *Checker) assignError(rhs []syntax.Expr, l, r int) { vars := measure(l, "variable") vals := measure(r, "value") rhs0 := rhs[0] if len(rhs) == 1 { if call, _ := syntax.Unparen(rhs0).(*syntax.CallExpr); call != nil { check.errorf(rhs0, WrongAssignCount, "assignment mismatch: %s but %s returns %s", vars, call.Fun, vals) return } } check.errorf(rhs0, WrongAssignCount, "assignment mismatch: %s but %s", vars, vals) } func (check *Checker) returnError(at poser, lhs []*Var, rhs []*operand) { l, r := len(lhs), len(rhs) qualifier := "not enough" if r > l { at = rhs[l] // report at first extra value qualifier = "too many" } else if r > 0 { at = rhs[r-1] // report at last value } err := check.newError(WrongResultCount) err.addf(at, "%s return values", qualifier) err.addf(nopos, "have %s", check.typesSummary(operandTypes(rhs), false)) err.addf(nopos, "want %s", check.typesSummary(varTypes(lhs), false)) err.report() } // initVars type-checks assignments of initialization expressions orig_rhs // to variables lhs. // If returnStmt is non-nil, initVars type-checks the implicit assignment // of result expressions orig_rhs to function result parameters lhs. func (check *Checker) initVars(lhs []*Var, orig_rhs []syntax.Expr, returnStmt syntax.Stmt) { context := "assignment" if returnStmt != nil { context = "return statement" } l, r := len(lhs), len(orig_rhs) // If l == 1 and the rhs is a single call, for a better // error message don't handle it as n:n mapping below. isCall := false if r == 1 { _, isCall = syntax.Unparen(orig_rhs[0]).(*syntax.CallExpr) } // If we have a n:n mapping from lhs variable to rhs expression, // each value can be assigned to its corresponding variable. if l == r && !isCall { var x operand for i, lhs := range lhs { desc := lhs.name if returnStmt != nil && desc == "" { desc = "result variable" } check.expr(newTarget(lhs.typ, desc), &x, orig_rhs[i]) check.initVar(lhs, &x, context) } return } // If we don't have an n:n mapping, the rhs must be a single expression // resulting in 2 or more values; otherwise we have an assignment mismatch. if r != 1 { // Only report a mismatch error if there are no other errors on the rhs. if check.use(orig_rhs...) { if returnStmt != nil { rhs := check.exprList(orig_rhs) check.returnError(returnStmt, lhs, rhs) } else { check.assignError(orig_rhs, l, r) } } // ensure that LHS variables have a type for _, v := range lhs { if v.typ == nil { v.typ = Typ[Invalid] } } return } rhs, commaOk := check.multiExpr(orig_rhs[0], l == 2 && returnStmt == nil) r = len(rhs) if l == r { for i, lhs := range lhs { check.initVar(lhs, rhs[i], context) } // Only record comma-ok expression if both initializations succeeded // (go.dev/issue/59371). if commaOk && rhs[0].mode != invalid && rhs[1].mode != invalid { check.recordCommaOkTypes(orig_rhs[0], rhs) } return } // In all other cases we have an assignment mismatch. // Only report a mismatch error if there are no other errors on the rhs. 
if rhs[0].mode != invalid { if returnStmt != nil { check.returnError(returnStmt, lhs, rhs) } else { check.assignError(orig_rhs, l, r) } } // ensure that LHS variables have a type for _, v := range lhs { if v.typ == nil { v.typ = Typ[Invalid] } } // orig_rhs[0] was already evaluated } // assignVars type-checks assignments of expressions orig_rhs to variables lhs. func (check *Checker) assignVars(lhs, orig_rhs []syntax.Expr) { l, r := len(lhs), len(orig_rhs) // If l == 1 and the rhs is a single call, for a better // error message don't handle it as n:n mapping below. isCall := false if r == 1 { _, isCall = syntax.Unparen(orig_rhs[0]).(*syntax.CallExpr) } // If we have a n:n mapping from lhs variable to rhs expression, // each value can be assigned to its corresponding variable. if l == r && !isCall { for i, lhs := range lhs { check.assignVar(lhs, orig_rhs[i], nil, "assignment") } return } // If we don't have an n:n mapping, the rhs must be a single expression // resulting in 2 or more values; otherwise we have an assignment mismatch. if r != 1 { // Only report a mismatch error if there are no other errors on the lhs or rhs. okLHS := check.useLHS(lhs...) okRHS := check.use(orig_rhs...) if okLHS && okRHS { check.assignError(orig_rhs, l, r) } return } rhs, commaOk := check.multiExpr(orig_rhs[0], l == 2) r = len(rhs) if l == r { for i, lhs := range lhs { check.assignVar(lhs, nil, rhs[i], "assignment") } // Only record comma-ok expression if both assignments succeeded // (go.dev/issue/59371). if commaOk && rhs[0].mode != invalid && rhs[1].mode != invalid { check.recordCommaOkTypes(orig_rhs[0], rhs) } return } // In all other cases we have an assignment mismatch. // Only report a mismatch error if there are no other errors on the rhs. if rhs[0].mode != invalid { check.assignError(orig_rhs, l, r) } check.useLHS(lhs...) // orig_rhs[0] was already evaluated } func (check *Checker) shortVarDecl(pos poser, lhs, rhs []syntax.Expr) { top := len(check.delayed) scope := check.scope // collect lhs variables seen := make(map[string]bool, len(lhs)) lhsVars := make([]*Var, len(lhs)) newVars := make([]*Var, 0, len(lhs)) hasErr := false for i, lhs := range lhs { ident, _ := lhs.(*syntax.Name) if ident == nil { check.useLHS(lhs) // TODO(gri) This is redundant with a go/parser error. Consider omitting in go/types? check.errorf(lhs, BadDecl, "non-name %s on left side of :=", lhs) hasErr = true continue } name := ident.Value if name != "_" { if seen[name] { check.errorf(lhs, RepeatedDecl, "%s repeated on left side of :=", lhs) hasErr = true continue } seen[name] = true } // Use the correct obj if the ident is redeclared. The // variable's scope starts after the declaration; so we // must use Scope.Lookup here and call Scope.Insert // (via check.declare) later. 
if alt := scope.Lookup(name); alt != nil { check.recordUse(ident, alt) // redeclared object must be a variable if obj, _ := alt.(*Var); obj != nil { lhsVars[i] = obj } else { check.errorf(lhs, UnassignableOperand, "cannot assign to %s", lhs) hasErr = true } continue } // declare new variable obj := NewVar(ident.Pos(), check.pkg, name, nil) lhsVars[i] = obj if name != "_" { newVars = append(newVars, obj) } check.recordDef(ident, obj) } // create dummy variables where the lhs is invalid for i, obj := range lhsVars { if obj == nil { lhsVars[i] = NewVar(lhs[i].Pos(), check.pkg, "_", nil) } } check.initVars(lhsVars, rhs, nil) // process function literals in rhs expressions before scope changes check.processDelayed(top) if len(newVars) == 0 && !hasErr { check.softErrorf(pos, NoNewVar, "no new variables on left side of :=") return } // declare new variables // spec: "The scope of a constant or variable identifier declared inside // a function begins at the end of the ConstSpec or VarSpec (ShortVarDecl // for short variable declarations) and ends at the end of the innermost // containing block." scopePos := endPos(rhs[len(rhs)-1]) for _, obj := range newVars { check.declare(scope, nil, obj, scopePos) // id = nil: recordDef already called } }
go/src/cmd/compile/internal/types2/assignments.go/0
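The untyped-conversion rule that Checker.assignment implements above is observable at the language level: an untyped constant assigned to an interface type (or to the blank identifier) is first converted to its default type. A quick runnable check of that behavior (nothing here is compiler-internal):

package main

import "fmt"

func main() {
	var i interface{} = 42  // untyped integer constant -> default type int
	var f interface{} = 1.5 // untyped float constant -> float64
	var r interface{} = 'x' // untyped rune constant -> int32 (rune)

	fmt.Printf("%T %T %T\n", i, f, r) // int float64 int32
}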
{ "file_path": "go/src/cmd/compile/internal/types2/assignments.go", "repo_id": "go", "token_count": 6447 }
120
// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // This file implements error reporting. package types2 import ( "cmd/compile/internal/syntax" "fmt" . "internal/types/errors" "runtime" "strings" ) func assert(p bool) { if !p { msg := "assertion failed" // Include information about the assertion location. Due to panic recovery, // this location is otherwise buried in the middle of the panicking stack. if _, file, line, ok := runtime.Caller(1); ok { msg = fmt.Sprintf("%s:%d: %s", file, line, msg) } panic(msg) } } // An errorDesc describes part of a type-checking error. type errorDesc struct { pos syntax.Pos msg string } // An error_ represents a type-checking error. // A new error_ is created with Checker.newError. // To report an error_, call error_.report. type error_ struct { check *Checker desc []errorDesc code Code soft bool // TODO(gri) eventually determine this from an error code } // newError returns a new error_ with the given error code. func (check *Checker) newError(code Code) *error_ { if code == 0 { panic("error code must not be 0") } return &error_{check: check, code: code} } // addf adds formatted error information to err. // It may be called multiple times to provide additional information. // The position of the first call to addf determines the position of the reported Error. // Subsequent calls to addf provide additional information in the form of additional lines // in the error message (types2) or continuation errors identified by a tab-indented error // message (go/types). func (err *error_) addf(at poser, format string, args ...interface{}) { err.desc = append(err.desc, errorDesc{atPos(at), err.check.sprintf(format, args...)}) } // addAltDecl is a specialized form of addf reporting another declaration of obj. func (err *error_) addAltDecl(obj Object) { if pos := obj.Pos(); pos.IsKnown() { // We use "other" rather than "previous" here because // the first declaration seen may not be textually // earlier in the source. err.addf(obj, "other declaration of %s", obj.Name()) } } func (err *error_) empty() bool { return err.desc == nil } func (err *error_) pos() syntax.Pos { if err.empty() { return nopos } return err.desc[0].pos } // msg returns the formatted error message without the primary error position pos(). func (err *error_) msg() string { if err.empty() { return "no error" } var buf strings.Builder for i := range err.desc { p := &err.desc[i] if i > 0 { fmt.Fprint(&buf, "\n\t") if p.pos.IsKnown() { fmt.Fprintf(&buf, "%s: ", p.pos) } } buf.WriteString(p.msg) } return buf.String() } // report reports the error err, setting check.firstError if necessary. func (err *error_) report() { if err.empty() { panic("no error") } // Cheap trick: Don't report errors with messages containing // "invalid operand" or "invalid type" as those tend to be // follow-on errors which don't add useful information. Only // exclude them if these strings are not at the beginning, // and only if we have at least one error already reported. check := err.check if check.firstErr != nil { // It is sufficient to look at the first sub-error only. msg := err.desc[0].msg if strings.Index(msg, "invalid operand") > 0 || strings.Index(msg, "invalid type") > 0 { return } } if check.conf.Trace { check.trace(err.pos(), "ERROR: %s (code = %d)", err.desc[0].msg, err.code) } // In go/types, if there is a sub-error with a valid position, // call the typechecker error handler for each sub-error. 
// Otherwise, call it once, with a single combined message. multiError := false if !isTypes2 { for i := 1; i < len(err.desc); i++ { if err.desc[i].pos.IsKnown() { multiError = true break } } } if multiError { for i := range err.desc { p := &err.desc[i] check.handleError(i, p.pos, err.code, p.msg, err.soft) } } else { check.handleError(0, err.pos(), err.code, err.msg(), err.soft) } // make sure the error is not reported twice err.desc = nil } // handleError should only be called by error_.report. func (check *Checker) handleError(index int, pos syntax.Pos, code Code, msg string, soft bool) { assert(code != 0) if index == 0 { // If we are encountering an error while evaluating an inherited // constant initialization expression, pos is the position of // the original expression, and not of the currently declared // constant identifier. Use the provided errpos instead. // TODO(gri) We may also want to augment the error message and // refer to the position (pos) in the original expression. if check.errpos.Pos().IsKnown() { assert(check.iota != nil) pos = check.errpos } // Report invalid syntax trees explicitly. if code == InvalidSyntaxTree { msg = "invalid syntax tree: " + msg } // If we have a URL for error codes, add a link to the first line. if check.conf.ErrorURL != "" { url := fmt.Sprintf(check.conf.ErrorURL, code) if i := strings.Index(msg, "\n"); i >= 0 { msg = msg[:i] + url + msg[i:] } else { msg += url } } } else { // Indent sub-error. // Position information is passed explicitly to Error, below. msg = "\t" + msg } e := Error{ Pos: pos, Msg: stripAnnotations(msg), Full: msg, Soft: soft, Code: code, } if check.firstErr == nil { check.firstErr = e } f := check.conf.Error if f == nil { panic(bailout{}) // record first error and exit } f(e) } const ( invalidArg = "invalid argument: " invalidOp = "invalid operation: " ) // The poser interface is used to extract the position of type-checker errors. type poser interface { Pos() syntax.Pos } func (check *Checker) error(at poser, code Code, msg string) { err := check.newError(code) err.addf(at, "%s", msg) err.report() } func (check *Checker) errorf(at poser, code Code, format string, args ...any) { err := check.newError(code) err.addf(at, format, args...) err.report() } func (check *Checker) softErrorf(at poser, code Code, format string, args ...any) { err := check.newError(code) err.addf(at, format, args...) err.soft = true err.report() } func (check *Checker) versionErrorf(at poser, v goVersion, format string, args ...any) { msg := check.sprintf(format, args...) err := check.newError(UnsupportedFeature) err.addf(at, "%s requires %s or later", msg, v) err.report() } // atPos reports the left (= start) position of at. func atPos(at poser) syntax.Pos { switch x := at.(type) { case *operand: if x.expr != nil { return syntax.StartPos(x.expr) } case syntax.Node: return syntax.StartPos(x) } return at.Pos() }
go/src/cmd/compile/internal/types2/errors.go/0
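error_.msg above prints the first description bare and joins each subsequent one as a tab-indented continuation line, prefixed with its position when known. A stripped-down sketch of that formatting, with plain strings standing in for syntax.Pos (the desc type and combined function are hypothetical, not the checker's):

package main

import (
	"fmt"
	"strings"
)

type desc struct {
	pos string // "" means no known position
	msg string
}

// combined mirrors the shape of error_.msg: the first entry's position is
// omitted because it becomes the reported error position itself.
func combined(descs []desc) string {
	var buf strings.Builder
	for i, d := range descs {
		if i > 0 {
			fmt.Fprint(&buf, "\n\t")
			if d.pos != "" {
				fmt.Fprintf(&buf, "%s: ", d.pos)
			}
		}
		buf.WriteString(d.msg)
	}
	return buf.String()
}

func main() {
	fmt.Println(combined([]desc{
		{"a.go:3:5", "undefined: x"},
		{"a.go:7:2", "other declaration of x"},
	}))
}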
{ "file_path": "go/src/cmd/compile/internal/types2/errors.go", "repo_id": "go", "token_count": 2358 }
121
// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // This file implements type unification. // // Type unification attempts to make two types x and y structurally // equivalent by determining the types for a given list of (bound) // type parameters which may occur within x and y. If x and y are // structurally different (say []T vs chan T), or conflicting // types are determined for type parameters, unification fails. // If unification succeeds, as a side-effect, the types of the // bound type parameters may be determined. // // Unification typically requires multiple calls u.unify(x, y) to // a given unifier u, with various combinations of types x and y. // In each call, additional type parameter types may be determined // as a side effect and recorded in u. // If a call fails (returns false), unification fails. // // In the unification context, structural equivalence of two types // ignores the difference between a defined type and its underlying // type if one type is a defined type and the other one is not. // It also ignores the difference between an (external, unbound) // type parameter and its core type. // If two types are not structurally equivalent, they cannot be Go // identical types. On the other hand, if they are structurally // equivalent, they may be Go identical or at least assignable, or // they may be in the type set of a constraint. // Whether they indeed are identical or assignable is determined // upon instantiation and function argument passing. package types2 import ( "bytes" "fmt" "sort" "strings" ) const ( // Upper limit for recursion depth. Used to catch infinite recursions // due to implementation issues (e.g., see issues go.dev/issue/48619, go.dev/issue/48656). unificationDepthLimit = 50 // Whether to panic when unificationDepthLimit is reached. // If disabled, a recursion depth overflow results in a (quiet) // unification failure. panicAtUnificationDepthLimit = true // If enableCoreTypeUnification is set, unification will consider // the core types, if any, of non-local (unbound) type parameters. enableCoreTypeUnification = true // If traceInference is set, unification will print a trace of its operation. // Interpretation of trace: // x ≡ y attempt to unify types x and y // p ➞ y type parameter p is set to type y (p is inferred to be y) // p ⇄ q type parameters p and q match (p is inferred to be q and vice versa) // x ≢ y types x and y cannot be unified // [p, q, ...] ➞ [x, y, ...] mapping from type parameters to types traceInference = false ) // A unifier maintains a list of type parameters and // corresponding types inferred for each type parameter. // A unifier is created by calling newUnifier. type unifier struct { // handles maps each type parameter to its inferred type through // an indirection *Type called (inferred type) "handle". // Initially, each type parameter has its own, separate handle, // with a nil (i.e., not yet inferred) type. // After a type parameter P is unified with a type parameter Q, // P and Q share the same handle (and thus type). This ensures // that inferring the type for a given type parameter P will // automatically infer the same type for all other parameters // unified (joined) with P. 
handles map[*TypeParam]*Type depth int // recursion depth during unification enableInterfaceInference bool // use shared methods for better inference } // newUnifier returns a new unifier initialized with the given type parameter // and corresponding type argument lists. The type argument list may be shorter // than the type parameter list, and it may contain nil types. Matching type // parameters and arguments must have the same index. func newUnifier(tparams []*TypeParam, targs []Type, enableInterfaceInference bool) *unifier { assert(len(tparams) >= len(targs)) handles := make(map[*TypeParam]*Type, len(tparams)) // Allocate all handles up-front: in a correct program, all type parameters // must be resolved and thus eventually will get a handle. // Also, sharing of handles caused by unified type parameters is rare and // so it's ok to not optimize for that case (and delay handle allocation). for i, x := range tparams { var t Type if i < len(targs) { t = targs[i] } handles[x] = &t } return &unifier{handles, 0, enableInterfaceInference} } // unifyMode controls the behavior of the unifier. type unifyMode uint const ( // If assign is set, we are unifying types involved in an assignment: // they may match inexactly at the top, but element types must match // exactly. assign unifyMode = 1 << iota // If exact is set, types unify if they are identical (or can be // made identical with suitable arguments for type parameters). // Otherwise, a named type and a type literal unify if their // underlying types unify, channel directions are ignored, and // if there is an interface, the other type must implement the // interface. exact ) func (m unifyMode) String() string { switch m { case 0: return "inexact" case assign: return "assign" case exact: return "exact" case assign | exact: return "assign, exact" } return fmt.Sprintf("mode %d", m) } // unify attempts to unify x and y and reports whether it succeeded. // As a side-effect, types may be inferred for type parameters. // The mode parameter controls how types are compared. func (u *unifier) unify(x, y Type, mode unifyMode) bool { return u.nify(x, y, mode, nil) } func (u *unifier) tracef(format string, args ...interface{}) { fmt.Println(strings.Repeat(". ", u.depth) + sprintf(nil, true, format, args...)) } // String returns a string representation of the current mapping // from type parameters to types. func (u *unifier) String() string { // sort type parameters for reproducible strings tparams := make(typeParamsById, len(u.handles)) i := 0 for tpar := range u.handles { tparams[i] = tpar i++ } sort.Sort(tparams) var buf bytes.Buffer w := newTypeWriter(&buf, nil) w.byte('[') for i, x := range tparams { if i > 0 { w.string(", ") } w.typ(x) w.string(": ") w.typ(u.at(x)) } w.byte(']') return buf.String() } type typeParamsById []*TypeParam func (s typeParamsById) Len() int { return len(s) } func (s typeParamsById) Less(i, j int) bool { return s[i].id < s[j].id } func (s typeParamsById) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // join unifies the given type parameters x and y. // If both type parameters already have a type associated with them // and they are not joined, join fails and returns false. func (u *unifier) join(x, y *TypeParam) bool { if traceInference { u.tracef("%s ⇄ %s", x, y) } switch hx, hy := u.handles[x], u.handles[y]; { case hx == hy: // Both type parameters already share the same handle. Nothing to do. case *hx != nil && *hy != nil: // Both type parameters have (possibly different) inferred types. Cannot join. 
return false case *hx != nil: // Only type parameter x has an inferred type. Use handle of x. u.setHandle(y, hx) // This case is treated like the default case. // case *hy != nil: // // Only type parameter y has an inferred type. Use handle of y. // u.setHandle(x, hy) default: // Neither type parameter has an inferred type. Use handle of y. u.setHandle(x, hy) } return true } // asBoundTypeParam returns x.(*TypeParam) if x is a type parameter recorded with u. // Otherwise, the result is nil. func (u *unifier) asBoundTypeParam(x Type) *TypeParam { if x, _ := Unalias(x).(*TypeParam); x != nil { if _, found := u.handles[x]; found { return x } } return nil } // setHandle sets the handle for type parameter x // (and all its joined type parameters) to h. func (u *unifier) setHandle(x *TypeParam, h *Type) { hx := u.handles[x] assert(hx != nil) for y, hy := range u.handles { if hy == hx { u.handles[y] = h } } } // at returns the (possibly nil) type for type parameter x. func (u *unifier) at(x *TypeParam) Type { return *u.handles[x] } // set sets the type t for type parameter x; // t must not be nil. func (u *unifier) set(x *TypeParam, t Type) { assert(t != nil) if traceInference { u.tracef("%s ➞ %s", x, t) } *u.handles[x] = t } // unknowns returns the number of type parameters for which no type has been set yet. func (u *unifier) unknowns() int { n := 0 for _, h := range u.handles { if *h == nil { n++ } } return n } // inferred returns the list of inferred types for the given type parameter list. // The result is never nil and has the same length as tparams; result types that // could not be inferred are nil. Corresponding type parameters and result types // have identical indices. func (u *unifier) inferred(tparams []*TypeParam) []Type { list := make([]Type, len(tparams)) for i, x := range tparams { list[i] = u.at(x) } return list } // asInterface returns the underlying type of x as an interface if // it is a non-type parameter interface. Otherwise it returns nil. func asInterface(x Type) (i *Interface) { if _, ok := Unalias(x).(*TypeParam); !ok { i, _ = under(x).(*Interface) } return i } // nify implements the core unification algorithm which is an // adapted version of Checker.identical. For changes to that // code the corresponding changes should be made here. // Must not be called directly from outside the unifier. func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) { u.depth++ if traceInference { u.tracef("%s ≡ %s\t// %s", x, y, mode) } defer func() { if traceInference && !result { u.tracef("%s ≢ %s", x, y) } u.depth-- }() // nothing to do if x == y if x == y || Unalias(x) == Unalias(y) { return true } // Stop gap for cases where unification fails. if u.depth > unificationDepthLimit { if traceInference { u.tracef("depth %d >= %d", u.depth, unificationDepthLimit) } if panicAtUnificationDepthLimit { panic("unification reached recursion depth limit") } return false } // Unification is symmetric, so we can swap the operands. // Ensure that if we have at least one // - defined type, make sure one is in y // - type parameter recorded with u, make sure one is in x if asNamed(x) != nil || u.asBoundTypeParam(y) != nil { if traceInference { u.tracef("%s ≡ %s\t// swap", y, x) } x, y = y, x } // Unification will fail if we match a defined type against a type literal. // If we are matching types in an assignment, at the top-level, types with // the same type structure are permitted as long as at least one of them // is not a defined type. 
To accommodate for that possibility, we continue // unification with the underlying type of a defined type if the other type // is a type literal. This is controlled by the exact unification mode. // We also continue if the other type is a basic type because basic types // are valid underlying types and may appear as core types of type constraints. // If we exclude them, inferred defined types for type parameters may not // match against the core types of their constraints (even though they might // correctly match against some of the types in the constraint's type set). // Finally, if unification (incorrectly) succeeds by matching the underlying // type of a defined type against a basic type (because we include basic types // as type literals here), and if that leads to an incorrectly inferred type, // we will fail at function instantiation or argument assignment time. // // If we have at least one defined type, there is one in y. if ny := asNamed(y); mode&exact == 0 && ny != nil && isTypeLit(x) && !(u.enableInterfaceInference && IsInterface(x)) { if traceInference { u.tracef("%s ≡ under %s", x, ny) } y = ny.under() // Per the spec, a defined type cannot have an underlying type // that is a type parameter. assert(!isTypeParam(y)) // x and y may be identical now if x == y || Unalias(x) == Unalias(y) { return true } } // Cases where at least one of x or y is a type parameter recorded with u. // If we have at least one type parameter, there is one in x. // If we have exactly one type parameter, because it is in x, // isTypeLit(x) is false and y was not changed above. In other // words, if y was a defined type, it is still a defined type // (relevant for the logic below). switch px, py := u.asBoundTypeParam(x), u.asBoundTypeParam(y); { case px != nil && py != nil: // both x and y are type parameters if u.join(px, py) { return true } // both x and y have an inferred type - they must match return u.nify(u.at(px), u.at(py), mode, p) case px != nil: // x is a type parameter, y is not if x := u.at(px); x != nil { // x has an inferred type which must match y if u.nify(x, y, mode, p) { // We have a match, possibly through underlying types. xi := asInterface(x) yi := asInterface(y) xn := asNamed(x) != nil yn := asNamed(y) != nil // If we have two interfaces, what to do depends on // whether they are named and their method sets. if xi != nil && yi != nil { // Both types are interfaces. // If both types are defined types, they must be identical // because unification doesn't know which type has the "right" name. if xn && yn { return Identical(x, y) } // In all other cases, the method sets must match. // The types unified so we know that corresponding methods // match and we can simply compare the number of methods. // TODO(gri) We may be able to relax this rule and select // the more general interface. But if one of them is a defined // type, it's not clear how to choose and whether we introduce // an order dependency or not. Requiring the same method set // is conservative. if len(xi.typeSet().methods) != len(yi.typeSet().methods) { return false } } else if xi != nil || yi != nil { // One but not both of them are interfaces. // In this case, either x or y could be viable matches for the corresponding // type parameter, which means choosing either introduces an order dependence. // Therefore, we must fail unification (go.dev/issue/60933). return false } // If we have inexact unification and one of x or y is a defined type, select the // defined type. 
This ensures that in a series of types, all matching against the // same type parameter, we infer a defined type if there is one, independent of // order. Type inference or assignment may fail, which is ok. // Selecting a defined type, if any, ensures that we don't lose the type name; // and since we have inexact unification, a value of equally named or matching // undefined type remains assignable (go.dev/issue/43056). // // Similarly, if we have inexact unification and there are no defined types but // channel types, select a directed channel, if any. This ensures that in a series // of unnamed types, all matching against the same type parameter, we infer the // directed channel if there is one, independent of order. // Selecting a directional channel, if any, ensures that a value of another // inexactly unifying channel type remains assignable (go.dev/issue/62157). // // If we have multiple defined channel types, they are either identical or we // have assignment conflicts, so we can ignore directionality in this case. // // If we have defined and literal channel types, a defined type wins to avoid // order dependencies. if mode&exact == 0 { switch { case xn: // x is a defined type: nothing to do. case yn: // x is not a defined type and y is a defined type: select y. u.set(px, y) default: // Neither x nor y are defined types. if yc, _ := under(y).(*Chan); yc != nil && yc.dir != SendRecv { // y is a directed channel type: select y. u.set(px, y) } } } return true } return false } // otherwise, infer type from y u.set(px, y) return true } // x != y if we get here assert(x != y && Unalias(x) != Unalias(y)) // If u.EnableInterfaceInference is set and we don't require exact unification, // if both types are interfaces, one interface must have a subset of the // methods of the other and corresponding method signatures must unify. // If only one type is an interface, all its methods must be present in the // other type and corresponding method signatures must unify. if u.enableInterfaceInference && mode&exact == 0 { // One or both interfaces may be defined types. // Look under the name, but not under type parameters (go.dev/issue/60564). xi := asInterface(x) yi := asInterface(y) // If we have two interfaces, check the type terms for equivalence, // and unify common methods if possible. if xi != nil && yi != nil { xset := xi.typeSet() yset := yi.typeSet() if xset.comparable != yset.comparable { return false } // For now we require terms to be equal. // We should be able to relax this as well, eventually. if !xset.terms.equal(yset.terms) { return false } // Interface types are the only types where cycles can occur // that are not "terminated" via named types; and such cycles // can only be created via method parameter types that are // anonymous interfaces (directly or indirectly) embedding // the current interface. Example: // // type T interface { // m() interface{T} // } // // If two such (differently named) interfaces are compared, // endless recursion occurs if the cycle is not detected. // // If x and y were compared before, they must be equal // (if they were not, the recursion would have stopped); // search the ifacePair stack for the same pair. // // This is a quadratic algorithm, but in practice these stacks // are extremely short (bounded by the nesting depth of interface // type declarations that recur via parameter types, an extremely // rare occurrence). An alternative implementation might use a // "visited" map, but that is probably less efficient overall. 
q := &ifacePair{xi, yi, p} for p != nil { if p.identical(q) { return true // same pair was compared before } p = p.prev } // The method set of x must be a subset of the method set // of y or vice versa, and the common methods must unify. xmethods := xset.methods ymethods := yset.methods // The smaller method set must be the subset, if it exists. if len(xmethods) > len(ymethods) { xmethods, ymethods = ymethods, xmethods } // len(xmethods) <= len(ymethods) // Collect the ymethods in a map for quick lookup. ymap := make(map[string]*Func, len(ymethods)) for _, ym := range ymethods { ymap[ym.Id()] = ym } // All xmethods must exist in ymethods and corresponding signatures must unify. for _, xm := range xmethods { if ym := ymap[xm.Id()]; ym == nil || !u.nify(xm.typ, ym.typ, exact, p) { return false } } return true } // We don't have two interfaces. If we have one, make sure it's in xi. if yi != nil { xi = yi y = x } // If we have one interface, at a minimum each of the interface methods // must be implemented and thus unify with a corresponding method from // the non-interface type, otherwise unification fails. if xi != nil { // All xi methods must exist in y and corresponding signatures must unify. xmethods := xi.typeSet().methods for _, xm := range xmethods { obj, _, _ := LookupFieldOrMethod(y, false, xm.pkg, xm.name) if ym, _ := obj.(*Func); ym == nil || !u.nify(xm.typ, ym.typ, exact, p) { return false } } return true } } // Unless we have exact unification, neither x nor y are interfaces now. // Except for unbound type parameters (see below), x and y must be structurally // equivalent to unify. // If we get here and x or y is a type parameter, they are unbound // (not recorded with the unifier). // Ensure that if we have at least one type parameter, it is in x // (the earlier swap checks for _recorded_ type parameters only). // This ensures that the switch switches on the type parameter. // // TODO(gri) Factor out type parameter handling from the switch. if isTypeParam(y) { if traceInference { u.tracef("%s ≡ %s\t// swap", y, x) } x, y = y, x } // Type elements (array, slice, etc. elements) use emode for unification. // Element types must match exactly if the types are used in an assignment. emode := mode if mode&assign != 0 { emode |= exact } // Continue with unaliased types but don't lose original alias names, if any (go.dev/issue/67628). xorig, x := x, Unalias(x) yorig, y := y, Unalias(y) switch x := x.(type) { case *Basic: // Basic types are singletons except for the rune and byte // aliases, thus we cannot solely rely on the x == y check // above. See also comment in TypeName.IsAlias. if y, ok := y.(*Basic); ok { return x.kind == y.kind } case *Array: // Two array types unify if they have the same array length // and their element types unify. if y, ok := y.(*Array); ok { // If one or both array lengths are unknown (< 0) due to some error, // assume they are the same to avoid spurious follow-on errors. return (x.len < 0 || y.len < 0 || x.len == y.len) && u.nify(x.elem, y.elem, emode, p) } case *Slice: // Two slice types unify if their element types unify. if y, ok := y.(*Slice); ok { return u.nify(x.elem, y.elem, emode, p) } case *Struct: // Two struct types unify if they have the same sequence of fields, // and if corresponding fields have the same names, their (field) types unify, // and they have identical tags. Two embedded fields are considered to have the same // name. Lower-case field names from different packages are always different. 
if y, ok := y.(*Struct); ok { if x.NumFields() == y.NumFields() { for i, f := range x.fields { g := y.fields[i] if f.embedded != g.embedded || x.Tag(i) != y.Tag(i) || !f.sameId(g.pkg, g.name, false) || !u.nify(f.typ, g.typ, emode, p) { return false } } return true } } case *Pointer: // Two pointer types unify if their base types unify. if y, ok := y.(*Pointer); ok { return u.nify(x.base, y.base, emode, p) } case *Tuple: // Two tuple types unify if they have the same number of elements // and the types of corresponding elements unify. if y, ok := y.(*Tuple); ok { if x.Len() == y.Len() { if x != nil { for i, v := range x.vars { w := y.vars[i] if !u.nify(v.typ, w.typ, mode, p) { return false } } } return true } } case *Signature: // Two function types unify if they have the same number of parameters // and result values, corresponding parameter and result types unify, // and either both functions are variadic or neither is. // Parameter and result names are not required to match. // TODO(gri) handle type parameters or document why we can ignore them. if y, ok := y.(*Signature); ok { return x.variadic == y.variadic && u.nify(x.params, y.params, emode, p) && u.nify(x.results, y.results, emode, p) } case *Interface: assert(!u.enableInterfaceInference || mode&exact != 0) // handled before this switch // Two interface types unify if they have the same set of methods with // the same names, and corresponding function types unify. // Lower-case method names from different packages are always different. // The order of the methods is irrelevant. if y, ok := y.(*Interface); ok { xset := x.typeSet() yset := y.typeSet() if xset.comparable != yset.comparable { return false } if !xset.terms.equal(yset.terms) { return false } a := xset.methods b := yset.methods if len(a) == len(b) { // Interface types are the only types where cycles can occur // that are not "terminated" via named types; and such cycles // can only be created via method parameter types that are // anonymous interfaces (directly or indirectly) embedding // the current interface. Example: // // type T interface { // m() interface{T} // } // // If two such (differently named) interfaces are compared, // endless recursion occurs if the cycle is not detected. // // If x and y were compared before, they must be equal // (if they were not, the recursion would have stopped); // search the ifacePair stack for the same pair. // // This is a quadratic algorithm, but in practice these stacks // are extremely short (bounded by the nesting depth of interface // type declarations that recur via parameter types, an extremely // rare occurrence). An alternative implementation might use a // "visited" map, but that is probably less efficient overall. q := &ifacePair{x, y, p} for p != nil { if p.identical(q) { return true // same pair was compared before } p = p.prev } if debug { assertSortedMethods(a) assertSortedMethods(b) } for i, f := range a { g := b[i] if f.Id() != g.Id() || !u.nify(f.typ, g.typ, exact, q) { return false } } return true } } case *Map: // Two map types unify if their key and value types unify. if y, ok := y.(*Map); ok { return u.nify(x.key, y.key, emode, p) && u.nify(x.elem, y.elem, emode, p) } case *Chan: // Two channel types unify if their value types unify // and if they have the same direction. // The channel direction is ignored for inexact unification. 
if y, ok := y.(*Chan); ok { return (mode&exact == 0 || x.dir == y.dir) && u.nify(x.elem, y.elem, emode, p) } case *Named: // Two named types unify if their type names originate in the same type declaration. // If they are instantiated, their type argument lists must unify. if y := asNamed(y); y != nil { // Check type arguments before origins so they unify // even if the origins don't match; for better error // messages (see go.dev/issue/53692). xargs := x.TypeArgs().list() yargs := y.TypeArgs().list() if len(xargs) != len(yargs) { return false } for i, xarg := range xargs { if !u.nify(xarg, yargs[i], mode, p) { return false } } return identicalOrigin(x, y) } case *TypeParam: // x must be an unbound type parameter (see comment above). if debug { assert(u.asBoundTypeParam(x) == nil) } // By definition, a valid type argument must be in the type set of // the respective type constraint. Therefore, the type argument's // underlying type must be in the set of underlying types of that // constraint. If there is a single such underlying type, it's the // constraint's core type. It must match the type argument's underlying type, irrespective of whether the actual type argument, // which may be a defined type, is actually in the type set (that // will be determined at instantiation time). // Thus, if we have the core type of an unbound type parameter, // we know the structure of the possible types satisfying such // parameters. Use that core type for further unification // (see go.dev/issue/50755 for a test case). if enableCoreTypeUnification { // Because the core type is always an underlying type, // unification will take care of matching against a // defined or literal type automatically. // If y is also an unbound type parameter, we will end // up here again with x and y swapped, so we don't // need to take care of that case separately. if cx := coreType(x); cx != nil { if traceInference { u.tracef("core %s ≡ %s", xorig, yorig) } // If y is a defined type, it may not match against cx which // is an underlying type (incl. int, string, etc.). Use assign // mode here so that the unifier automatically takes under(y) // if necessary. return u.nify(cx, yorig, assign, p) } } // x != y and there's nothing to do case nil: // avoid a crash in case of nil type default: panic(sprintf(nil, true, "u.nify(%s, %s, %d)", xorig, yorig, mode)) } return false }
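// Example (editor's illustration, not part of unify.go): a standalone program
// demonstrating the order-independence property described in the comments
// above. Under inexact unification, the defined type MyMap is inferred for P
// regardless of argument order, and the plain map value remains assignable to
// it (go.dev/issue/43056). The names MyMap and f are hypothetical.
package main

import "fmt"

type MyMap map[string]int

func f[P ~map[string]int](x, y P) P { return x }

func main() {
	m := map[string]int{"a": 1}
	fmt.Printf("%T\n", f(MyMap{"b": 2}, m)) // main.MyMap
	fmt.Printf("%T\n", f(m, MyMap{"b": 2})) // main.MyMap, same result with arguments swapped
}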
go/src/cmd/compile/internal/types2/unify.go/0
{ "file_path": "go/src/cmd/compile/internal/types2/unify.go", "repo_id": "go", "token_count": 9682 }
122
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package walk import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/src" ) func walkSelect(sel *ir.SelectStmt) { lno := ir.SetPos(sel) if sel.Walked() { base.Fatalf("double walkSelect") } sel.SetWalked(true) init := ir.TakeInit(sel) init = append(init, walkSelectCases(sel.Cases)...) sel.Cases = nil sel.Compiled = init walkStmtList(sel.Compiled) base.Pos = lno } func walkSelectCases(cases []*ir.CommClause) []ir.Node { ncas := len(cases) sellineno := base.Pos // optimization: zero-case select if ncas == 0 { return []ir.Node{mkcallstmt("block")} } // optimization: one-case select: single op. if ncas == 1 { cas := cases[0] ir.SetPos(cas) l := cas.Init() if cas.Comm != nil { // not default: n := cas.Comm l = append(l, ir.TakeInit(n)...) switch n.Op() { default: base.Fatalf("select %v", n.Op()) case ir.OSEND: // already ok case ir.OSELRECV2: r := n.(*ir.AssignListStmt) if ir.IsBlank(r.Lhs[0]) && ir.IsBlank(r.Lhs[1]) { n = r.Rhs[0] break } r.SetOp(ir.OAS2RECV) } l = append(l, n) } l = append(l, cas.Body...) l = append(l, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)) return l } // convert case value arguments to addresses. // this rewrite is used by both the general code and the next optimization. var dflt *ir.CommClause for _, cas := range cases { ir.SetPos(cas) n := cas.Comm if n == nil { dflt = cas continue } switch n.Op() { case ir.OSEND: n := n.(*ir.SendStmt) n.Value = typecheck.NodAddr(n.Value) n.Value = typecheck.Expr(n.Value) case ir.OSELRECV2: n := n.(*ir.AssignListStmt) if !ir.IsBlank(n.Lhs[0]) { n.Lhs[0] = typecheck.NodAddr(n.Lhs[0]) n.Lhs[0] = typecheck.Expr(n.Lhs[0]) } } } // optimization: two-case select but one is default: single non-blocking op. if ncas == 2 && dflt != nil { cas := cases[0] if cas == dflt { cas = cases[1] } n := cas.Comm ir.SetPos(n) r := ir.NewIfStmt(base.Pos, nil, nil, nil) r.SetInit(cas.Init()) var cond ir.Node switch n.Op() { default: base.Fatalf("select %v", n.Op()) case ir.OSEND: // if selectnbsend(c, v) { body } else { default body } n := n.(*ir.SendStmt) ch := n.Chan cond = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Value) case ir.OSELRECV2: n := n.(*ir.AssignListStmt) recv := n.Rhs[0].(*ir.UnaryExpr) ch := recv.X elem := n.Lhs[0] if ir.IsBlank(elem) { elem = typecheck.NodNil() } cond = typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL]) fn := chanfn("selectnbrecv", 2, ch.Type()) call := mkcall1(fn, fn.Type().ResultsTuple(), r.PtrInit(), elem, ch) as := ir.NewAssignListStmt(r.Pos(), ir.OAS2, []ir.Node{cond, n.Lhs[1]}, []ir.Node{call}) r.PtrInit().Append(typecheck.Stmt(as)) } r.Cond = typecheck.Expr(cond) r.Body = cas.Body r.Else = append(dflt.Init(), dflt.Body...) return []ir.Node{r, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)} } if dflt != nil { ncas-- } casorder := make([]*ir.CommClause, ncas) nsends, nrecvs := 0, 0 var init []ir.Node // generate sel-struct base.Pos = sellineno selv := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(scasetype(), int64(ncas))) init = append(init, typecheck.Stmt(ir.NewAssignStmt(base.Pos, selv, nil))) // No initialization for order; runtime.selectgo is responsible for that. 
order := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(types.Types[types.TUINT16], 2*int64(ncas))) var pc0, pcs ir.Node if base.Flag.Race { pcs = typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(types.Types[types.TUINTPTR], int64(ncas))) pc0 = typecheck.Expr(typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(base.Pos, 0)))) } else { pc0 = typecheck.NodNil() } // register cases for _, cas := range cases { ir.SetPos(cas) init = append(init, ir.TakeInit(cas)...) n := cas.Comm if n == nil { // default: continue } var i int var c, elem ir.Node switch n.Op() { default: base.Fatalf("select %v", n.Op()) case ir.OSEND: n := n.(*ir.SendStmt) i = nsends nsends++ c = n.Chan elem = n.Value case ir.OSELRECV2: n := n.(*ir.AssignListStmt) nrecvs++ i = ncas - nrecvs recv := n.Rhs[0].(*ir.UnaryExpr) c = recv.X elem = n.Lhs[0] } casorder[i] = cas setField := func(f string, val ir.Node) { r := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, ir.NewIndexExpr(base.Pos, selv, ir.NewInt(base.Pos, int64(i))), typecheck.Lookup(f)), val) init = append(init, typecheck.Stmt(r)) } c = typecheck.ConvNop(c, types.Types[types.TUNSAFEPTR]) setField("c", c) if !ir.IsBlank(elem) { elem = typecheck.ConvNop(elem, types.Types[types.TUNSAFEPTR]) setField("elem", elem) } // TODO(mdempsky): There should be a cleaner way to // handle this. if base.Flag.Race { r := mkcallstmt("selectsetpc", typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(base.Pos, int64(i))))) init = append(init, r) } } if nsends+nrecvs != ncas { base.Fatalf("walkSelectCases: miscount: %v + %v != %v", nsends, nrecvs, ncas) } // run the select base.Pos = sellineno chosen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT]) recvOK := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL]) r := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil) r.Lhs = []ir.Node{chosen, recvOK} fn := typecheck.LookupRuntime("selectgo") var fnInit ir.Nodes r.Rhs = []ir.Node{mkcall1(fn, fn.Type().ResultsTuple(), &fnInit, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(base.Pos, int64(nsends)), ir.NewInt(base.Pos, int64(nrecvs)), ir.NewBool(base.Pos, dflt == nil))} init = append(init, fnInit...) init = append(init, typecheck.Stmt(r)) // selv, order, and pcs (if race) are no longer alive after selectgo. // dispatch cases dispatch := func(cond ir.Node, cas *ir.CommClause) { var list ir.Nodes if n := cas.Comm; n != nil && n.Op() == ir.OSELRECV2 { n := n.(*ir.AssignListStmt) if !ir.IsBlank(n.Lhs[1]) { x := ir.NewAssignStmt(base.Pos, n.Lhs[1], recvOK) list.Append(typecheck.Stmt(x)) } } list.Append(cas.Body.Take()...) list.Append(ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)) var r ir.Node if cond != nil { cond = typecheck.Expr(cond) cond = typecheck.DefaultLit(cond, nil) r = ir.NewIfStmt(base.Pos, cond, list, nil) } else { r = ir.NewBlockStmt(base.Pos, list) } init = append(init, r) } if dflt != nil { ir.SetPos(dflt) dispatch(ir.NewBinaryExpr(base.Pos, ir.OLT, chosen, ir.NewInt(base.Pos, 0)), dflt) } for i, cas := range casorder { ir.SetPos(cas) if i == len(casorder)-1 { dispatch(nil, cas) break } dispatch(ir.NewBinaryExpr(base.Pos, ir.OEQ, chosen, ir.NewInt(base.Pos, int64(i))), cas) } return init } // bytePtrToIndex returns a Node representing "(*byte)(&n[i])". 
func bytePtrToIndex(n ir.Node, i int64) ir.Node { s := typecheck.NodAddr(ir.NewIndexExpr(base.Pos, n, ir.NewInt(base.Pos, i))) t := types.NewPtr(types.Types[types.TUINT8]) return typecheck.ConvNop(s, t) } var scase *types.Type // Keep in sync with src/runtime/select.go. func scasetype() *types.Type { if scase == nil { n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("scase")) scase = types.NewNamed(n) n.SetType(scase) n.SetTypecheck(1) scase.SetUnderlying(types.NewStruct([]*types.Field{ types.NewField(base.Pos, typecheck.Lookup("c"), types.Types[types.TUNSAFEPTR]), types.NewField(base.Pos, typecheck.Lookup("elem"), types.Types[types.TUNSAFEPTR]), })) } return scase }
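// Example (editor's illustration, not part of select.go): source-level select
// statements corresponding to the rewrites above. A two-case select with a
// default clause is compiled into a single non-blocking channel operation
// (runtime.selectnbsend or runtime.selectnbrecv) instead of a full selectgo
// call.
package main

import "fmt"

func main() {
	ch := make(chan int, 1)

	select { // lowered to: if selectnbsend(ch, &v) { ... } else { ... }
	case ch <- 1:
		fmt.Println("sent without blocking")
	default:
		fmt.Println("channel full")
	}

	select { // lowered to a selectnbrecv call guarded by an if/else
	case v := <-ch:
		fmt.Println("received", v)
	default:
		fmt.Println("channel empty")
	}
}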
go/src/cmd/compile/internal/walk/select.go/0
{ "file_path": "go/src/cmd/compile/internal/walk/select.go", "repo_id": "go", "token_count": 3617 }
123
// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main // This file contains functions and apis to support the "merge" // subcommand of "go tool covdata". import ( "flag" "fmt" "internal/coverage" "internal/coverage/cmerge" "internal/coverage/decodecounter" "internal/coverage/decodemeta" "internal/coverage/pods" "os" ) var outdirflag *string var pcombineflag *bool func makeMergeOp() covOperation { outdirflag = flag.String("o", "", "Output directory to write") pcombineflag = flag.Bool("pcombine", false, "Combine profiles derived from distinct program executables") m := &mstate{ mm: newMetaMerge(), } return m } // mstate encapsulates state and provides methods for implementing the // merge operation. This type implements the CovDataVisitor interface, // and is designed to be used in concert with the CovDataReader // utility, which abstracts away most of the grubby details of reading // coverage data files. Most of the heavy lifting for merging is done // using apis from 'metaMerge' (this is mainly a wrapper around that // functionality). type mstate struct { mm *metaMerge } func (m *mstate) Usage(msg string) { if len(msg) > 0 { fmt.Fprintf(os.Stderr, "error: %s\n", msg) } fmt.Fprintf(os.Stderr, "usage: go tool covdata merge -i=<directories> -o=<dir>\n\n") flag.PrintDefaults() fmt.Fprintf(os.Stderr, "\nExamples:\n\n") fmt.Fprintf(os.Stderr, " go tool covdata merge -i=dir1,dir2,dir3 -o=outdir\n\n") fmt.Fprintf(os.Stderr, " \tmerges all files in dir1/dir2/dir3\n") fmt.Fprintf(os.Stderr, " \tinto output dir outdir\n") Exit(2) } func (m *mstate) Setup() { if *indirsflag == "" { m.Usage("select input directories with '-i' option") } if *outdirflag == "" { m.Usage("select output directory with '-o' option") } m.mm.SetModeMergePolicy(cmerge.ModeMergeRelaxed) } func (m *mstate) BeginPod(p pods.Pod) { m.mm.beginPod() } func (m *mstate) EndPod(p pods.Pod) { m.mm.endPod(*pcombineflag) } func (m *mstate) BeginCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) { dbgtrace(2, "visit counter data file %s dirIdx %d", cdf, dirIdx) m.mm.beginCounterDataFile(cdr) } func (m *mstate) EndCounterDataFile(cdf string, cdr *decodecounter.CounterDataReader, dirIdx int) { } func (m *mstate) VisitFuncCounterData(data decodecounter.FuncPayload) { m.mm.visitFuncCounterData(data) } func (m *mstate) EndCounters() { } func (m *mstate) VisitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader) { m.mm.visitMetaDataFile(mdf, mfr) } func (m *mstate) BeginPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) { dbgtrace(3, "VisitPackage(pk=%d path=%s)", pkgIdx, pd.PackagePath()) m.mm.visitPackage(pd, pkgIdx, *pcombineflag) } func (m *mstate) EndPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32) { } func (m *mstate) VisitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc) { m.mm.visitFunc(pkgIdx, fnIdx, fd, mergeMode, *pcombineflag) } func (m *mstate) Finish() { if *pcombineflag { finalHash := m.mm.emitMeta(*outdirflag, true) m.mm.emitCounters(*outdirflag, finalHash) } }
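// Example (editor's illustration, not part of merge.go): driving the merge
// subcommand from a Go program via os/exec, mirroring the Usage text above.
// The directory names dir1, dir2, and merged are hypothetical; the input
// directories must already contain coverage data files (for example, written
// by a program run with GOCOVERDIR set).
package main

import (
	"log"
	"os/exec"
)

func main() {
	out, err := exec.Command("go", "tool", "covdata", "merge",
		"-i=dir1,dir2", "-o=merged").CombinedOutput()
	if err != nil {
		log.Fatalf("covdata merge failed: %v\n%s", err, out)
	}
}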
go/src/cmd/covdata/merge.go/0
{ "file_path": "go/src/cmd/covdata/merge.go", "repo_id": "go", "token_count": 1254 }
124
package html import "fmt" // This file is tested by html_test.go. // The comments below are markers for extracting the annotated source // from the HTML output. // This is a regression test for incorrect sorting of boundaries // that coincide, specifically for empty select clauses. // START f func f() { ch := make(chan int) select { case <-ch: default: } } // END f // https://golang.org/issue/25767 // START g func g() { if false { fmt.Printf("Hello") } } // END g
go/src/cmd/cover/testdata/html/html.go/0
{ "file_path": "go/src/cmd/cover/testdata/html/html.go", "repo_id": "go", "token_count": 160 }
125
// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "fmt" "strings" ) /* * Helpers for building runtime. */ // mkzversion writes zversion.go: // // package sys // // (Nothing right now!) func mkzversion(dir, file string) { var buf strings.Builder writeHeader(&buf) fmt.Fprintf(&buf, "package sys\n") writefile(buf.String(), file, writeSkipSame) } // mkbuildcfg writes internal/buildcfg/zbootstrap.go: // // package buildcfg // // const defaultGOROOT = <goroot> // const defaultGO386 = <go386> // ... // const defaultGOOS = runtime.GOOS // const defaultGOARCH = runtime.GOARCH // // The use of runtime.GOOS and runtime.GOARCH makes sure that // a cross-compiled compiler expects to compile for its own target // system. That is, if on a Mac you do: // // GOOS=linux GOARCH=ppc64 go build cmd/compile // // the resulting compiler will default to generating linux/ppc64 object files. // This is more useful than having it default to generating objects for the // original target (in this example, a Mac). func mkbuildcfg(file string) { var buf strings.Builder writeHeader(&buf) fmt.Fprintf(&buf, "package buildcfg\n") fmt.Fprintln(&buf) fmt.Fprintf(&buf, "import \"runtime\"\n") fmt.Fprintln(&buf) fmt.Fprintf(&buf, "const defaultGO386 = `%s`\n", go386) fmt.Fprintf(&buf, "const defaultGOAMD64 = `%s`\n", goamd64) fmt.Fprintf(&buf, "const defaultGOARM = `%s`\n", goarm) fmt.Fprintf(&buf, "const defaultGOARM64 = `%s`\n", goarm64) fmt.Fprintf(&buf, "const defaultGOMIPS = `%s`\n", gomips) fmt.Fprintf(&buf, "const defaultGOMIPS64 = `%s`\n", gomips64) fmt.Fprintf(&buf, "const defaultGOPPC64 = `%s`\n", goppc64) fmt.Fprintf(&buf, "const defaultGORISCV64 = `%s`\n", goriscv64) fmt.Fprintf(&buf, "const defaultGOEXPERIMENT = `%s`\n", goexperiment) fmt.Fprintf(&buf, "const defaultGO_EXTLINK_ENABLED = `%s`\n", goextlinkenabled) fmt.Fprintf(&buf, "const defaultGO_LDSO = `%s`\n", defaultldso) fmt.Fprintf(&buf, "const version = `%s`\n", findgoversion()) fmt.Fprintf(&buf, "const defaultGOOS = runtime.GOOS\n") fmt.Fprintf(&buf, "const defaultGOARCH = runtime.GOARCH\n") writefile(buf.String(), file, writeSkipSame) } // mkobjabi writes cmd/internal/objabi/zbootstrap.go: // // package objabi // // (Nothing right now!) func mkobjabi(file string) { var buf strings.Builder writeHeader(&buf) fmt.Fprintf(&buf, "package objabi\n") writefile(buf.String(), file, writeSkipSame) }
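// Illustration (editor's note, not part of buildruntime.go): the approximate
// shape of the zbootstrap.go file that mkbuildcfg writes. The values shown
// here are hypothetical; the real ones come from the dist bootstrap variables
// (go386, goamd64, goarm, ...) and the header text comes from writeHeader.
//
//	package buildcfg
//
//	import "runtime"
//
//	const defaultGO386 = `sse2`
//	const defaultGOAMD64 = `v1`
//	const defaultGOARM = `7`
//	const version = `go1.23.0`
//	const defaultGOOS = runtime.GOOS
//	const defaultGOARCH = runtime.GOARCH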
go/src/cmd/dist/buildruntime.go/0
{ "file_path": "go/src/cmd/dist/buildruntime.go", "repo_id": "go", "token_count": 941 }
126
// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "bytes" "flag" "fmt" "io" "os" "os/exec" "path/filepath" "sort" "strconv" "strings" "sync" "time" ) // pathf is fmt.Sprintf for generating paths // (on windows it turns / into \ after the printf). func pathf(format string, args ...interface{}) string { return filepath.Clean(fmt.Sprintf(format, args...)) } // filter returns a slice containing the elements x from list for which f(x) == true. func filter(list []string, f func(string) bool) []string { var out []string for _, x := range list { if f(x) { out = append(out, x) } } return out } // uniq returns a sorted slice containing the unique elements of list. func uniq(list []string) []string { out := make([]string, len(list)) copy(out, list) sort.Strings(out) keep := out[:0] for _, x := range out { if len(keep) == 0 || keep[len(keep)-1] != x { keep = append(keep, x) } } return keep } const ( CheckExit = 1 << iota ShowOutput Background ) var outputLock sync.Mutex // run is like runEnv with no additional environment. func run(dir string, mode int, cmd ...string) string { return runEnv(dir, mode, nil, cmd...) } // runEnv runs the command line cmd in dir with additional environment env. // If mode has ShowOutput set and Background unset, run passes cmd's output to // stdout/stderr directly. Otherwise, run returns cmd's output as a string. // If mode has CheckExit set and the command fails, run calls fatalf. // If mode has Background set, this command is being run as a // Background job. Only bgrun should use the Background mode, // not other callers. func runEnv(dir string, mode int, env []string, cmd ...string) string { if vflag > 1 { errprintf("run: %s\n", strings.Join(cmd, " ")) } xcmd := exec.Command(cmd[0], cmd[1:]...) if env != nil { xcmd.Env = append(os.Environ(), env...) } setDir(xcmd, dir) var data []byte var err error // If we want to show command output and this is not // a background command, assume it's the only thing // running, so we can just let it write directly stdout/stderr // as it runs without fear of mixing the output with some // other command's output. Not buffering lets the output // appear as it is printed instead of once the command exits. // This is most important for the invocation of 'go build -v bootstrap/...'. if mode&(Background|ShowOutput) == ShowOutput { xcmd.Stdout = os.Stdout xcmd.Stderr = os.Stderr err = xcmd.Run() } else { data, err = xcmd.CombinedOutput() } if err != nil && mode&CheckExit != 0 { outputLock.Lock() if len(data) > 0 { xprintf("%s\n", data) } outputLock.Unlock() if mode&Background != 0 { // Prevent fatalf from waiting on our own goroutine's // bghelper to exit: bghelpers.Done() } fatalf("FAILED: %v: %v", strings.Join(cmd, " "), err) } if mode&ShowOutput != 0 { outputLock.Lock() os.Stdout.Write(data) outputLock.Unlock() } if vflag > 2 { errprintf("run: %s DONE\n", strings.Join(cmd, " ")) } return string(data) } var maxbg = 4 /* maximum number of jobs to run at once */ var ( bgwork = make(chan func(), 1e5) bghelpers sync.WaitGroup dieOnce sync.Once // guards close of dying dying = make(chan struct{}) ) func bginit() { bghelpers.Add(maxbg) for i := 0; i < maxbg; i++ { go bghelper() } } func bghelper() { defer bghelpers.Done() for { select { case <-dying: return case w := <-bgwork: // Dying takes precedence over doing more work. 
select { case <-dying: return default: w() } } } } // bgrun is like run but runs the command in the background. // CheckExit|ShowOutput mode is implied (since output cannot be returned). // bgrun adds 1 to wg immediately, and calls Done when the work completes. func bgrun(wg *sync.WaitGroup, dir string, cmd ...string) { wg.Add(1) bgwork <- func() { defer wg.Done() run(dir, CheckExit|ShowOutput|Background, cmd...) } } // bgwait waits for pending bgruns to finish. // bgwait must be called from only a single goroutine at a time. func bgwait(wg *sync.WaitGroup) { done := make(chan struct{}) go func() { wg.Wait() close(done) }() select { case <-done: case <-dying: // Don't return to the caller, to avoid reporting additional errors // to the user. select {} } } // xgetwd returns the current directory. func xgetwd() string { wd, err := os.Getwd() if err != nil { fatalf("%s", err) } return wd } // xrealwd returns the 'real' name for the given path. // real is defined as what xgetwd returns in that directory. func xrealwd(path string) string { old := xgetwd() if err := os.Chdir(path); err != nil { fatalf("chdir %s: %v", path, err) } real := xgetwd() if err := os.Chdir(old); err != nil { fatalf("chdir %s: %v", old, err) } return real } // isdir reports whether p names an existing directory. func isdir(p string) bool { fi, err := os.Stat(p) return err == nil && fi.IsDir() } // isfile reports whether p names an existing file. func isfile(p string) bool { fi, err := os.Stat(p) return err == nil && fi.Mode().IsRegular() } // mtime returns the modification time of the file p. func mtime(p string) time.Time { fi, err := os.Stat(p) if err != nil { return time.Time{} } return fi.ModTime() } // readfile returns the content of the named file. func readfile(file string) string { data, err := os.ReadFile(file) if err != nil { fatalf("%v", err) } return string(data) } const ( writeExec = 1 << iota writeSkipSame ) // writefile writes text to the named file, creating it if needed. // if exec is non-zero, marks the file as executable. // If the file already exists and has the expected content, // it is not rewritten, to avoid changing the time stamp. func writefile(text, file string, flag int) { new := []byte(text) if flag&writeSkipSame != 0 { old, err := os.ReadFile(file) if err == nil && bytes.Equal(old, new) { return } } mode := os.FileMode(0666) if flag&writeExec != 0 { mode = 0777 } xremove(file) // in case of symlink tricks by misc/reboot test err := os.WriteFile(file, new, mode) if err != nil { fatalf("%v", err) } } // xmkdir creates the directory p. func xmkdir(p string) { err := os.Mkdir(p, 0777) if err != nil { fatalf("%v", err) } } // xmkdirall creates the directory p and its parents, as needed. func xmkdirall(p string) { err := os.MkdirAll(p, 0777) if err != nil { fatalf("%v", err) } } // xremove removes the file p. func xremove(p string) { if vflag > 2 { errprintf("rm %s\n", p) } os.Remove(p) } // xremoveall removes the file or directory tree rooted at p. func xremoveall(p string) { if vflag > 2 { errprintf("rm -r %s\n", p) } os.RemoveAll(p) } // xreaddir returns a list of the names of the files and subdirectories in dir. // The names are relative to dir; they are not full paths. 
func xreaddir(dir string) []string { f, err := os.Open(dir) if err != nil { fatalf("%v", err) } defer f.Close() names, err := f.Readdirnames(-1) if err != nil { fatalf("reading %s: %v", dir, err) } return names } // xworkdir creates a new temporary directory to hold object files // and returns the name of that directory. func xworkdir() string { name, err := os.MkdirTemp(os.Getenv("GOTMPDIR"), "go-tool-dist-") if err != nil { fatalf("%v", err) } return name } // fatalf prints an error message to standard error and exits. func fatalf(format string, args ...interface{}) { fmt.Fprintf(os.Stderr, "go tool dist: %s\n", fmt.Sprintf(format, args...)) dieOnce.Do(func() { close(dying) }) // Wait for background goroutines to finish, // so that the exit handler that removes the work directory // is not fighting with active writes or open files. bghelpers.Wait() xexit(2) } var atexits []func() // xexit exits the process with return code n. func xexit(n int) { for i := len(atexits) - 1; i >= 0; i-- { atexits[i]() } os.Exit(n) } // xatexit schedules the exit-handler f to be run when the program exits. func xatexit(f func()) { atexits = append(atexits, f) } // xprintf prints a message to standard output. func xprintf(format string, args ...interface{}) { fmt.Printf(format, args...) } // errprintf prints a message to standard error. func errprintf(format string, args ...interface{}) { fmt.Fprintf(os.Stderr, format, args...) } // xsamefile reports whether f1 and f2 are the same file (or dir). func xsamefile(f1, f2 string) bool { fi1, err1 := os.Stat(f1) fi2, err2 := os.Stat(f2) if err1 != nil || err2 != nil { return f1 == f2 } return os.SameFile(fi1, fi2) } func xgetgoarm() string { // If we're building on an actual arm system, and not building // a cross-compiling toolchain, try to exec ourselves // to detect whether VFP is supported and set the default GOARM. // Windows requires ARMv7, so we can skip the check. // We've always assumed Android is ARMv7 too. if gohostarch == "arm" && goarch == "arm" && goos == gohostos && goos != "windows" && goos != "android" { // Try to exec ourselves in a mode to detect VFP support. // Seeing how far it gets determines which instructions failed. // The test is OS-agnostic. out := run("", 0, os.Args[0], "-check-goarm") v1ok := strings.Contains(out, "VFPv1 OK.") v3ok := strings.Contains(out, "VFPv3 OK.") if v1ok && v3ok { return "7" } if v1ok { return "6" } return "5" } // Otherwise, in the absence of local information, assume GOARM=7. // // We used to assume GOARM=5 in certain contexts but not others, // which produced inconsistent results. For example if you cross-compiled // for linux/arm from a windows/amd64 machine, you got GOARM=7 binaries, // but if you cross-compiled for linux/arm from a linux/amd64 machine, // you got GOARM=5 binaries. Now the default is independent of the // host operating system, for better reproducibility of builds. return "7" } func min(a, b int) int { if a < b { return a } return b } // elfIsLittleEndian detects if the ELF file is little endian. func elfIsLittleEndian(fn string) bool { // read the ELF file header to determine the endianness without using the // debug/elf package. 
file, err := os.Open(fn) if err != nil { fatalf("failed to open file to determine endianness: %v", err) } defer file.Close() var hdr [16]byte if _, err := io.ReadFull(file, hdr[:]); err != nil { fatalf("failed to read ELF header to determine endianness: %v", err) } // hdr[5] is EI_DATA byte, 1 is ELFDATA2LSB and 2 is ELFDATA2MSB switch hdr[5] { default: fatalf("unknown ELF endianness of %s: EI_DATA = %d", fn, hdr[5]) case 1: return true case 2: return false } panic("unreachable") } // count is a flag.Value that is like a flag.Bool and a flag.Int. // If used as -name, it increments the count, but -name=x sets the count. // Used for verbose flag -v. type count int func (c *count) String() string { return fmt.Sprint(int(*c)) } func (c *count) Set(s string) error { switch s { case "true": *c++ case "false": *c = 0 default: n, err := strconv.Atoi(s) if err != nil { return fmt.Errorf("invalid count %q", s) } *c = count(n) } return nil } func (c *count) IsBoolFlag() bool { return true } func xflagparse(maxargs int) { flag.Var((*count)(&vflag), "v", "verbosity") flag.Parse() if maxargs >= 0 && flag.NArg() > maxargs { flag.Usage() } }
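// Example (editor's illustration, not part of util.go): the count flag type
// defined above, reused verbatim in a standalone program. Running it as
// "prog -v -v" prints verbosity 2, while "prog -v=5" prints verbosity 5.
package main

import (
	"flag"
	"fmt"
	"strconv"
)

type count int

func (c *count) String() string { return fmt.Sprint(int(*c)) }

func (c *count) Set(s string) error {
	switch s {
	case "true":
		*c++ // a bare -name increments the count
	case "false":
		*c = 0
	default:
		n, err := strconv.Atoi(s)
		if err != nil {
			return fmt.Errorf("invalid count %q", s)
		}
		*c = count(n) // -name=x sets the count directly
	}
	return nil
}

func (c *count) IsBoolFlag() bool { return true }

func main() {
	var v count
	flag.Var(&v, "v", "verbosity")
	flag.Parse()
	fmt.Println("verbosity:", int(v))
}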
go/src/cmd/dist/util.go/0
{ "file_path": "go/src/cmd/dist/util.go", "repo_id": "go", "token_count": 4241 }
127
//go:build ignore // +build ignore // Ignored package package nested
go/src/cmd/doc/testdata/nested/ignore.go/0
{ "file_path": "go/src/cmd/doc/testdata/nested/ignore.go", "repo_id": "go", "token_count": 20 }
128
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "go/ast" ) func init() { register(jniFix) } var jniFix = fix{ name: "jni", date: "2017-12-04", f: jnifix, desc: `Fixes initializers of JNI's jobject and subtypes`, disabled: false, } // Old state: // // type jobject *_jobject // // New state: // // type jobject uintptr // // and similar for subtypes of jobject. // This fix finds nils initializing these types and replaces the nils with 0s. func jnifix(f *ast.File) bool { return typefix(f, func(s string) bool { switch s { case "C.jobject": return true case "C.jclass": return true case "C.jthrowable": return true case "C.jstring": return true case "C.jarray": return true case "C.jbooleanArray": return true case "C.jbyteArray": return true case "C.jcharArray": return true case "C.jshortArray": return true case "C.jintArray": return true case "C.jlongArray": return true case "C.jfloatArray": return true case "C.jdoubleArray": return true case "C.jobjectArray": return true case "C.jweak": return true } return false }) }
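// Illustration (editor's note, not part of jnitype.go): the rewrite this fix
// performs on user cgo code, per the doc comment above. Before the JNI types
// changed, jobject was *_jobject and nil initializers were valid; afterwards
// jobject is uintptr, so the fix rewrites those nils to 0:
//
//	var obj C.jobject = nil // before
//	var obj C.jobject = 0   // after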
go/src/cmd/fix/jnitype.go/0
{ "file_path": "go/src/cmd/fix/jnitype.go", "repo_id": "go", "token_count": 523 }
129
// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main_test import ( "bytes" "debug/elf" "debug/macho" "debug/pe" "encoding/binary" "flag" "fmt" "go/format" "internal/godebug" "internal/platform" "internal/testenv" "io" "io/fs" "log" "math" "os" "os/exec" "path/filepath" "regexp" "runtime" "strconv" "strings" "testing" "time" "cmd/go/internal/base" "cmd/go/internal/cache" "cmd/go/internal/cfg" "cmd/go/internal/gover" "cmd/go/internal/robustio" "cmd/go/internal/search" "cmd/go/internal/toolchain" "cmd/go/internal/vcs" "cmd/go/internal/vcweb/vcstest" "cmd/go/internal/web" "cmd/go/internal/work" "cmd/internal/sys" cmdgo "cmd/go" ) func init() { // GOVCS defaults to public:git|hg,private:all, // which breaks many tests here - they can't use non-git, non-hg VCS at all! // Change to fully permissive. // The tests of the GOVCS setting itself are in ../../testdata/script/govcs.txt. os.Setenv("GOVCS", "*:all") } var ( canRace = false // whether we can run the race detector canMSan = false // whether we can run the memory sanitizer canASan = false // whether we can run the address sanitizer ) var ( goHostOS, goHostArch string cgoEnabled string // raw value from 'go env CGO_ENABLED' ) // netTestSem is a semaphore limiting the number of tests that may use the // external network in parallel. If non-nil, it contains one buffer slot per // test (send to acquire), with a low enough limit that the overall number of // connections (summed across subprocesses) stays at or below base.NetLimit. var netTestSem chan struct{} var exeSuffix string = func() string { if runtime.GOOS == "windows" { return ".exe" } return "" }() func tooSlow(t *testing.T, reason string) { if testing.Short() { t.Helper() t.Skipf("skipping test in -short mode: %s", reason) } } // testGOROOT is the GOROOT to use when running testgo, a cmd/go binary // built from this process's current GOROOT, but run from a different // (temp) directory. var testGOROOT string var testGOCACHE string var testGo string var testTmpDir string var testBin string // The TestMain function creates a go command for testing purposes and // deletes it after the tests have been run. func TestMain(m *testing.M) { // When CMDGO_TEST_RUN_MAIN is set, we're reusing the test binary as cmd/go. // Enable the special behavior needed in cmd/go/internal/work, // run the main func exported via export_test.go, and exit. // We set CMDGO_TEST_RUN_MAIN via os.Setenv and testScript.setup. if os.Getenv("CMDGO_TEST_RUN_MAIN") != "" { cfg.SetGOROOT(cfg.GOROOT, true) gover.TestVersion = os.Getenv("TESTGO_VERSION") toolchain.TestVersionSwitch = os.Getenv("TESTGO_VERSION_SWITCH") if v := os.Getenv("TESTGO_TOOLCHAIN_VERSION"); v != "" { work.ToolchainVersion = v } if testGOROOT := os.Getenv("TESTGO_GOROOT"); testGOROOT != "" { // Disallow installs to the GOROOT from which testgo was built. // Installs to other GOROOTs — such as one set explicitly within a test — are ok. work.AllowInstall = func(a *work.Action) error { if cfg.BuildN { return nil } rel := search.InDir(a.Target, testGOROOT) if rel == "" { return nil } callerPos := "" if _, file, line, ok := runtime.Caller(1); ok { if shortFile := search.InDir(file, filepath.Join(testGOROOT, "src")); shortFile != "" { file = shortFile } callerPos = fmt.Sprintf("%s:%d: ", file, line) } notice := "This error can occur if GOROOT is stale, in which case rerunning make.bash will fix it." 
return fmt.Errorf("%stestgo must not write to GOROOT (installing to %s) (%v)", callerPos, filepath.Join("GOROOT", rel), notice) } } if vcsTestHost := os.Getenv("TESTGO_VCSTEST_HOST"); vcsTestHost != "" { vcs.VCSTestRepoURL = "http://" + vcsTestHost vcs.VCSTestHosts = vcstest.Hosts vcsTestTLSHost := os.Getenv("TESTGO_VCSTEST_TLS_HOST") vcsTestClient, err := vcstest.TLSClient(os.Getenv("TESTGO_VCSTEST_CERT")) if err != nil { fmt.Fprintf(os.Stderr, "loading certificates from $TESTGO_VCSTEST_CERT: %v", err) } var interceptors []web.Interceptor for _, host := range vcstest.Hosts { interceptors = append(interceptors, web.Interceptor{Scheme: "http", FromHost: host, ToHost: vcsTestHost}, web.Interceptor{Scheme: "https", FromHost: host, ToHost: vcsTestTLSHost, Client: vcsTestClient}) } web.EnableTestHooks(interceptors) } cmdgo.Main() os.Exit(0) } os.Setenv("CMDGO_TEST_RUN_MAIN", "true") // $GO_GCFLAGS a compiler debug flag known to cmd/dist, make.bash, etc. // It is not a standard go command flag; use os.Getenv, not cfg.Getenv. if os.Getenv("GO_GCFLAGS") != "" { fmt.Fprintf(os.Stderr, "testing: warning: no tests to run\n") // magic string for cmd/go fmt.Printf("cmd/go test is not compatible with $GO_GCFLAGS being set\n") fmt.Printf("SKIP\n") return } flag.Parse() if *proxyAddr != "" { StartProxy() select {} } // Run with a temporary TMPDIR to check that the tests don't // leave anything behind. topTmpdir, err := os.MkdirTemp("", "cmd-go-test-") if err != nil { log.Fatal(err) } if !*testWork { defer removeAll(topTmpdir) } else { fmt.Fprintf(os.Stderr, "TESTWORK: preserving top level tempdir %s\n", topTmpdir) } os.Setenv(tempEnvName(), topTmpdir) dir, err := os.MkdirTemp(topTmpdir, "tmpdir") if err != nil { log.Fatal(err) } testTmpDir = dir if !*testWork { defer removeAll(testTmpDir) } testGOCACHE, _ = cache.DefaultDir() if testenv.HasGoBuild() { testBin = filepath.Join(testTmpDir, "testbin") if err := os.Mkdir(testBin, 0777); err != nil { log.Fatal(err) } testGo = filepath.Join(testBin, "go"+exeSuffix) gotool, err := testenv.GoTool() if err != nil { fmt.Fprintln(os.Stderr, "locating go tool: ", err) os.Exit(2) } goEnv := func(name string) string { out, err := exec.Command(gotool, "env", name).CombinedOutput() if err != nil { fmt.Fprintf(os.Stderr, "go env %s: %v\n%s", name, err, out) os.Exit(2) } return strings.TrimSpace(string(out)) } testGOROOT = goEnv("GOROOT") os.Setenv("TESTGO_GOROOT", testGOROOT) os.Setenv("GOROOT", testGOROOT) // The whole GOROOT/pkg tree was installed using the GOHOSTOS/GOHOSTARCH // toolchain (installed in GOROOT/pkg/tool/GOHOSTOS_GOHOSTARCH). // The testgo.exe we are about to create will be built for GOOS/GOARCH, // which means it will use the GOOS/GOARCH toolchain // (installed in GOROOT/pkg/tool/GOOS_GOARCH). // If these are not the same toolchain, then the entire standard library // will look out of date (the compilers in those two different tool directories // are built for different architectures and have different build IDs), // which will cause many tests to do unnecessary rebuilds and some // tests to attempt to overwrite the installed standard library. // Bail out entirely in this case. goHostOS = goEnv("GOHOSTOS") os.Setenv("TESTGO_GOHOSTOS", goHostOS) goHostArch = goEnv("GOHOSTARCH") os.Setenv("TESTGO_GOHOSTARCH", goHostArch) cgoEnabled = goEnv("CGO_ENABLED") // Duplicate the test executable into the path at testGo, for $PATH. // If the OS supports symlinks, use them instead of copying bytes. 
testExe, err := os.Executable() if err != nil { log.Fatal(err) } if err := os.Symlink(testExe, testGo); err != nil { // Otherwise, copy the bytes. src, err := os.Open(testExe) if err != nil { log.Fatal(err) } defer src.Close() dst, err := os.OpenFile(testGo, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o777) if err != nil { log.Fatal(err) } _, err = io.Copy(dst, src) if closeErr := dst.Close(); err == nil { err = closeErr } if err != nil { log.Fatal(err) } } out, err := exec.Command(gotool, "env", "GOCACHE").CombinedOutput() if err != nil { fmt.Fprintf(os.Stderr, "could not find testing GOCACHE: %v\n%s", err, out) os.Exit(2) } testGOCACHE = strings.TrimSpace(string(out)) canMSan = testenv.HasCGO() && platform.MSanSupported(runtime.GOOS, runtime.GOARCH) canASan = testenv.HasCGO() && platform.ASanSupported(runtime.GOOS, runtime.GOARCH) canRace = testenv.HasCGO() && platform.RaceDetectorSupported(runtime.GOOS, runtime.GOARCH) // The race detector doesn't work on Alpine Linux: // golang.org/issue/14481 // gccgo does not support the race detector. if isAlpineLinux() || runtime.Compiler == "gccgo" { canRace = false } } if n, limited := base.NetLimit(); limited && n > 0 { // Split the network limit into chunks, so that each parallel script can // have one chunk. We want to run as many parallel scripts as possible, but // also want to give each script as high a limit as possible. // We arbitrarily split by sqrt(n) to try to balance those two goals. netTestLimit := int(math.Sqrt(float64(n))) netTestSem = make(chan struct{}, netTestLimit) reducedLimit := fmt.Sprintf(",%s=%d", base.NetLimitGodebug.Name(), n/netTestLimit) os.Setenv("GODEBUG", os.Getenv("GODEBUG")+reducedLimit) } // Don't let these environment variables confuse the test. os.Setenv("GOENV", "off") os.Unsetenv("GOFLAGS") os.Unsetenv("GOBIN") os.Unsetenv("GOPATH") os.Unsetenv("GIT_ALLOW_PROTOCOL") os.Setenv("HOME", "/test-go-home-does-not-exist") // On some systems the default C compiler is ccache. // Setting HOME to a non-existent directory will break // those systems. Disable ccache and use real compiler. Issue 17668. os.Setenv("CCACHE_DISABLE", "1") if cfg.Getenv("GOCACHE") == "" { os.Setenv("GOCACHE", testGOCACHE) // because $HOME is gone } if testenv.Builder() != "" || os.Getenv("GIT_TRACE_CURL") == "1" { // To help diagnose https://go.dev/issue/52545, // enable tracing for Git HTTPS requests. os.Setenv("GIT_TRACE_CURL", "1") os.Setenv("GIT_TRACE_CURL_NO_DATA", "1") os.Setenv("GIT_REDACT_COOKIES", "o,SSO,GSSO_Uberproxy") } r := m.Run() if !*testWork { removeAll(testTmpDir) // os.Exit won't run defer } if !*testWork { // There shouldn't be anything left in topTmpdir. var extraFiles, extraDirs []string err := filepath.WalkDir(topTmpdir, func(path string, d fs.DirEntry, err error) error { if err != nil { return err } if path == topTmpdir { return nil } if rel, err := filepath.Rel(topTmpdir, path); err == nil { path = rel } if d.IsDir() { extraDirs = append(extraDirs, path) } else { extraFiles = append(extraFiles, path) } return nil }) if err != nil { log.Fatal(err) } if len(extraFiles) > 0 { log.Fatalf("unexpected files left in tmpdir: %q", extraFiles) } else if len(extraDirs) > 0 { log.Fatalf("unexpected subdirectories left in tmpdir: %q", extraDirs) } removeAll(topTmpdir) } os.Exit(r) } func isAlpineLinux() bool { if runtime.GOOS != "linux" { return false } fi, err := os.Lstat("/etc/alpine-release") return err == nil && fi.Mode().IsRegular() } // The length of an mtime tick on this system. 
This is an estimate of // how long we need to sleep to ensure that the mtime of two files is // different. // We used to try to be clever but that didn't always work (see golang.org/issue/12205). var mtimeTick time.Duration = 1 * time.Second // Manage a single run of the testgo binary. type testgoData struct { t *testing.T temps []string env []string tempdir string ran bool inParallel bool stdout, stderr bytes.Buffer execDir string // dir for tg.run } // skipIfGccgo skips the test if using gccgo. func skipIfGccgo(t *testing.T, msg string) { if runtime.Compiler == "gccgo" { t.Skipf("skipping test not supported on gccgo: %s", msg) } } // testgo sets up for a test that runs testgo. func testgo(t *testing.T) *testgoData { t.Helper() testenv.MustHaveGoBuild(t) testenv.SkipIfShortAndSlow(t) return &testgoData{t: t} } // must gives a fatal error if err is not nil. func (tg *testgoData) must(err error) { tg.t.Helper() if err != nil { tg.t.Fatal(err) } } // check gives a test non-fatal error if err is not nil. func (tg *testgoData) check(err error) { tg.t.Helper() if err != nil { tg.t.Error(err) } } // parallel runs the test in parallel by calling t.Parallel. func (tg *testgoData) parallel() { tg.t.Helper() if tg.ran { tg.t.Fatal("internal testsuite error: call to parallel after run") } for _, e := range tg.env { if strings.HasPrefix(e, "GOROOT=") || strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") { val := e[strings.Index(e, "=")+1:] if strings.HasPrefix(val, "testdata") || strings.HasPrefix(val, "./testdata") { tg.t.Fatalf("internal testsuite error: call to parallel with testdata in environment (%s)", e) } } } tg.inParallel = true tg.t.Parallel() } // pwd returns the current directory. func (tg *testgoData) pwd() string { tg.t.Helper() wd, err := os.Getwd() if err != nil { tg.t.Fatalf("could not get working directory: %v", err) } return wd } // sleep sleeps for one tick, where a tick is a conservative estimate // of how long it takes for a file modification to get a different // mtime. func (tg *testgoData) sleep() { time.Sleep(mtimeTick) } // setenv sets an environment variable to use when running the test go // command. func (tg *testgoData) setenv(name, val string) { tg.t.Helper() tg.unsetenv(name) tg.env = append(tg.env, name+"="+val) } // unsetenv removes an environment variable. func (tg *testgoData) unsetenv(name string) { if tg.env == nil { tg.env = append([]string(nil), os.Environ()...) tg.env = append(tg.env, "GO111MODULE=off", "TESTGONETWORK=panic") if testing.Short() { tg.env = append(tg.env, "TESTGOVCS=panic") } } for i, v := range tg.env { if strings.HasPrefix(v, name+"=") { tg.env = append(tg.env[:i], tg.env[i+1:]...) break } } } func (tg *testgoData) goTool() string { return testGo } // doRun runs the test go command, recording stdout and stderr and // returning exit status. func (tg *testgoData) doRun(args []string) error { tg.t.Helper() if tg.inParallel { for _, arg := range args { if strings.HasPrefix(arg, "testdata") || strings.HasPrefix(arg, "./testdata") { tg.t.Fatal("internal testsuite error: parallel run using testdata") } } } hasGoroot := false for _, v := range tg.env { if strings.HasPrefix(v, "GOROOT=") { hasGoroot = true break } } prog := tg.goTool() if !hasGoroot { tg.setenv("GOROOT", testGOROOT) } tg.t.Logf("running testgo %v", args) cmd := testenv.Command(tg.t, prog, args...) 
tg.stdout.Reset() tg.stderr.Reset() cmd.Dir = tg.execDir cmd.Stdout = &tg.stdout cmd.Stderr = &tg.stderr cmd.Env = tg.env status := cmd.Run() if tg.stdout.Len() > 0 { tg.t.Log("standard output:") tg.t.Log(tg.stdout.String()) } if tg.stderr.Len() > 0 { tg.t.Log("standard error:") tg.t.Log(tg.stderr.String()) } tg.ran = true return status } // run runs the test go command, and expects it to succeed. func (tg *testgoData) run(args ...string) { tg.t.Helper() if status := tg.doRun(args); status != nil { wd, _ := os.Getwd() tg.t.Logf("go %v failed unexpectedly in %s: %v", args, wd, status) tg.t.FailNow() } } // runFail runs the test go command, and expects it to fail. func (tg *testgoData) runFail(args ...string) { tg.t.Helper() if status := tg.doRun(args); status == nil { tg.t.Fatal("testgo succeeded unexpectedly") } else { tg.t.Log("testgo failed as expected:", status) } } // getStdout returns standard output of the testgo run as a string. func (tg *testgoData) getStdout() string { tg.t.Helper() if !tg.ran { tg.t.Fatal("internal testsuite error: stdout called before run") } return tg.stdout.String() } // getStderr returns standard error of the testgo run as a string. func (tg *testgoData) getStderr() string { tg.t.Helper() if !tg.ran { tg.t.Fatal("internal testsuite error: stderr called before run") } return tg.stderr.String() } // doGrepMatch looks for a regular expression in a buffer, and returns // whether it is found. The regular expression is matched against // each line separately, as with the grep command. func (tg *testgoData) doGrepMatch(match string, b *bytes.Buffer) bool { tg.t.Helper() if !tg.ran { tg.t.Fatal("internal testsuite error: grep called before run") } re := regexp.MustCompile(match) for _, ln := range bytes.Split(b.Bytes(), []byte{'\n'}) { if re.Match(ln) { return true } } return false } // doGrep looks for a regular expression in a buffer and fails if it // is not found. The name argument is the name of the output we are // searching, "output" or "error". The msg argument is logged on // failure. func (tg *testgoData) doGrep(match string, b *bytes.Buffer, name, msg string) { tg.t.Helper() if !tg.doGrepMatch(match, b) { tg.t.Log(msg) tg.t.Logf("pattern %v not found in standard %s", match, name) tg.t.FailNow() } } // grepStdout looks for a regular expression in the test run's // standard output and fails, logging msg, if it is not found. func (tg *testgoData) grepStdout(match, msg string) { tg.t.Helper() tg.doGrep(match, &tg.stdout, "output", msg) } // grepStderr looks for a regular expression in the test run's // standard error and fails, logging msg, if it is not found. func (tg *testgoData) grepStderr(match, msg string) { tg.t.Helper() tg.doGrep(match, &tg.stderr, "error", msg) } // grepBoth looks for a regular expression in the test run's standard // output or standard error and fails, logging msg, if it is not found. func (tg *testgoData) grepBoth(match, msg string) { tg.t.Helper() if !tg.doGrepMatch(match, &tg.stdout) && !tg.doGrepMatch(match, &tg.stderr) { tg.t.Log(msg) tg.t.Logf("pattern %v not found in standard output or standard error", match) tg.t.FailNow() } } // doGrepNot looks for a regular expression in a buffer and fails if // it is found. The name and msg arguments are as for doGrep. 
func (tg *testgoData) doGrepNot(match string, b *bytes.Buffer, name, msg string) { tg.t.Helper() if tg.doGrepMatch(match, b) { tg.t.Log(msg) tg.t.Logf("pattern %v found unexpectedly in standard %s", match, name) tg.t.FailNow() } } // grepStdoutNot looks for a regular expression in the test run's // standard output and fails, logging msg, if it is found. func (tg *testgoData) grepStdoutNot(match, msg string) { tg.t.Helper() tg.doGrepNot(match, &tg.stdout, "output", msg) } // grepStderrNot looks for a regular expression in the test run's // standard error and fails, logging msg, if it is found. func (tg *testgoData) grepStderrNot(match, msg string) { tg.t.Helper() tg.doGrepNot(match, &tg.stderr, "error", msg) } // grepBothNot looks for a regular expression in the test run's // standard output or standard error and fails, logging msg, if it is // found. func (tg *testgoData) grepBothNot(match, msg string) { tg.t.Helper() if tg.doGrepMatch(match, &tg.stdout) || tg.doGrepMatch(match, &tg.stderr) { tg.t.Log(msg) tg.t.Fatalf("pattern %v found unexpectedly in standard output or standard error", match) } } // doGrepCount counts the number of times a regexp is seen in a buffer. func (tg *testgoData) doGrepCount(match string, b *bytes.Buffer) int { tg.t.Helper() if !tg.ran { tg.t.Fatal("internal testsuite error: doGrepCount called before run") } re := regexp.MustCompile(match) c := 0 for _, ln := range bytes.Split(b.Bytes(), []byte{'\n'}) { if re.Match(ln) { c++ } } return c } // grepCountBoth returns the number of times a regexp is seen in both // standard output and standard error. func (tg *testgoData) grepCountBoth(match string) int { tg.t.Helper() return tg.doGrepCount(match, &tg.stdout) + tg.doGrepCount(match, &tg.stderr) } // creatingTemp records that the test plans to create a temporary file // or directory. If the file or directory exists already, it will be // removed. When the test completes, the file or directory will be // removed if it exists. func (tg *testgoData) creatingTemp(path string) { tg.t.Helper() if filepath.IsAbs(path) && !strings.HasPrefix(path, tg.tempdir) { tg.t.Fatalf("internal testsuite error: creatingTemp(%q) with absolute path not in temporary directory", path) } tg.must(robustio.RemoveAll(path)) tg.temps = append(tg.temps, path) } // makeTempdir makes a temporary directory for a run of testgo. If // the temporary directory was already created, this does nothing. func (tg *testgoData) makeTempdir() { tg.t.Helper() if tg.tempdir == "" { var err error tg.tempdir, err = os.MkdirTemp("", "gotest") tg.must(err) } } // tempFile adds a temporary file for a run of testgo. func (tg *testgoData) tempFile(path, contents string) { tg.t.Helper() tg.makeTempdir() tg.must(os.MkdirAll(filepath.Join(tg.tempdir, filepath.Dir(path)), 0755)) bytes := []byte(contents) if strings.HasSuffix(path, ".go") { formatted, err := format.Source(bytes) if err == nil { bytes = formatted } } tg.must(os.WriteFile(filepath.Join(tg.tempdir, path), bytes, 0644)) } // tempDir adds a temporary directory for a run of testgo. func (tg *testgoData) tempDir(path string) { tg.t.Helper() tg.makeTempdir() if err := os.MkdirAll(filepath.Join(tg.tempdir, path), 0755); err != nil && !os.IsExist(err) { tg.t.Fatal(err) } } // path returns the absolute pathname to file with the temporary // directory. func (tg *testgoData) path(name string) string { tg.t.Helper() if tg.tempdir == "" { tg.t.Fatalf("internal testsuite error: path(%q) with no tempdir", name) } if name == "." 
{ return tg.tempdir } return filepath.Join(tg.tempdir, name) } // mustExist fails if path does not exist. func (tg *testgoData) mustExist(path string) { tg.t.Helper() if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { tg.t.Fatalf("%s does not exist but should", path) } tg.t.Fatalf("%s stat failed: %v", path, err) } } // mustNotExist fails if path exists. func (tg *testgoData) mustNotExist(path string) { tg.t.Helper() if _, err := os.Stat(path); err == nil || !os.IsNotExist(err) { tg.t.Fatalf("%s exists but should not (%v)", path, err) } } // wantExecutable fails with msg if path is not executable. func (tg *testgoData) wantExecutable(path, msg string) { tg.t.Helper() if st, err := os.Stat(path); err != nil { if !os.IsNotExist(err) { tg.t.Log(err) } tg.t.Fatal(msg) } else { if runtime.GOOS != "windows" && st.Mode()&0111 == 0 { tg.t.Fatalf("binary %s exists but is not executable", path) } } } // isStale reports whether pkg is stale, and why func (tg *testgoData) isStale(pkg string) (bool, string) { tg.t.Helper() tg.run("list", "-f", "{{.Stale}}:{{.StaleReason}}", pkg) v := strings.TrimSpace(tg.getStdout()) f := strings.SplitN(v, ":", 2) if len(f) == 2 { switch f[0] { case "true": return true, f[1] case "false": return false, f[1] } } tg.t.Fatalf("unexpected output checking staleness of package %v: %v", pkg, v) panic("unreachable") } // wantStale fails with msg if pkg is not stale. func (tg *testgoData) wantStale(pkg, reason, msg string) { tg.t.Helper() stale, why := tg.isStale(pkg) if !stale { tg.t.Fatal(msg) } // We always accept the reason as being "not installed but // available in build cache", because when that is the case go // list doesn't try to sort out the underlying reason why the // package is not installed. if reason == "" && why != "" || !strings.Contains(why, reason) && !strings.Contains(why, "not installed but available in build cache") { tg.t.Errorf("wrong reason for Stale=true: %q, want %q", why, reason) } } // wantNotStale fails with msg if pkg is stale. func (tg *testgoData) wantNotStale(pkg, reason, msg string) { tg.t.Helper() stale, why := tg.isStale(pkg) if stale { tg.t.Fatal(msg) } if reason == "" && why != "" || !strings.Contains(why, reason) { tg.t.Errorf("wrong reason for Stale=false: %q, want %q", why, reason) } } // If -testwork is specified, the test prints the name of the temp directory // and does not remove it when done, so that a programmer can // poke at the test file tree afterward. var testWork = flag.Bool("testwork", false, "") // cleanup cleans up a test that runs testgo. func (tg *testgoData) cleanup() { tg.t.Helper() if *testWork { if tg.tempdir != "" { tg.t.Logf("TESTWORK=%s\n", tg.path(".")) } return } for _, path := range tg.temps { tg.check(removeAll(path)) } if tg.tempdir != "" { tg.check(removeAll(tg.tempdir)) } } func removeAll(dir string) error { // module cache has 0444 directories; // make them writable in order to remove content. filepath.WalkDir(dir, func(path string, info fs.DirEntry, err error) error { // chmod not only directories, but also things that we couldn't even stat // due to permission errors: they may also be unreadable directories. 
if err != nil || info.IsDir() { os.Chmod(path, 0777) } return nil }) return robustio.RemoveAll(dir) } func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) { if testing.Short() { t.Skip("skipping lengthy test in short mode") } tg := testgo(t) defer tg.cleanup() tg.parallel() // Set GOCACHE to an empty directory so that a previous run of // this test does not affect the staleness of the packages it builds. tg.tempDir("gocache") tg.setenv("GOCACHE", tg.path("gocache")) // Copy the runtime packages into a temporary GOROOT // so that we can change files. var dirs []string tg.run("list", "-deps", "runtime") pkgs := strings.Split(strings.TrimSpace(tg.getStdout()), "\n") for _, pkg := range pkgs { dirs = append(dirs, filepath.Join("src", pkg)) } dirs = append(dirs, filepath.Join("pkg/tool", goHostOS+"_"+goHostArch), "pkg/include", ) for _, copydir := range dirs { srcdir := filepath.Join(testGOROOT, copydir) tg.tempDir(filepath.Join("goroot", copydir)) err := filepath.WalkDir(srcdir, func(path string, info fs.DirEntry, err error) error { if err != nil { return err } if info.IsDir() { return nil } srcrel, err := filepath.Rel(srcdir, path) if err != nil { return err } dest := filepath.Join("goroot", copydir, srcrel) if _, err := os.Stat(dest); err == nil { return nil } data, err := os.ReadFile(path) if err != nil { return err } tg.tempFile(dest, string(data)) if strings.Contains(copydir, filepath.Join("pkg", "tool")) { os.Chmod(tg.path(dest), 0777) } return nil }) if err != nil { t.Fatal(err) } } tg.setenv("GOROOT", tg.path("goroot")) addVar := func(name string, idx int) (restore func()) { data, err := os.ReadFile(name) if err != nil { t.Fatal(err) } old := data data = append(data, fmt.Sprintf("var DummyUnusedVar%d bool\n", idx)...) if err := os.WriteFile(name, append(data, '\n'), 0666); err != nil { t.Fatal(err) } tg.sleep() return func() { if err := os.WriteFile(name, old, 0666); err != nil { t.Fatal(err) } } } // Every main package depends on the "runtime". tg.tempFile("d1/src/p1/p1.go", `package main; func main(){}`) tg.setenv("GOPATH", tg.path("d1")) // Pass -i flag to rebuild everything outdated. tg.run("install", "p1") tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, before any changes") // Changing mtime of internal/runtime/sys/sys.go // should have no effect: only the content matters. // In fact this should be true even outside a release branch. sys := tg.path("goroot/src/internal/runtime/sys/sys.go") tg.sleep() restore := addVar(sys, 0) restore() tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after updating mtime of internal/runtime/sys/sys.go") // But changing content of any file should have an effect. // Previously zversion.go was the only one that mattered; // now they all matter, so keep using sys.go. restore = addVar(sys, 1) defer restore() tg.wantStale("p1", "stale dependency: internal/runtime/sys", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go") restore() tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after changing back to old release") addVar(sys, 2) tg.wantStale("p1", "stale dependency: internal/runtime/sys", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go again") tg.run("install", "p1") tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with new release") // Restore to "old" release. 
restore() tg.wantStale("p1", "not installed but available in build cache", "./testgo list claims p1 is NOT stale, incorrectly, after restoring sys.go") tg.run("install", "p1") tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with old release") } func TestPackageMainTestCompilerFlags(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.parallel() tg.makeTempdir() tg.setenv("GOPATH", tg.path(".")) tg.tempFile("src/p1/p1.go", "package main\n") tg.tempFile("src/p1/p1_test.go", "package main\nimport \"testing\"\nfunc Test(t *testing.T){}\n") tg.run("test", "-c", "-n", "p1") tg.grepBothNot(`([\\/]compile|gccgo).* (-p main|-fgo-pkgpath=main).*p1\.go`, "should not have run compile -p main p1.go") tg.grepStderr(`([\\/]compile|gccgo).* (-p p1|-fgo-pkgpath=p1).*p1\.go`, "should have run compile -p p1 p1.go") } // Issue 4104. func TestGoTestWithPackageListedMultipleTimes(t *testing.T) { tooSlow(t, "links and runs a test") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.run("test", "errors", "errors", "errors", "errors", "errors") if strings.Contains(strings.TrimSpace(tg.getStdout()), "\n") { t.Error("go test errors errors errors errors errors tested the same package multiple times") } } func TestGoListHasAConsistentOrder(t *testing.T) { tooSlow(t, "walks all of GOROOT/src twice") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.run("list", "std") first := tg.getStdout() tg.run("list", "std") if first != tg.getStdout() { t.Error("go list std ordering is inconsistent") } } func TestGoListStdDoesNotIncludeCommands(t *testing.T) { tooSlow(t, "walks all of GOROOT/src") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.run("list", "std") tg.grepStdoutNot("cmd/", "go list std shows commands") } func TestGoListCmdOnlyShowsCommands(t *testing.T) { skipIfGccgo(t, "gccgo does not have GOROOT") tooSlow(t, "walks all of GOROOT/src/cmd") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.run("list", "cmd") out := strings.TrimSpace(tg.getStdout()) for _, line := range strings.Split(out, "\n") { if !strings.Contains(line, "cmd/") { t.Error("go list cmd shows non-commands") break } } } func TestGoListDeps(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempDir("src/p1/p2/p3/p4") tg.setenv("GOPATH", tg.path(".")) tg.tempFile("src/p1/p.go", "package p1\nimport _ \"p1/p2\"\n") tg.tempFile("src/p1/p2/p.go", "package p2\nimport _ \"p1/p2/p3\"\n") tg.tempFile("src/p1/p2/p3/p.go", "package p3\nimport _ \"p1/p2/p3/p4\"\n") tg.tempFile("src/p1/p2/p3/p4/p.go", "package p4\n") tg.run("list", "-f", "{{.Deps}}", "p1") tg.grepStdout("p1/p2/p3/p4", "Deps(p1) does not mention p4") tg.run("list", "-deps", "p1") tg.grepStdout("p1/p2/p3/p4", "-deps p1 does not mention p4") if runtime.Compiler != "gccgo" { // Check the list is in dependency order. tg.run("list", "-deps", "math") want := "unsafe\ninternal/cpu\nmath/bits\nmath\n" out := tg.stdout.String() if !strings.Contains(out, "internal/cpu") { // Some systems don't use internal/cpu. 
want = "unsafe\nmath/bits\nmath\n" } if tg.stdout.String() != want { t.Fatalf("list -deps math: wrong order\nhave %q\nwant %q", tg.stdout.String(), want) } } } func TestGoListTest(t *testing.T) { skipIfGccgo(t, "gccgo does not have standard packages") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.makeTempdir() tg.setenv("GOCACHE", tg.tempdir) tg.run("list", "-test", "-deps", "bytes") tg.grepStdout(`^bytes.test$`, "missing test main") tg.grepStdout(`^bytes$`, "missing real bytes") tg.grepStdout(`^bytes \[bytes.test\]$`, "missing test copy of bytes") tg.grepStdout(`^testing \[bytes.test\]$`, "missing test copy of testing") tg.grepStdoutNot(`^testing$`, "unexpected real copy of testing") tg.run("list", "-test", "bytes") tg.grepStdout(`^bytes.test$`, "missing test main") tg.grepStdout(`^bytes$`, "missing real bytes") tg.grepStdout(`^bytes \[bytes.test\]$`, "unexpected test copy of bytes") tg.grepStdoutNot(`^testing \[bytes.test\]$`, "unexpected test copy of testing") tg.grepStdoutNot(`^testing$`, "unexpected real copy of testing") tg.run("list", "-test", "cmd/buildid", "cmd/doc") tg.grepStdout(`^cmd/buildid$`, "missing cmd/buildid") tg.grepStdout(`^cmd/doc$`, "missing cmd/doc") tg.grepStdout(`^cmd/doc\.test$`, "missing cmd/doc test") tg.grepStdoutNot(`^cmd/buildid\.test$`, "unexpected cmd/buildid test") tg.grepStdoutNot(`^testing`, "unexpected testing") tg.run("list", "-test", "runtime/cgo") tg.grepStdout(`^runtime/cgo$`, "missing runtime/cgo") tg.run("list", "-deps", "-f", "{{if .DepOnly}}{{.ImportPath}}{{end}}", "sort") tg.grepStdout(`^internal/reflectlite$`, "missing internal/reflectlite") tg.grepStdoutNot(`^sort`, "unexpected sort") } func TestGoListCompiledCgo(t *testing.T) { tooSlow(t, "compiles cgo files") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.makeTempdir() tg.setenv("GOCACHE", tg.tempdir) tg.run("list", "-f", `{{join .CgoFiles "\n"}}`, "net") if tg.stdout.String() == "" { t.Skip("net does not use cgo") } if strings.Contains(tg.stdout.String(), tg.tempdir) { t.Fatalf(".CgoFiles unexpectedly mentioned cache %s", tg.tempdir) } tg.run("list", "-compiled", "-f", `{{.Dir}}{{"\n"}}{{join .CompiledGoFiles "\n"}}`, "net") if !strings.Contains(tg.stdout.String(), tg.tempdir) { t.Fatalf(".CompiledGoFiles with -compiled did not mention cache %s", tg.tempdir) } dir := "" for _, file := range strings.Split(tg.stdout.String(), "\n") { if file == "" { continue } if dir == "" { dir = file continue } if !strings.Contains(file, "/") && !strings.Contains(file, `\`) { file = filepath.Join(dir, file) } if _, err := os.Stat(file); err != nil { t.Fatalf("cannot find .CompiledGoFiles result %s: %v", file, err) } } } func TestGoListExport(t *testing.T) { skipIfGccgo(t, "gccgo does not have standard packages") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.makeTempdir() tg.setenv("GOCACHE", tg.tempdir) tg.run("list", "-f", "{{.Export}}", "strings") if tg.stdout.String() != "" { t.Fatalf(".Export without -export unexpectedly set") } tg.run("list", "-export", "-f", "{{.Export}}", "strings") file := strings.TrimSpace(tg.stdout.String()) if file == "" { t.Fatalf(".Export with -export was empty") } if _, err := os.Stat(file); err != nil { t.Fatalf("cannot find .Export result %s: %v", file, err) } tg.run("list", "-export", "-f", "{{.BuildID}}", "strings") buildID := strings.TrimSpace(tg.stdout.String()) if buildID == "" { t.Fatalf(".BuildID with -export was empty") } tg.run("tool", "buildid", file) toolBuildID := strings.TrimSpace(tg.stdout.String()) if buildID != toolBuildID { 
t.Fatalf(".BuildID with -export %q disagrees with 'go tool buildid' %q", buildID, toolBuildID) } } // Issue 4096. Validate the output of unsuccessful go install foo/quxx. func TestUnsuccessfulGoInstallShouldMentionMissingPackage(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.parallel() tg.runFail("install", "foo/quxx") if tg.grepCountBoth(`cannot find package "foo/quxx" in any of`) != 1 { t.Error(`go install foo/quxx expected error: .*cannot find package "foo/quxx" in any of`) } } func TestGOROOTSearchFailureReporting(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.parallel() tg.runFail("install", "foo/quxx") if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("foo", "quxx"))+` \(from \$GOROOT\)$`) != 1 { t.Error(`go install foo/quxx expected error: .*foo/quxx (from $GOROOT)`) } } func TestMultipleGOPATHEntriesReportedSeparately(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.parallel() sep := string(filepath.ListSeparator) tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b")) tg.runFail("install", "foo/quxx") if tg.grepCountBoth(`testdata[/\\].[/\\]src[/\\]foo[/\\]quxx`) != 2 { t.Error(`go install foo/quxx expected error: .*testdata/a/src/foo/quxx (from $GOPATH)\n.*testdata/b/src/foo/quxx`) } } // Test (from $GOPATH) annotation is reported for the first GOPATH entry, func TestMentionGOPATHInFirstGOPATHEntry(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.parallel() sep := string(filepath.ListSeparator) tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b")) tg.runFail("install", "foo/quxx") if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("testdata", "a", "src", "foo", "quxx"))+` \(from \$GOPATH\)$`) != 1 { t.Error(`go install foo/quxx expected error: .*testdata/a/src/foo/quxx (from $GOPATH)`) } } // but not on the second. func TestMentionGOPATHNotOnSecondEntry(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.parallel() sep := string(filepath.ListSeparator) tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata", "a")+sep+filepath.Join(tg.pwd(), "testdata", "b")) tg.runFail("install", "foo/quxx") if tg.grepCountBoth(regexp.QuoteMeta(filepath.Join("testdata", "b", "src", "foo", "quxx"))+`$`) != 1 { t.Error(`go install foo/quxx expected error: .*testdata/b/src/foo/quxx`) } } func homeEnvName() string { switch runtime.GOOS { case "windows": return "USERPROFILE" case "plan9": return "home" default: return "HOME" } } func tempEnvName() string { switch runtime.GOOS { case "windows": return "TMP" case "plan9": return "TMPDIR" // actually plan 9 doesn't have one at all but this is fine default: return "TMPDIR" } } func pathEnvName() string { switch runtime.GOOS { case "plan9": return "path" default: return "PATH" } } func TestDefaultGOPATH(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempDir("home/go") tg.setenv(homeEnvName(), tg.path("home")) // Set TEST_TELEMETRY_DIR to a path that doesn't exist // so that the counter uploading code doesn't write // the counter token file to the temp dir after the test finishes. 
tg.setenv("TEST_TELEMETRY_DIR", "/no-telemetry-dir") tg.run("env", "GOPATH") tg.grepStdout(regexp.QuoteMeta(tg.path("home/go")), "want GOPATH=$HOME/go") tg.setenv("GOROOT", tg.path("home/go")) tg.run("env", "GOPATH") tg.grepStdoutNot(".", "want unset GOPATH because GOROOT=$HOME/go") tg.setenv("GOROOT", tg.path("home/go")+"/") tg.run("env", "GOPATH") tg.grepStdoutNot(".", "want unset GOPATH because GOROOT=$HOME/go/") } func TestDefaultGOPATHPrintedSearchList(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.parallel() tg.setenv("GOPATH", "") tg.tempDir("home") tg.setenv(homeEnvName(), tg.path("home")) // Set TEST_TELEMETRY_DIR to a path that doesn't exist // so that the counter uploading code doesn't write // the counter token file to the temp dir after the test finishes. tg.setenv("TEST_TELEMETRY_DIR", "/no-telemetry-dir") tg.runFail("install", "github.com/golang/example/hello") tg.grepStderr(regexp.QuoteMeta(tg.path("home/go/src/github.com/golang/example/hello"))+`.*from \$GOPATH`, "expected default GOPATH") } func TestLdflagsArgumentsWithSpacesIssue3941(t *testing.T) { skipIfGccgo(t, "gccgo does not support -ldflags -X") tooSlow(t, "compiles and links a binary") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempFile("main.go", `package main var extern string func main() { println(extern) }`) tg.run("run", "-ldflags", `-X "main.extern=hello world"`, tg.path("main.go")) tg.grepStderr("^hello world", `ldflags -X "main.extern=hello world"' failed`) } func TestLdFlagsLongArgumentsIssue42295(t *testing.T) { // Test the extremely long command line arguments that contain '\n' characters // get encoded and passed correctly. skipIfGccgo(t, "gccgo does not support -ldflags -X") tooSlow(t, "compiles and links a binary") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempFile("main.go", `package main var extern string func main() { print(extern) }`) testStr := "test test test test test \n\\ " var buf strings.Builder for buf.Len() < sys.ExecArgLengthLimit+1 { buf.WriteString(testStr) } tg.run("run", "-ldflags", fmt.Sprintf(`-X "main.extern=%s"`, buf.String()), tg.path("main.go")) if tg.stderr.String() != buf.String() { t.Errorf("strings differ") } } func TestGoTestDashCDashOControlsBinaryLocation(t *testing.T) { skipIfGccgo(t, "gccgo has no standard packages") tooSlow(t, "compiles and links a test binary") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.makeTempdir() tg.run("test", "-c", "-o", tg.path("myerrors.test"+exeSuffix), "errors") tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -c -o myerrors.test did not create myerrors.test") } func TestGoTestDashOWritesBinary(t *testing.T) { skipIfGccgo(t, "gccgo has no standard packages") tooSlow(t, "compiles and runs a test binary") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.makeTempdir() tg.run("test", "-o", tg.path("myerrors.test"+exeSuffix), "errors") tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -o myerrors.test did not create myerrors.test") } // Issue 4515. 
func TestInstallWithTags(t *testing.T) { tooSlow(t, "compiles and links binaries") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempDir("bin") tg.tempFile("src/example/a/main.go", `package main func main() {}`) tg.tempFile("src/example/b/main.go", `// +build mytag package main func main() {}`) tg.setenv("GOPATH", tg.path(".")) tg.run("install", "-tags", "mytag", "example/a", "example/b") tg.wantExecutable(tg.path("bin/a"+exeSuffix), "go install example/a example/b did not install binaries") tg.wantExecutable(tg.path("bin/b"+exeSuffix), "go install example/a example/b did not install binaries") tg.must(os.Remove(tg.path("bin/a" + exeSuffix))) tg.must(os.Remove(tg.path("bin/b" + exeSuffix))) tg.run("install", "-tags", "mytag", "example/...") tg.wantExecutable(tg.path("bin/a"+exeSuffix), "go install example/... did not install binaries") tg.wantExecutable(tg.path("bin/b"+exeSuffix), "go install example/... did not install binaries") tg.run("list", "-tags", "mytag", "example/b...") if strings.TrimSpace(tg.getStdout()) != "example/b" { t.Error("go list example/b did not find example/b") } } // Issue 17451, 17662. func TestSymlinkWarning(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.parallel() tg.makeTempdir() tg.setenv("GOPATH", tg.path(".")) tg.tempDir("src/example/xx") tg.tempDir("yy/zz") tg.tempFile("yy/zz/zz.go", "package zz\n") if err := os.Symlink(tg.path("yy"), tg.path("src/example/xx/yy")); err != nil { t.Skipf("symlink failed: %v", err) } tg.run("list", "example/xx/z...") tg.grepStdoutNot(".", "list should not have matched anything") tg.grepStderr("matched no packages", "list should have reported that pattern matched no packages") tg.grepStderrNot("symlink", "list should not have reported symlink") tg.run("list", "example/xx/...") tg.grepStdoutNot(".", "list should not have matched anything") tg.grepStderr("matched no packages", "list should have reported that pattern matched no packages") tg.grepStderr("ignoring symlink", "list should have reported symlink") } func TestCgoShowsFullPathNames(t *testing.T) { testenv.MustHaveCGO(t) tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempFile("src/x/y/dirname/foo.go", ` package foo import "C" func f() {`) tg.setenv("GOPATH", tg.path(".")) tg.runFail("build", "x/y/dirname") tg.grepBoth("x/y/dirname", "error did not use full path") } func TestCgoHandlesWlORIGIN(t *testing.T) { tooSlow(t, "compiles cgo files") testenv.MustHaveCGO(t) tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempFile("src/origin/origin.go", `package origin // #cgo !darwin,!windows LDFLAGS: -Wl,-rpath,$ORIGIN // void f(void) {} import "C" func f() { C.f() }`) tg.setenv("GOPATH", tg.path(".")) tg.run("build", "origin") } func TestCgoPkgConfig(t *testing.T) { tooSlow(t, "compiles cgo files") testenv.MustHaveCGO(t) tg := testgo(t) defer tg.cleanup() tg.parallel() tg.run("env", "PKG_CONFIG") pkgConfig := strings.TrimSpace(tg.getStdout()) testenv.MustHaveExecPath(t, pkgConfig) if out, err := testenv.Command(t, pkgConfig, "--atleast-pkgconfig-version", "0.24").CombinedOutput(); err != nil { t.Skipf("%s --atleast-pkgconfig-version 0.24: %v\n%s", pkgConfig, err, out) } // OpenBSD's pkg-config is strict about whitespace and only // supports backslash-escaped whitespace. It does not support // quotes, which the normal freedesktop.org pkg-config does // support. 
See https://man.openbsd.org/pkg-config.1 tg.tempFile("foo.pc", ` Name: foo Description: The foo library Version: 1.0.0 Cflags: -Dhello=10 -Dworld=+32 -DDEFINED_FROM_PKG_CONFIG=hello\ world `) tg.tempFile("foo.go", `package main /* #cgo pkg-config: foo int value() { return DEFINED_FROM_PKG_CONFIG; } */ import "C" import "os" func main() { if C.value() != 42 { println("value() =", C.value(), "wanted 42") os.Exit(1) } } `) tg.setenv("PKG_CONFIG_PATH", tg.path(".")) tg.run("run", tg.path("foo.go")) // test for ldflags tg.tempFile("bar.pc", ` Name: bar Description: The bar library Version: 1.0.0 Libs: -Wl,-rpath=/path\ with\ spaces/bin `) tg.tempFile("bar.go", `package main /* #cgo pkg-config: bar */ import "C" func main() {} `) tg.run("run", tg.path("bar.go")) } func TestListTemplateContextFunction(t *testing.T) { t.Parallel() for _, tt := range []struct { v string want string }{ {"GOARCH", runtime.GOARCH}, {"GOOS", runtime.GOOS}, {"GOROOT", testGOROOT}, {"GOPATH", os.Getenv("GOPATH")}, {"CgoEnabled", ""}, {"UseAllFiles", ""}, {"Compiler", ""}, {"BuildTags", ""}, {"ReleaseTags", ""}, {"InstallSuffix", ""}, } { tt := tt t.Run(tt.v, func(t *testing.T) { tg := testgo(t) tg.parallel() defer tg.cleanup() tmpl := "{{context." + tt.v + "}}" tg.run("list", "-f", tmpl) if tt.want == "" { return } if got := strings.TrimSpace(tg.getStdout()); got != tt.want { t.Errorf("go list -f %q: got %q; want %q", tmpl, got, tt.want) } }) } } // Test that you cannot use a local import in a package // accessed by a non-local import (found in a GOPATH/GOROOT). // See golang.org/issue/17475. func TestImportLocal(t *testing.T) { tooSlow(t, "builds a lot of sequential packages") tg := testgo(t) tg.parallel() defer tg.cleanup() tg.tempFile("src/dir/x/x.go", `package x var X int `) tg.setenv("GOPATH", tg.path(".")) tg.run("build", "dir/x") // Ordinary import should work. tg.tempFile("src/dir/p0/p.go", `package p0 import "dir/x" var _ = x.X `) tg.run("build", "dir/p0") // Relative import should not. tg.tempFile("src/dir/p1/p.go", `package p1 import "../x" var _ = x.X `) tg.runFail("build", "dir/p1") tg.grepStderr("local import.*in non-local package", "did not diagnose local import") // ... even in a test. tg.tempFile("src/dir/p2/p.go", `package p2 `) tg.tempFile("src/dir/p2/p_test.go", `package p2 import "../x" import "testing" var _ = x.X func TestFoo(t *testing.T) {} `) tg.run("build", "dir/p2") tg.runFail("test", "dir/p2") tg.grepStderr("local import.*in non-local package", "did not diagnose local import") // ... even in an xtest. tg.tempFile("src/dir/p2/p_test.go", `package p2_test import "../x" import "testing" var _ = x.X func TestFoo(t *testing.T) {} `) tg.run("build", "dir/p2") tg.runFail("test", "dir/p2") tg.grepStderr("local import.*in non-local package", "did not diagnose local import") // Relative import starting with ./ should not work either. tg.tempFile("src/dir/d.go", `package dir import "./x" var _ = x.X `) tg.runFail("build", "dir") tg.grepStderr("local import.*in non-local package", "did not diagnose local import") // ... even in a test. tg.tempFile("src/dir/d.go", `package dir `) tg.tempFile("src/dir/d_test.go", `package dir import "./x" import "testing" var _ = x.X func TestFoo(t *testing.T) {} `) tg.run("build", "dir") tg.runFail("test", "dir") tg.grepStderr("local import.*in non-local package", "did not diagnose local import") // ... even in an xtest. 
tg.tempFile("src/dir/d_test.go", `package dir_test import "./x" import "testing" var _ = x.X func TestFoo(t *testing.T) {} `) tg.run("build", "dir") tg.runFail("test", "dir") tg.grepStderr("local import.*in non-local package", "did not diagnose local import") // Relative import plain ".." should not work. tg.tempFile("src/dir/x/y/y.go", `package dir import ".." var _ = x.X `) tg.runFail("build", "dir/x/y") tg.grepStderr("local import.*in non-local package", "did not diagnose local import") // ... even in a test. tg.tempFile("src/dir/x/y/y.go", `package y `) tg.tempFile("src/dir/x/y/y_test.go", `package y import ".." import "testing" var _ = x.X func TestFoo(t *testing.T) {} `) tg.run("build", "dir/x/y") tg.runFail("test", "dir/x/y") tg.grepStderr("local import.*in non-local package", "did not diagnose local import") // ... even in an x test. tg.tempFile("src/dir/x/y/y_test.go", `package y_test import ".." import "testing" var _ = x.X func TestFoo(t *testing.T) {} `) tg.run("build", "dir/x/y") tg.runFail("test", "dir/x/y") tg.grepStderr("local import.*in non-local package", "did not diagnose local import") // Relative import "." should not work. tg.tempFile("src/dir/x/xx.go", `package x import "." var _ = x.X `) tg.runFail("build", "dir/x") tg.grepStderr("cannot import current directory", "did not diagnose import current directory") // ... even in a test. tg.tempFile("src/dir/x/xx.go", `package x `) tg.tempFile("src/dir/x/xx_test.go", `package x import "." import "testing" var _ = x.X func TestFoo(t *testing.T) {} `) tg.run("build", "dir/x") tg.runFail("test", "dir/x") tg.grepStderr("cannot import current directory", "did not diagnose import current directory") // ... even in an xtest. tg.tempFile("src/dir/x/xx.go", `package x `) tg.tempFile("src/dir/x/xx_test.go", `package x_test import "." import "testing" var _ = x.X func TestFoo(t *testing.T) {} `) tg.run("build", "dir/x") tg.runFail("test", "dir/x") tg.grepStderr("cannot import current directory", "did not diagnose import current directory") } func TestGoInstallPkgdir(t *testing.T) { skipIfGccgo(t, "gccgo has no standard packages") tooSlow(t, "builds a package with cgo dependencies") // Only the stdlib packages that use cgo have install // targets, (we're using net below) so cgo is required // for the install. testenv.MustHaveCGO(t) tg := testgo(t) tg.parallel() tg.setenv("GODEBUG", "installgoroot=all") defer tg.cleanup() tg.makeTempdir() pkg := tg.path(".") tg.run("install", "-pkgdir", pkg, "net") tg.mustExist(filepath.Join(pkg, "net.a")) tg.mustNotExist(filepath.Join(pkg, "runtime/cgo.a")) } // For issue 14337. 
func TestParallelTest(t *testing.T) { tooSlow(t, "links and runs test binaries") tg := testgo(t) tg.parallel() defer tg.cleanup() tg.makeTempdir() const testSrc = `package package_test import ( "testing" ) func TestTest(t *testing.T) { }` tg.tempFile("src/p1/p1_test.go", strings.Replace(testSrc, "package_test", "p1_test", 1)) tg.tempFile("src/p2/p2_test.go", strings.Replace(testSrc, "package_test", "p2_test", 1)) tg.tempFile("src/p3/p3_test.go", strings.Replace(testSrc, "package_test", "p3_test", 1)) tg.tempFile("src/p4/p4_test.go", strings.Replace(testSrc, "package_test", "p4_test", 1)) tg.setenv("GOPATH", tg.path(".")) tg.run("test", "-p=4", "p1", "p2", "p3", "p4") } func TestBinaryOnlyPackages(t *testing.T) { tooSlow(t, "compiles several packages sequentially") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.makeTempdir() tg.setenv("GOPATH", tg.path(".")) tg.tempFile("src/p1/p1.go", `//go:binary-only-package package p1 `) tg.wantStale("p1", "binary-only packages are no longer supported", "p1 is binary-only, and this message should always be printed") tg.runFail("install", "p1") tg.grepStderr("binary-only packages are no longer supported", "did not report attempt to compile binary-only package") tg.tempFile("src/p1/p1.go", ` package p1 import "fmt" func F(b bool) { fmt.Printf("hello from p1\n"); if b { F(false) } } `) tg.run("install", "p1") os.Remove(tg.path("src/p1/p1.go")) tg.mustNotExist(tg.path("src/p1/p1.go")) tg.tempFile("src/p2/p2.go", `//go:binary-only-packages-are-not-great package p2 import "p1" func F() { p1.F(true) } `) tg.runFail("install", "p2") tg.grepStderr("no Go files", "did not complain about missing sources") tg.tempFile("src/p1/missing.go", `//go:binary-only-package package p1 import _ "fmt" func G() `) tg.wantStale("p1", "binary-only package", "should NOT want to rebuild p1 (first)") tg.runFail("install", "p2") tg.grepStderr("p1: binary-only packages are no longer supported", "did not report error for binary-only p1") tg.run("list", "-deps", "-f", "{{.ImportPath}}: {{.BinaryOnly}}", "p2") tg.grepStdout("p1: true", "p1 not listed as BinaryOnly") tg.grepStdout("p2: false", "p2 listed as BinaryOnly") } // Issue 16050 and 21884. func TestLinkSysoFiles(t *testing.T) { if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" { t.Skip("not linux/amd64") } tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempDir("src/syso") tg.tempFile("src/syso/a.syso", ``) tg.tempFile("src/syso/b.go", `package syso`) tg.setenv("GOPATH", tg.path(".")) // We should see the .syso file regardless of the setting of // CGO_ENABLED. tg.setenv("CGO_ENABLED", "1") tg.run("list", "-f", "{{.SysoFiles}}", "syso") tg.grepStdout("a.syso", "missing syso file with CGO_ENABLED=1") tg.setenv("CGO_ENABLED", "0") tg.run("list", "-f", "{{.SysoFiles}}", "syso") tg.grepStdout("a.syso", "missing syso file with CGO_ENABLED=0") tg.setenv("CGO_ENABLED", "1") tg.run("list", "-msan", "-f", "{{.SysoFiles}}", "syso") tg.grepStdoutNot("a.syso", "unexpected syso file with -msan") } // Issue 16120. 
func TestGenerateUsesBuildContext(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("this test won't run under Windows") } tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempDir("src/gen") tg.tempFile("src/gen/gen.go", "package gen\n//go:generate echo $GOOS $GOARCH\n") tg.setenv("GOPATH", tg.path(".")) tg.setenv("GOOS", "linux") tg.setenv("GOARCH", "amd64") tg.run("generate", "gen") tg.grepStdout("linux amd64", "unexpected GOOS/GOARCH combination") tg.setenv("GOOS", "darwin") tg.setenv("GOARCH", "arm64") tg.run("generate", "gen") tg.grepStdout("darwin arm64", "unexpected GOOS/GOARCH combination") } func TestGoEnv(t *testing.T) { tg := testgo(t) tg.parallel() defer tg.cleanup() tg.setenv("GOOS", "freebsd") // to avoid invalid pair errors tg.setenv("GOARCH", "arm") tg.run("env", "GOARCH") tg.grepStdout("^arm$", "GOARCH not honored") tg.run("env", "GCCGO") tg.grepStdout(".", "GCCGO unexpectedly empty") tg.run("env", "CGO_CFLAGS") tg.grepStdout(".", "default CGO_CFLAGS unexpectedly empty") tg.setenv("CGO_CFLAGS", "-foobar") tg.run("env", "CGO_CFLAGS") tg.grepStdout("^-foobar$", "CGO_CFLAGS not honored") tg.setenv("CC", "gcc -fmust -fgo -ffaster") tg.run("env", "CC") tg.grepStdout("gcc", "CC not found") tg.run("env", "GOGCCFLAGS") tg.grepStdout("-ffaster", "CC arguments not found") tg.run("env", "GOVERSION") envVersion := strings.TrimSpace(tg.stdout.String()) tg.run("version") cmdVersion := strings.TrimSpace(tg.stdout.String()) // If 'go version' is "go version <version> <goos>/<goarch>", then // 'go env GOVERSION' is just "<version>". if cmdVersion == envVersion || !strings.Contains(cmdVersion, envVersion) { t.Fatalf("'go env GOVERSION' %q should be a shorter substring of 'go version' %q", envVersion, cmdVersion) } } const ( noMatchesPattern = `(?m)^ok.*\[no tests to run\]` okPattern = `(?m)^ok` ) // Issue 18044. func TestLdBindNow(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.parallel() tg.setenv("LD_BIND_NOW", "1") tg.run("help") } // Issue 18225. // This is really a cmd/asm issue but this is a convenient place to test it. func TestConcurrentAsm(t *testing.T) { skipIfGccgo(t, "gccgo does not use cmd/asm") tg := testgo(t) defer tg.cleanup() tg.parallel() asm := `DATA ·constants<>+0x0(SB)/8,$0 GLOBL ·constants<>(SB),8,$8 ` tg.tempFile("go/src/p/a.s", asm) tg.tempFile("go/src/p/b.s", asm) tg.tempFile("go/src/p/p.go", `package p`) tg.setenv("GOPATH", tg.path("go")) tg.run("build", "p") } // Issue 18975. func TestFFLAGS(t *testing.T) { testenv.MustHaveCGO(t) tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempFile("p/src/p/main.go", `package main // #cgo FFLAGS: -no-such-fortran-flag import "C" func main() {} `) tg.tempFile("p/src/p/a.f", `! comment`) tg.setenv("GOPATH", tg.path("p")) // This should normally fail because we are passing an unknown flag, // but issue #19080 points to Fortran compilers that succeed anyhow. // To work either way we call doRun directly rather than run or runFail. tg.doRun([]string{"build", "-x", "p"}) tg.grepStderr("no-such-fortran-flag", `missing expected "-no-such-fortran-flag"`) } // Issue 19198. // This is really a cmd/link issue but this is a convenient place to test it. 
func TestDuplicateGlobalAsmSymbols(t *testing.T) { skipIfGccgo(t, "gccgo does not use cmd/asm") tooSlow(t, "links a binary with cgo dependencies") if runtime.GOARCH != "386" && runtime.GOARCH != "amd64" { t.Skipf("skipping test on %s", runtime.GOARCH) } testenv.MustHaveCGO(t) tg := testgo(t) defer tg.cleanup() tg.parallel() asm := ` #include "textflag.h" DATA sym<>+0x0(SB)/8,$0 GLOBL sym<>(SB),(NOPTR+RODATA),$8 TEXT ·Data(SB),NOSPLIT,$0 MOVB sym<>(SB), AX MOVB AX, ret+0(FP) RET ` tg.tempFile("go/src/a/a.s", asm) tg.tempFile("go/src/a/a.go", `package a; func Data() uint8`) tg.tempFile("go/src/b/b.s", asm) tg.tempFile("go/src/b/b.go", `package b; func Data() uint8`) tg.tempFile("go/src/p/p.go", ` package main import "a" import "b" import "C" func main() { _ = a.Data() + b.Data() } `) tg.setenv("GOPATH", tg.path("go")) exe := tg.path("p.exe") tg.creatingTemp(exe) tg.run("build", "-o", exe, "p") } func copyFile(src, dst string, perm fs.FileMode) error { sf, err := os.Open(src) if err != nil { return err } defer sf.Close() df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) if err != nil { return err } _, err = io.Copy(df, sf) err2 := df.Close() if err != nil { return err } return err2 } func TestNeedVersion(t *testing.T) { skipIfGccgo(t, "gccgo does not use cmd/compile") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempFile("goversion.go", `package main; func main() {}`) path := tg.path("goversion.go") tg.setenv("TESTGO_TOOLCHAIN_VERSION", "go1.testgo") tg.runFail("run", path) tg.grepStderr("compile", "does not match go tool version") } func TestBuildmodePIE(t *testing.T) { tooSlow(t, "links binaries") if !platform.BuildModeSupported(runtime.Compiler, "pie", runtime.GOOS, runtime.GOARCH) { t.Skipf("skipping test because buildmode=pie is not supported on %s/%s", runtime.GOOS, runtime.GOARCH) } // Skip on alpine until https://go.dev/issues/54354 resolved. if strings.HasSuffix(testenv.Builder(), "-alpine") { t.Skip("skipping PIE tests on alpine; see https://go.dev/issues/54354") } t.Run("non-cgo", func(t *testing.T) { testBuildmodePIE(t, false, true) }) t.Run("cgo", func(t *testing.T) { testenv.MustHaveCGO(t) testBuildmodePIE(t, true, true) }) } func TestWindowsDefaultBuildmodIsPIE(t *testing.T) { if runtime.GOOS != "windows" { t.Skip("skipping windows only test") } tooSlow(t, "links binaries") t.Run("non-cgo", func(t *testing.T) { testBuildmodePIE(t, false, false) }) t.Run("cgo", func(t *testing.T) { testenv.MustHaveCGO(t) testBuildmodePIE(t, true, false) }) } func testBuildmodePIE(t *testing.T, useCgo, setBuildmodeToPIE bool) { tg := testgo(t) defer tg.cleanup() tg.parallel() var s string if useCgo { s = `import "C";` } tg.tempFile("main.go", fmt.Sprintf(`package main;%s func main() { print("hello") }`, s)) src := tg.path("main.go") obj := tg.path("main.exe") args := []string{"build"} if setBuildmodeToPIE { args = append(args, "-buildmode=pie") } args = append(args, "-o", obj, src) tg.run(args...) 
switch runtime.GOOS { case "linux", "android", "freebsd": f, err := elf.Open(obj) if err != nil { t.Fatal(err) } defer f.Close() if f.Type != elf.ET_DYN { t.Errorf("PIE type must be ET_DYN, but %s", f.Type) } case "darwin", "ios": f, err := macho.Open(obj) if err != nil { t.Fatal(err) } defer f.Close() if f.Flags&macho.FlagDyldLink == 0 { t.Error("PIE must have DyldLink flag, but not") } if f.Flags&macho.FlagPIE == 0 { t.Error("PIE must have PIE flag, but not") } case "windows": f, err := pe.Open(obj) if err != nil { t.Fatal(err) } defer f.Close() if f.Section(".reloc") == nil { t.Error(".reloc section is not present") } if (f.FileHeader.Characteristics & pe.IMAGE_FILE_RELOCS_STRIPPED) != 0 { t.Error("IMAGE_FILE_RELOCS_STRIPPED flag is set") } var dc uint16 switch oh := f.OptionalHeader.(type) { case *pe.OptionalHeader32: dc = oh.DllCharacteristics case *pe.OptionalHeader64: dc = oh.DllCharacteristics if (dc & pe.IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA) == 0 { t.Error("IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA flag is not set") } default: t.Fatalf("unexpected optional header type of %T", f.OptionalHeader) } if (dc & pe.IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) == 0 { t.Error("IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE flag is not set") } if useCgo { // Test that only one symbol is exported (#40795). // PIE binaries don´t require .edata section but unfortunately // binutils doesn´t generate a .reloc section unless there is // at least one symbol exported. // See https://sourceware.org/bugzilla/show_bug.cgi?id=19011 section := f.Section(".edata") if section == nil { t.Skip(".edata section is not present") } // TODO: deduplicate this struct from cmd/link/internal/ld/pe.go type IMAGE_EXPORT_DIRECTORY struct { _ [2]uint32 _ [2]uint16 _ [2]uint32 NumberOfFunctions uint32 NumberOfNames uint32 _ [3]uint32 } var e IMAGE_EXPORT_DIRECTORY if err := binary.Read(section.Open(), binary.LittleEndian, &e); err != nil { t.Fatalf("binary.Read failed: %v", err) } // Only _cgo_dummy_export should be exported if e.NumberOfFunctions != 1 { t.Fatalf("got %d exported functions; want 1", e.NumberOfFunctions) } if e.NumberOfNames != 1 { t.Fatalf("got %d exported names; want 1", e.NumberOfNames) } } default: // testBuildmodePIE opens object files, so it needs to understand the object // file format. t.Skipf("skipping test: test helper does not support %s", runtime.GOOS) } out, err := testenv.Command(t, obj).CombinedOutput() if err != nil { t.Fatal(err) } if string(out) != "hello" { t.Errorf("got %q; want %q", out, "hello") } } func TestUpxCompression(t *testing.T) { if runtime.GOOS != "linux" || (runtime.GOARCH != "amd64" && runtime.GOARCH != "386") { t.Skipf("skipping upx test on %s/%s", runtime.GOOS, runtime.GOARCH) } testenv.MustHaveExecPath(t, "upx") out, err := testenv.Command(t, "upx", "--version").CombinedOutput() if err != nil { t.Fatalf("upx --version failed: %v", err) } // upx --version prints `upx <version>` in the first line of output: // upx 3.94 // [...] 
re := regexp.MustCompile(`([[:digit:]]+)\.([[:digit:]]+)`) upxVersion := re.FindStringSubmatch(string(out)) if len(upxVersion) != 3 { t.Fatalf("bad upx version string: %s", upxVersion) } major, err1 := strconv.Atoi(upxVersion[1]) minor, err2 := strconv.Atoi(upxVersion[2]) if err1 != nil || err2 != nil { t.Fatalf("bad upx version string: %s", upxVersion[0]) } // Anything below 3.94 is known not to work with go binaries if (major < 3) || (major == 3 && minor < 94) { t.Skipf("skipping because upx version %v.%v is too old", major, minor) } tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempFile("main.go", `package main; import "fmt"; func main() { fmt.Print("hello upx") }`) src := tg.path("main.go") obj := tg.path("main") tg.run("build", "-o", obj, src) out, err = testenv.Command(t, "upx", obj).CombinedOutput() if err != nil { t.Logf("executing upx\n%s\n", out) t.Fatalf("upx failed with %v", err) } out, err = testenv.Command(t, obj).CombinedOutput() if err != nil { t.Logf("%s", out) t.Fatalf("running compressed go binary failed with error %s", err) } if string(out) != "hello upx" { t.Fatalf("bad output from compressed go binary:\ngot %q; want %q", out, "hello upx") } } var gocacheverify = godebug.New("#gocacheverify") func TestCacheListStale(t *testing.T) { tooSlow(t, "links a binary") if gocacheverify.Value() == "1" { t.Skip("GODEBUG gocacheverify") } tg := testgo(t) defer tg.cleanup() tg.parallel() tg.makeTempdir() tg.setenv("GOCACHE", tg.path("cache")) tg.tempFile("gopath/src/p/p.go", "package p; import _ \"q\"; func F(){}\n") tg.tempFile("gopath/src/q/q.go", "package q; func F(){}\n") tg.tempFile("gopath/src/m/m.go", "package main; import _ \"q\"; func main(){}\n") tg.setenv("GOPATH", tg.path("gopath")) tg.run("install", "p", "m") tg.run("list", "-f={{.ImportPath}} {{.Stale}}", "m", "q", "p") tg.grepStdout("^m false", "m should not be stale") tg.grepStdout("^q true", "q should be stale") tg.grepStdout("^p false", "p should not be stale") } func TestCacheCoverage(t *testing.T) { tooSlow(t, "links and runs a test binary with coverage enabled") if gocacheverify.Value() == "1" { t.Skip("GODEBUG gocacheverify") } tg := testgo(t) defer tg.cleanup() tg.parallel() tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) tg.makeTempdir() tg.setenv("GOCACHE", tg.path("c1")) tg.run("test", "-cover", "-short", "strings") tg.run("test", "-cover", "-short", "math", "strings") } func TestIssue22588(t *testing.T) { // Don't get confused by stderr coming from tools. tg := testgo(t) defer tg.cleanup() tg.parallel() tg.wantNotStale("runtime", "", "must be non-stale to compare staleness under -toolexec") if _, err := os.Stat("/usr/bin/time"); err != nil { t.Skip(err) } tg.run("list", "-f={{.Stale}}", "runtime") tg.run("list", "-toolexec=/usr/bin/time", "-f={{.Stale}}", "runtime") tg.grepStdout("false", "incorrectly reported runtime as stale") } func TestIssue22531(t *testing.T) { tooSlow(t, "links binaries") if gocacheverify.Value() == "1" { t.Skip("GODEBUG gocacheverify") } tg := testgo(t) defer tg.cleanup() tg.parallel() tg.makeTempdir() tg.setenv("GOPATH", tg.tempdir) tg.setenv("GOCACHE", tg.path("cache")) tg.tempFile("src/m/main.go", "package main /* c1 */; func main() {}\n") tg.run("install", "-x", "m") tg.run("list", "-f", "{{.Stale}}", "m") tg.grepStdout("false", "reported m as stale after install") tg.run("tool", "buildid", tg.path("bin/m"+exeSuffix)) // The link action ID did not include the full main build ID, // even though the full main build ID is written into the // eventual binary. 
That caused the following install to // be a no-op, thinking the gofmt binary was up-to-date, // even though .Stale could see it was not. tg.tempFile("src/m/main.go", "package main /* c2 */; func main() {}\n") tg.run("install", "-x", "m") tg.run("list", "-f", "{{.Stale}}", "m") tg.grepStdout("false", "reported m as stale after reinstall") tg.run("tool", "buildid", tg.path("bin/m"+exeSuffix)) } func TestIssue22596(t *testing.T) { tooSlow(t, "links binaries") if gocacheverify.Value() == "1" { t.Skip("GODEBUG gocacheverify") } tg := testgo(t) defer tg.cleanup() tg.parallel() tg.makeTempdir() tg.setenv("GOCACHE", tg.path("cache")) tg.tempFile("gopath1/src/p/p.go", "package p; func F(){}\n") tg.tempFile("gopath2/src/p/p.go", "package p; func F(){}\n") tg.setenv("GOPATH", tg.path("gopath1")) tg.run("list", "-f={{.Target}}", "p") target1 := strings.TrimSpace(tg.getStdout()) tg.run("install", "p") tg.wantNotStale("p", "", "p stale after install") tg.setenv("GOPATH", tg.path("gopath2")) tg.run("list", "-f={{.Target}}", "p") target2 := strings.TrimSpace(tg.getStdout()) tg.must(os.MkdirAll(filepath.Dir(target2), 0777)) tg.must(copyFile(target1, target2, 0666)) tg.wantStale("p", "build ID mismatch", "p not stale after copy from gopath1") tg.run("install", "p") tg.wantNotStale("p", "", "p stale after install2") } func TestTestCache(t *testing.T) { tooSlow(t, "links and runs test binaries") if gocacheverify.Value() == "1" { t.Skip("GODEBUG gocacheverify") } tg := testgo(t) defer tg.cleanup() tg.parallel() tg.makeTempdir() tg.setenv("GOPATH", tg.tempdir) tg.setenv("GOCACHE", tg.path("cache")) // The -p=1 in the commands below just makes the -x output easier to read. t.Log("\n\nINITIAL\n\n") tg.tempFile("src/p1/p1.go", "package p1\nvar X = 1\n") tg.tempFile("src/p2/p2.go", "package p2\nimport _ \"p1\"\nvar X = 1\n") tg.tempFile("src/t/t1/t1_test.go", "package t\nimport \"testing\"\nfunc Test1(*testing.T) {}\n") tg.tempFile("src/t/t2/t2_test.go", "package t\nimport _ \"p1\"\nimport \"testing\"\nfunc Test2(*testing.T) {}\n") tg.tempFile("src/t/t3/t3_test.go", "package t\nimport \"p1\"\nimport \"testing\"\nfunc Test3(t *testing.T) {t.Log(p1.X)}\n") tg.tempFile("src/t/t4/t4_test.go", "package t\nimport \"p2\"\nimport \"testing\"\nfunc Test4(t *testing.T) {t.Log(p2.X)}") tg.run("test", "-x", "-v", "-short", "t/...") t.Log("\n\nREPEAT\n\n") tg.run("test", "-x", "-v", "-short", "t/...") tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t1") tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t2") tg.grepStdout(`ok \tt/t3\t\(cached\)`, "did not cache t3") tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t4") tg.grepStderrNot(`[\\/](compile|gccgo) `, "incorrectly ran compiler") tg.grepStderrNot(`[\\/](link|gccgo) `, "incorrectly ran linker") tg.grepStderrNot(`p[0-9]\.test`, "incorrectly ran test") t.Log("\n\nCOMMENT\n\n") // Changing the program text without affecting the compiled package // should result in the package being rebuilt but nothing more. 
tg.tempFile("src/p1/p1.go", "package p1\nvar X = 01\n") tg.run("test", "-p=1", "-x", "-v", "-short", "t/...") tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t1") tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t2") tg.grepStdout(`ok \tt/t3\t\(cached\)`, "did not cache t3") tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t4") tg.grepStderrNot(`([\\/](compile|gccgo) ).*t[0-9]_test\.go`, "incorrectly ran compiler") tg.grepStderrNot(`[\\/](link|gccgo) `, "incorrectly ran linker") tg.grepStderrNot(`t[0-9]\.test.*test\.short`, "incorrectly ran test") t.Log("\n\nCHANGE\n\n") // Changing the actual package should have limited effects. tg.tempFile("src/p1/p1.go", "package p1\nvar X = 02\n") tg.run("test", "-p=1", "-x", "-v", "-short", "t/...") // p2 should have been rebuilt. tg.grepStderr(`([\\/]compile|gccgo).*p2.go`, "did not recompile p2") // t1 does not import anything, should not have been rebuilt. tg.grepStderrNot(`([\\/]compile|gccgo).*t1_test.go`, "incorrectly recompiled t1") tg.grepStderrNot(`([\\/]link|gccgo).*t1_test`, "incorrectly relinked t1_test") tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t/t1") // t2 imports p1 and must be rebuilt and relinked, // but the change should not have any effect on the test binary, // so the test should not have been rerun. tg.grepStderr(`([\\/]compile|gccgo).*t2_test.go`, "did not recompile t2") tg.grepStderr(`([\\/]link|gccgo).*t2\.test`, "did not relink t2_test") // This check does not currently work with gccgo, as garbage // collection of unused variables is not turned on by default. if runtime.Compiler != "gccgo" { tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t/t2") } // t3 imports p1, and changing X changes t3's test binary. tg.grepStderr(`([\\/]compile|gccgo).*t3_test.go`, "did not recompile t3") tg.grepStderr(`([\\/]link|gccgo).*t3\.test`, "did not relink t3_test") tg.grepStderr(`t3\.test.*-test.short`, "did not rerun t3_test") tg.grepStdoutNot(`ok \tt/t3\t\(cached\)`, "reported cached t3_test result") // t4 imports p2, but p2 did not change, so t4 should be relinked, not recompiled, // and not rerun. tg.grepStderrNot(`([\\/]compile|gccgo).*t4_test.go`, "incorrectly recompiled t4") tg.grepStderr(`([\\/]link|gccgo).*t4\.test`, "did not relink t4_test") // This check does not currently work with gccgo, as garbage // collection of unused variables is not turned on by default. if runtime.Compiler != "gccgo" { tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t/t4") } } func TestTestSkipVetAfterFailedBuild(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempFile("x_test.go", `package x func f() { return 1 } `) tg.runFail("test", tg.path("x_test.go")) tg.grepStderrNot(`vet`, "vet should be skipped after the failed build") } func TestTestVetRebuild(t *testing.T) { tooSlow(t, "links and runs test binaries") tg := testgo(t) defer tg.cleanup() tg.parallel() // golang.org/issue/23701. // b_test imports b with augmented method from export_test.go. // b_test also imports a, which imports b. // Must not accidentally see un-augmented b propagate through a to b_test. tg.tempFile("src/a/a.go", `package a import "b" type Type struct{} func (*Type) M() b.T {return 0} `) tg.tempFile("src/b/b.go", `package b type T int type I interface {M() T} `) tg.tempFile("src/b/export_test.go", `package b func (*T) Method() *T { return nil } `) tg.tempFile("src/b/b_test.go", `package b_test import ( "testing" "a" . 
"b" ) func TestBroken(t *testing.T) { x := new(T) x.Method() _ = new(a.Type) } `) tg.setenv("GOPATH", tg.path(".")) tg.run("test", "b") tg.run("vet", "b") } func TestInstallDeps(t *testing.T) { tooSlow(t, "links a binary") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.makeTempdir() tg.setenv("GOPATH", tg.tempdir) tg.tempFile("src/p1/p1.go", "package p1\nvar X = 1\n") tg.tempFile("src/p2/p2.go", "package p2\nimport _ \"p1\"\n") tg.tempFile("src/main1/main.go", "package main\nimport _ \"p2\"\nfunc main() {}\n") tg.run("list", "-f={{.Target}}", "p1") p1 := strings.TrimSpace(tg.getStdout()) tg.run("list", "-f={{.Target}}", "p2") p2 := strings.TrimSpace(tg.getStdout()) tg.run("list", "-f={{.Target}}", "main1") main1 := strings.TrimSpace(tg.getStdout()) tg.run("install", "main1") tg.mustExist(main1) tg.mustNotExist(p2) tg.mustNotExist(p1) tg.run("install", "p2") tg.mustExist(p2) tg.mustNotExist(p1) } // Issue 22986. func TestImportPath(t *testing.T) { tooSlow(t, "links and runs a test binary") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempFile("src/a/a.go", ` package main import ( "log" p "a/p-1.0" ) func main() { if !p.V { log.Fatal("false") } }`) tg.tempFile("src/a/a_test.go", ` package main_test import ( p "a/p-1.0" "testing" ) func TestV(t *testing.T) { if !p.V { t.Fatal("false") } }`) tg.tempFile("src/a/p-1.0/p.go", ` package p var V = true func init() {} `) tg.setenv("GOPATH", tg.path(".")) tg.run("build", "-o", tg.path("a.exe"), "a") tg.run("test", "a") } func TestBadCommandLines(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempFile("src/x/x.go", "package x\n") tg.setenv("GOPATH", tg.path(".")) tg.run("build", "x") tg.tempFile("src/x/@y.go", "package x\n") tg.runFail("build", "x") tg.grepStderr("invalid input file name \"@y.go\"", "did not reject @y.go") tg.must(os.Remove(tg.path("src/x/@y.go"))) tg.tempFile("src/x/-y.go", "package x\n") tg.runFail("build", "x") tg.grepStderr("invalid input file name \"-y.go\"", "did not reject -y.go") tg.must(os.Remove(tg.path("src/x/-y.go"))) if runtime.Compiler == "gccgo" { tg.runFail("build", "-gccgoflags=all=@x", "x") } else { tg.runFail("build", "-gcflags=all=@x", "x") } tg.grepStderr("invalid command-line argument @x in command", "did not reject @x during exec") tg.tempFile("src/@x/x.go", "package x\n") tg.setenv("GOPATH", tg.path(".")) tg.runFail("build", "@x") tg.grepStderr("invalid input directory name \"@x\"|can only use path@version syntax with 'go get' and 'go install' in module-aware mode", "did not reject @x directory") tg.tempFile("src/@x/y/y.go", "package y\n") tg.setenv("GOPATH", tg.path(".")) tg.runFail("build", "@x/y") tg.grepStderr("invalid import path \"@x/y\"|can only use path@version syntax with 'go get' and 'go install' in module-aware mode", "did not reject @x/y import path") tg.tempFile("src/-x/x.go", "package x\n") tg.setenv("GOPATH", tg.path(".")) tg.runFail("build", "--", "-x") tg.grepStderr("invalid import path \"-x\"", "did not reject -x import path") tg.tempFile("src/-x/y/y.go", "package y\n") tg.setenv("GOPATH", tg.path(".")) tg.runFail("build", "--", "-x/y") tg.grepStderr("invalid import path \"-x/y\"", "did not reject -x/y import path") } func TestTwoPkgConfigs(t *testing.T) { testenv.MustHaveCGO(t) if runtime.GOOS == "windows" || runtime.GOOS == "plan9" { t.Skipf("no shell scripts on %s", runtime.GOOS) } tooSlow(t, "builds a package with cgo dependencies") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempFile("src/x/a.go", `package x // #cgo pkg-config: --static a import 
"C" `) tg.tempFile("src/x/b.go", `package x // #cgo pkg-config: --static a import "C" `) tg.tempFile("pkg-config.sh", `#!/bin/sh echo $* >>`+tg.path("pkg-config.out")) tg.must(os.Chmod(tg.path("pkg-config.sh"), 0755)) tg.setenv("GOPATH", tg.path(".")) tg.setenv("PKG_CONFIG", tg.path("pkg-config.sh")) tg.run("build", "x") out, err := os.ReadFile(tg.path("pkg-config.out")) tg.must(err) out = bytes.TrimSpace(out) want := "--cflags --static --static -- a a\n--libs --static --static -- a a" if !bytes.Equal(out, []byte(want)) { t.Errorf("got %q want %q", out, want) } } func TestCgoCache(t *testing.T) { testenv.MustHaveCGO(t) tooSlow(t, "builds a package with cgo dependencies") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempFile("src/x/a.go", `package main // #ifndef VAL // #define VAL 0 // #endif // int val = VAL; import "C" import "fmt" func main() { fmt.Println(C.val) } `) tg.setenv("GOPATH", tg.path(".")) exe := tg.path("x.exe") tg.run("build", "-o", exe, "x") tg.setenv("CGO_LDFLAGS", "-lnosuchlibraryexists") tg.runFail("build", "-o", exe, "x") tg.grepStderr(`nosuchlibraryexists`, "did not run linker with changed CGO_LDFLAGS") } // Issue 23982 func TestFilepathUnderCwdFormat(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.parallel() tg.run("test", "-x", "-cover", "log") tg.grepStderrNot(`\.log\.cover\.go`, "-x output should contain correctly formatted filepath under cwd") } // Issue 24396. func TestDontReportRemoveOfEmptyDir(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempFile("src/a/a.go", `package a`) tg.setenv("GOPATH", tg.path(".")) tg.run("install", "-x", "a") tg.run("install", "-x", "a") // The second install should have printed only a WORK= line, // nothing else. if bytes.Count(tg.stdout.Bytes(), []byte{'\n'})+bytes.Count(tg.stderr.Bytes(), []byte{'\n'}) > 1 { t.Error("unnecessary output when installing installed package") } } // Issue 24704. func TestLinkerTmpDirIsDeleted(t *testing.T) { skipIfGccgo(t, "gccgo does not use cmd/link") testenv.MustHaveCGO(t) tooSlow(t, "builds a package with cgo dependencies") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempFile("a.go", `package main; import "C"; func main() {}`) tg.run("build", "-ldflags", "-v", "-o", os.DevNull, tg.path("a.go")) // Find line that has "host link:" in linker output. stderr := tg.getStderr() var hostLinkLine string for _, line := range strings.Split(stderr, "\n") { if !strings.Contains(line, "host link:") { continue } hostLinkLine = line break } if hostLinkLine == "" { t.Fatal(`fail to find with "host link:" string in linker output`) } // Find parameter, like "/tmp/go-link-408556474/go.o" inside of // "host link:" line, and extract temp directory /tmp/go-link-408556474 // out of it. tmpdir := hostLinkLine i := strings.Index(tmpdir, `go.o"`) if i == -1 { t.Fatalf(`fail to find "go.o" in "host link:" line %q`, hostLinkLine) } tmpdir = tmpdir[:i-1] i = strings.LastIndex(tmpdir, `"`) if i == -1 { t.Fatalf(`fail to find " in "host link:" line %q`, hostLinkLine) } tmpdir = tmpdir[i+1:] // Verify that temp directory has been removed. _, err := os.Stat(tmpdir) if err == nil { t.Fatalf("temp directory %q has not been removed", tmpdir) } if !os.IsNotExist(err) { t.Fatalf("Stat(%q) returns unexpected error: %v", tmpdir, err) } } // Issue 25093. 
func TestCoverpkgTestOnly(t *testing.T) { skipIfGccgo(t, "gccgo has no cover tool") tooSlow(t, "links and runs a test binary with coverage enabled") tg := testgo(t) defer tg.cleanup() tg.parallel() tg.tempFile("src/a/a.go", `package a func F(i int) int { return i*i }`) tg.tempFile("src/atest/a_test.go", ` package a_test import ( "a"; "testing" ) func TestF(t *testing.T) { a.F(2) } `) tg.setenv("GOPATH", tg.path(".")) tg.run("test", "-coverpkg=a", "atest") tg.grepStderrNot("no packages being tested depend on matches", "bad match message") tg.grepStdout("coverage: 100", "no coverage") } // Regression test for golang.org/issue/34499: version command should not crash // when executed in a deleted directory on Linux. func TestExecInDeletedDir(t *testing.T) { switch runtime.GOOS { case "windows", "plan9", "aix", // Fails with "device busy". "solaris", "illumos": // Fails with "invalid argument". t.Skipf("%v does not support removing the current working directory", runtime.GOOS) } tg := testgo(t) defer tg.cleanup() wd, err := os.Getwd() tg.check(err) tg.makeTempdir() tg.check(os.Chdir(tg.tempdir)) defer func() { tg.check(os.Chdir(wd)) }() tg.check(os.Remove(tg.tempdir)) // `go version` should not fail tg.run("version") }
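The test helpers above reduce to one pattern: run a go command, capture stdout and stderr into buffers, then match the buffers line by line with regular expressions. Below is a minimal standalone sketch of that pattern using only the standard library; runAndGrep is an illustrative name invented for this sketch and is not part of the real test suite.

package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"regexp"
)

// runAndGrep runs a command, captures both output streams, and reports
// whether any stdout line matches pattern. It mirrors the
// doGrepMatch/doGrepCount approach used by testgoData above.
// (Hypothetical helper, for illustration only.)
func runAndGrep(pattern, name string, args ...string) (bool, error) {
	var stdout, stderr bytes.Buffer
	cmd := exec.Command(name, args...)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		return false, fmt.Errorf("%s: %v\n%s", name, err, stderr.Bytes())
	}
	re := regexp.MustCompile(pattern)
	for _, ln := range bytes.Split(stdout.Bytes(), []byte{'\n'}) {
		if re.Match(ln) {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := runAndGrep(`^go version`, "go", "version")
	fmt.Println(ok, err)
}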
go/src/cmd/go/go_test.go
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build unix || js || wasip1

package base

import (
	"os"
	"syscall"
)

var signalsToIgnore = []os.Signal{os.Interrupt, syscall.SIGQUIT}

// SignalTrace is the signal to send to make a Go program
// crash with a stack trace.
var SignalTrace os.Signal = syscall.SIGQUIT
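For context, a short sketch of how a list like signalsToIgnore is typically consumed. This is illustrative only: the real wiring lives elsewhere in cmd/go/internal/base, while signal.Ignore and signal.Reset are the standard-library calls shown here.

package main

import (
	"os"
	"os/signal"
	"syscall"
)

func main() {
	ignored := []os.Signal{os.Interrupt, syscall.SIGQUIT}
	// Discard these signals in this process, e.g. while a child process
	// is the intended recipient.
	signal.Ignore(ignored...)
	defer signal.Reset(ignored...) // restore default handling afterwards
	// ... run the child process here ...
}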
go/src/cmd/go/internal/base/signal_unix.go
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build unix || windows

package envcmd

import (
	"bytes"
	"cmd/go/internal/cfg"
	"fmt"
	"internal/testenv"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"testing"
	"unicode"
)

func FuzzPrintEnvEscape(f *testing.F) {
	f.Add(`$(echo 'cc"'; echo 'OOPS="oops')`)
	f.Add("$(echo shell expansion 1>&2)")
	f.Add("''")
	f.Add(`C:\"Program Files"\`)
	f.Add(`\\"Quoted Host"\\share`)
	f.Add("\xfb")
	f.Add("0")
	f.Add("")
	f.Add("''''''''")
	f.Add("\r")
	f.Add("\n")
	f.Add("E,%")
	f.Fuzz(func(t *testing.T, s string) {
		t.Parallel()

		for _, c := range []byte(s) {
			if c == 0 {
				t.Skipf("skipping %q: contains a null byte. Null bytes can't occur in the environment"+
					" outside of Plan 9, which has different code path than Windows and Unix that this test"+
					" isn't testing.", s)
			}
			if c > unicode.MaxASCII {
				t.Skipf("skipping %#q: contains a non-ASCII character %q", s, c)
			}
			if !unicode.IsGraphic(rune(c)) && !unicode.IsSpace(rune(c)) {
				t.Skipf("skipping %#q: contains non-graphic character %q", s, c)
			}
			if runtime.GOOS == "windows" && c == '\r' || c == '\n' {
				t.Skipf("skipping %#q on Windows: contains unescapable character %q", s, c)
			}
		}

		var b bytes.Buffer
		if runtime.GOOS == "windows" {
			b.WriteString("@echo off\n")
		}
		PrintEnv(&b, []cfg.EnvVar{{Name: "var", Value: s}}, false)
		var want string
		if runtime.GOOS == "windows" {
			fmt.Fprintf(&b, "echo \"%%var%%\"\n")
			want += "\"" + s + "\"\r\n"
		} else {
			fmt.Fprintf(&b, "printf '%%s\\n' \"$var\"\n")
			want += s + "\n"
		}

		scriptfilename := "script.sh"
		if runtime.GOOS == "windows" {
			scriptfilename = "script.bat"
		}
		var cmd *exec.Cmd
		if runtime.GOOS == "windows" {
			scriptfile := filepath.Join(t.TempDir(), scriptfilename)
			if err := os.WriteFile(scriptfile, b.Bytes(), 0777); err != nil {
				t.Fatal(err)
			}
			cmd = testenv.Command(t, "cmd.exe", "/C", scriptfile)
		} else {
			cmd = testenv.Command(t, "sh", "-c", b.String())
		}
		out, err := cmd.Output()
		t.Log(string(out))
		if err != nil {
			t.Fatal(err)
		}

		if string(out) != want {
			t.Fatalf("output of running PrintEnv script and echoing variable: got: %q, want: %q",
				string(out), want)
		}
	})
}
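The fuzz test above checks that PrintEnv emits shell-safe variable assignments. The actual escaping lives inside the envcmd package; the snippet below only sketches the standard POSIX single-quoting technique such code relies on. shellQuote is a hypothetical helper invented for this sketch, not the package's API.

package main

import (
	"fmt"
	"strings"
)

// shellQuote wraps s in single quotes, closing and reopening the quotes
// around any embedded single quote (the '\'' sequence). Inside single
// quotes, the shell performs no expansion at all.
func shellQuote(s string) string {
	return "'" + strings.ReplaceAll(s, "'", `'\''`) + "'"
}

func main() {
	// One of the fuzz seeds above, rendered as a safe assignment.
	fmt.Println("var=" + shellQuote(`$(echo 'cc"'; echo 'OOPS="oops')`))
}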
go/src/cmd/go/internal/envcmd/env_test.go/0
{ "file_path": "go/src/cmd/go/internal/envcmd/env_test.go", "repo_id": "go", "token_count": 1059 }
132
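The fuzz test above follows the standard native-fuzzing shape: seed the corpus with f.Add, then assert an invariant for every generated input. A stripped-down skeleton of the same pattern, with a hypothetical quoting function standing in for PrintEnv:

// quote_test.go: run with `go test -fuzz=FuzzSingleQuote`.
package quote

import (
    "strings"
    "testing"
)

// singleQuote is a hypothetical stand-in for the escaping under test.
func singleQuote(s string) string {
    return "'" + strings.ReplaceAll(s, "'", `'\''`) + "'"
}

func FuzzSingleQuote(f *testing.F) {
    // Seed corpus: interesting shell metacharacters, like the f.Add
    // calls above.
    f.Add("plain")
    f.Add("with 'quotes'")
    f.Add("$(echo injection)")
    f.Fuzz(func(t *testing.T, s string) {
        q := singleQuote(s)
        if !strings.HasPrefix(q, "'") || !strings.HasSuffix(q, "'") {
            t.Fatalf("singleQuote(%q) = %q: not wrapped in quotes", s, q)
        }
    })
}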
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package help implements the “go help” command. package help import ( "bufio" "fmt" "io" "os" "strings" "text/template" "unicode" "unicode/utf8" "cmd/go/internal/base" "cmd/internal/telemetry/counter" ) var counterErrorsHelpUnknownTopic = counter.New("go/errors:help-unknown-topic") // Help implements the 'help' command. func Help(w io.Writer, args []string) { // 'go help documentation' generates doc.go. if len(args) == 1 && args[0] == "documentation" { fmt.Fprintln(w, "// Copyright 2011 The Go Authors. All rights reserved.") fmt.Fprintln(w, "// Use of this source code is governed by a BSD-style") fmt.Fprintln(w, "// license that can be found in the LICENSE file.") fmt.Fprintln(w) fmt.Fprintln(w, "// Code generated by 'go test cmd/go -v -run=^TestDocsUpToDate$ -fixdocs'; DO NOT EDIT.") fmt.Fprintln(w, "// Edit the documentation in other files and then execute 'go generate cmd/go' to generate this one.") fmt.Fprintln(w) buf := new(strings.Builder) PrintUsage(buf, base.Go) usage := &base.Command{Long: buf.String()} cmds := []*base.Command{usage} for _, cmd := range base.Go.Commands { cmds = append(cmds, cmd) cmds = append(cmds, cmd.Commands...) } tmpl(&commentWriter{W: w}, documentationTemplate, cmds) fmt.Fprintln(w, "package main") return } cmd := base.Go Args: for i, arg := range args { for _, sub := range cmd.Commands { if sub.Name() == arg { cmd = sub continue Args } } // helpSuccess is the help command using as many args as possible that would succeed. helpSuccess := "go help" if i > 0 { helpSuccess += " " + strings.Join(args[:i], " ") } counterErrorsHelpUnknownTopic.Inc() fmt.Fprintf(os.Stderr, "go help %s: unknown help topic. Run '%s'.\n", strings.Join(args, " "), helpSuccess) base.SetExitStatus(2) // failed at 'go help cmd' base.Exit() } if len(cmd.Commands) > 0 { PrintUsage(os.Stdout, cmd) } else { tmpl(os.Stdout, helpTemplate, cmd) } // not exit 2: succeeded at 'go help cmd'. return } var usageTemplate = `{{.Long | trim}} Usage: {{.UsageLine}} <command> [arguments] The commands are: {{range .Commands}}{{if or (.Runnable) .Commands}} {{.Name | printf "%-11s"}} {{.Short}}{{end}}{{end}} Use "go help{{with .LongName}} {{.}}{{end}} <command>" for more information about a command. {{if eq (.UsageLine) "go"}} Additional help topics: {{range .Commands}}{{if and (not .Runnable) (not .Commands)}} {{.Name | printf "%-15s"}} {{.Short}}{{end}}{{end}} Use "go help{{with .LongName}} {{.}}{{end}} <topic>" for more information about that topic. {{end}} ` var helpTemplate = `{{if .Runnable}}usage: {{.UsageLine}} {{end}}{{.Long | trim}} ` var documentationTemplate = `{{range .}}{{if .Short}}{{.Short | capitalize}} {{end}}{{if .Commands}}` + usageTemplate + `{{else}}{{if .Runnable}}Usage: {{.UsageLine}} {{end}}{{.Long | trim}} {{end}}{{end}}` // commentWriter writes a Go comment to the underlying io.Writer, // using line comment form (//). type commentWriter struct { W io.Writer wroteSlashes bool // Wrote "//" at the beginning of the current line. 
} func (c *commentWriter) Write(p []byte) (int, error) { var n int for i, b := range p { if !c.wroteSlashes { s := "//" if b != '\n' { s = "// " } if _, err := io.WriteString(c.W, s); err != nil { return n, err } c.wroteSlashes = true } n0, err := c.W.Write(p[i : i+1]) n += n0 if err != nil { return n, err } if b == '\n' { c.wroteSlashes = false } } return len(p), nil } // An errWriter wraps a writer, recording whether a write error occurred. type errWriter struct { w io.Writer err error } func (w *errWriter) Write(b []byte) (int, error) { n, err := w.w.Write(b) if err != nil { w.err = err } return n, err } // tmpl executes the given template text on data, writing the result to w. func tmpl(w io.Writer, text string, data any) { t := template.New("top") t.Funcs(template.FuncMap{"trim": strings.TrimSpace, "capitalize": capitalize}) template.Must(t.Parse(text)) ew := &errWriter{w: w} err := t.Execute(ew, data) if ew.err != nil { // I/O error writing. Ignore write on closed pipe. if strings.Contains(ew.err.Error(), "pipe") { base.SetExitStatus(1) base.Exit() } base.Fatalf("writing output: %v", ew.err) } if err != nil { panic(err) } } func capitalize(s string) string { if s == "" { return s } r, n := utf8.DecodeRuneInString(s) return string(unicode.ToTitle(r)) + s[n:] } func PrintUsage(w io.Writer, cmd *base.Command) { bw := bufio.NewWriter(w) tmpl(bw, usageTemplate, cmd) bw.Flush() }
go/src/cmd/go/internal/help/help.go/0
{ "file_path": "go/src/cmd/go/internal/help/help.go", "repo_id": "go", "token_count": 1880 }
133
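commentWriter is the piece that turns the generated documentation into a // comment block for doc.go. A simplified, self-contained rewrite showing the effect (illustration only, not the cmd/go type itself):

package main

import (
    "fmt"
    "io"
    "os"
)

type commentWriter struct {
    w            io.Writer
    wroteSlashes bool
}

func (c *commentWriter) Write(p []byte) (int, error) {
    for i := range p {
        if !c.wroteSlashes {
            prefix := "// "
            if p[i] == '\n' {
                prefix = "//" // blank comment line: no trailing space
            }
            if _, err := io.WriteString(c.w, prefix); err != nil {
                return i, err
            }
            c.wroteSlashes = true
        }
        if _, err := c.w.Write(p[i : i+1]); err != nil {
            return i, err
        }
        if p[i] == '\n' {
            c.wroteSlashes = false
        }
    }
    return len(p), nil
}

func main() {
    w := &commentWriter{w: os.Stdout}
    fmt.Fprint(w, "Usage: go help\n\nMore text.\n")
    // Prints:
    // // Usage: go help
    // //
    // // More text.
}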
package xxxx

import "import3"
go/src/cmd/go/internal/imports/testdata/star/x_darwin.go/0
{ "file_path": "go/src/cmd/go/internal/imports/testdata/star/x_darwin.go", "repo_id": "go", "token_count": 11 }
134
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build darwin || dragonfly || freebsd || illumos || linux || netbsd || openbsd

package filelock

import (
    "io/fs"
    "syscall"
)

type lockType int16

const (
    readLock  lockType = syscall.LOCK_SH
    writeLock lockType = syscall.LOCK_EX
)

func lock(f File, lt lockType) (err error) {
    for {
        err = syscall.Flock(int(f.Fd()), int(lt))
        if err != syscall.EINTR {
            break
        }
    }
    if err != nil {
        return &fs.PathError{
            Op:   lt.String(),
            Path: f.Name(),
            Err:  err,
        }
    }
    return nil
}

func unlock(f File) error {
    return lock(f, syscall.LOCK_UN)
}
go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go/0
{ "file_path": "go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go", "repo_id": "go", "token_count": 297 }
135
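The lock function above is the whole trick: flock(2) plus a retry loop for EINTR. A standalone Unix-only sketch of the same pattern against a scratch lock file (file name is a placeholder):

//go:build unix

package main

import (
    "fmt"
    "os"
    "syscall"
)

// flockRetry retries flock(2) until it returns something other than EINTR,
// mirroring the loop in the package above.
func flockRetry(fd int, how int) error {
    for {
        err := syscall.Flock(fd, how)
        if err != syscall.EINTR {
            return err
        }
    }
}

func main() {
    f, err := os.OpenFile("demo.lock", os.O_CREATE|os.O_RDWR, 0o666)
    if err != nil {
        panic(err)
    }
    defer f.Close()

    if err := flockRetry(int(f.Fd()), syscall.LOCK_EX); err != nil {
        panic(err)
    }
    fmt.Println("exclusive lock held; press Enter to release")
    fmt.Scanln()
    // Unlocking is just another flock call with LOCK_UN.
    if err := flockRetry(int(f.Fd()), syscall.LOCK_UN); err != nil {
        panic(err)
    }
}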
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package modcmd implements the “go mod” command.
package modcmd

import (
    "cmd/go/internal/base"
)

var CmdMod = &base.Command{
    UsageLine: "go mod",
    Short:     "module maintenance",
    Long: `Go mod provides access to operations on modules.

Note that support for modules is built into all the go commands,
not just 'go mod'. For example, day-to-day adding, removing, upgrading,
and downgrading of dependencies should be done using 'go get'.
See 'go help modules' for an overview of module functionality.
`,
    Commands: []*base.Command{
        cmdDownload,
        cmdEdit,
        cmdGraph,
        cmdInit,
        cmdTidy,
        cmdVendor,
        cmdVerify,
        cmdWhy,
    },
}
go/src/cmd/go/internal/modcmd/mod.go/0
{ "file_path": "go/src/cmd/go/internal/modcmd/mod.go", "repo_id": "go", "token_count": 258 }
136
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package modfetch import ( "archive/zip" "bytes" "context" "crypto/sha256" "encoding/base64" "errors" "fmt" "io" "io/fs" "os" "path/filepath" "sort" "strings" "sync" "cmd/go/internal/base" "cmd/go/internal/cfg" "cmd/go/internal/fsys" "cmd/go/internal/gover" "cmd/go/internal/lockedfile" "cmd/go/internal/par" "cmd/go/internal/robustio" "cmd/go/internal/str" "cmd/go/internal/trace" "golang.org/x/mod/module" "golang.org/x/mod/sumdb/dirhash" modzip "golang.org/x/mod/zip" ) var downloadCache par.ErrCache[module.Version, string] // version → directory var ErrToolchain = errors.New("internal error: invalid operation on toolchain module") // Download downloads the specific module version to the // local download cache and returns the name of the directory // corresponding to the root of the module's file tree. func Download(ctx context.Context, mod module.Version) (dir string, err error) { if gover.IsToolchain(mod.Path) { return "", ErrToolchain } if err := checkCacheDir(ctx); err != nil { base.Fatal(err) } // The par.Cache here avoids duplicate work. return downloadCache.Do(mod, func() (string, error) { dir, err := download(ctx, mod) if err != nil { return "", err } checkMod(ctx, mod) // If go.mod exists (not an old legacy module), check version is not too new. if data, err := os.ReadFile(filepath.Join(dir, "go.mod")); err == nil { goVersion := gover.GoModLookup(data, "go") if gover.Compare(goVersion, gover.Local()) > 0 { return "", &gover.TooNewError{What: mod.String(), GoVersion: goVersion} } } else if !errors.Is(err, fs.ErrNotExist) { return "", err } return dir, nil }) } func download(ctx context.Context, mod module.Version) (dir string, err error) { ctx, span := trace.StartSpan(ctx, "modfetch.download "+mod.String()) defer span.Done() dir, err = DownloadDir(ctx, mod) if err == nil { // The directory has already been completely extracted (no .partial file exists). return dir, nil } else if dir == "" || !errors.Is(err, fs.ErrNotExist) { return "", err } // To avoid cluttering the cache with extraneous files, // DownloadZip uses the same lockfile as Download. // Invoke DownloadZip before locking the file. zipfile, err := DownloadZip(ctx, mod) if err != nil { return "", err } unlock, err := lockVersion(ctx, mod) if err != nil { return "", err } defer unlock() ctx, span = trace.StartSpan(ctx, "unzip "+zipfile) defer span.Done() // Check whether the directory was populated while we were waiting on the lock. _, dirErr := DownloadDir(ctx, mod) if dirErr == nil { return dir, nil } _, dirExists := dirErr.(*DownloadDirPartialError) // Clean up any remaining temporary directories created by old versions // (before 1.16), as well as partially extracted directories (indicated by // DownloadDirPartialError, usually because of a .partial file). This is only // safe to do because the lock file ensures that their writers are no longer // active. parentDir := filepath.Dir(dir) tmpPrefix := filepath.Base(dir) + ".tmp-" if old, err := filepath.Glob(filepath.Join(str.QuoteGlob(parentDir), str.QuoteGlob(tmpPrefix)+"*")); err == nil { for _, path := range old { RemoveAll(path) // best effort } } if dirExists { if err := RemoveAll(dir); err != nil { return "", err } } partialPath, err := CachePath(ctx, mod, "partial") if err != nil { return "", err } // Extract the module zip directory at its final location. 
// // To prevent other processes from reading the directory if we crash, // create a .partial file before extracting the directory, and delete // the .partial file afterward (all while holding the lock). // // Before Go 1.16, we extracted to a temporary directory with a random name // then renamed it into place with os.Rename. On Windows, this failed with // ERROR_ACCESS_DENIED when another process (usually an anti-virus scanner) // opened files in the temporary directory. // // Go 1.14.2 and higher respect .partial files. Older versions may use // partially extracted directories. 'go mod verify' can detect this, // and 'go clean -modcache' can fix it. if err := os.MkdirAll(parentDir, 0777); err != nil { return "", err } if err := os.WriteFile(partialPath, nil, 0666); err != nil { return "", err } if err := modzip.Unzip(dir, mod, zipfile); err != nil { fmt.Fprintf(os.Stderr, "-> %s\n", err) if rmErr := RemoveAll(dir); rmErr == nil { os.Remove(partialPath) } return "", err } if err := os.Remove(partialPath); err != nil { return "", err } if !cfg.ModCacheRW { makeDirsReadOnly(dir) } return dir, nil } var downloadZipCache par.ErrCache[module.Version, string] // DownloadZip downloads the specific module version to the // local zip cache and returns the name of the zip file. func DownloadZip(ctx context.Context, mod module.Version) (zipfile string, err error) { // The par.Cache here avoids duplicate work. return downloadZipCache.Do(mod, func() (string, error) { zipfile, err := CachePath(ctx, mod, "zip") if err != nil { return "", err } ziphashfile := zipfile + "hash" // Return without locking if the zip and ziphash files exist. if _, err := os.Stat(zipfile); err == nil { if _, err := os.Stat(ziphashfile); err == nil { return zipfile, nil } } // The zip or ziphash file does not exist. Acquire the lock and create them. if cfg.CmdName != "mod download" { vers := mod.Version if mod.Path == "golang.org/toolchain" { // Shorten v0.0.1-go1.13.1.darwin-amd64 to go1.13.1.darwin-amd64 _, vers, _ = strings.Cut(vers, "-") if i := strings.LastIndex(vers, "."); i >= 0 { goos, goarch, _ := strings.Cut(vers[i+1:], "-") vers = vers[:i] + " (" + goos + "/" + goarch + ")" } fmt.Fprintf(os.Stderr, "go: downloading %s\n", vers) } else { fmt.Fprintf(os.Stderr, "go: downloading %s %s\n", mod.Path, vers) } } unlock, err := lockVersion(ctx, mod) if err != nil { return "", err } defer unlock() if err := downloadZip(ctx, mod, zipfile); err != nil { return "", err } return zipfile, nil }) } func downloadZip(ctx context.Context, mod module.Version, zipfile string) (err error) { ctx, span := trace.StartSpan(ctx, "modfetch.downloadZip "+zipfile) defer span.Done() // Double-check that the zipfile was not created while we were waiting for // the lock in DownloadZip. ziphashfile := zipfile + "hash" var zipExists, ziphashExists bool if _, err := os.Stat(zipfile); err == nil { zipExists = true } if _, err := os.Stat(ziphashfile); err == nil { ziphashExists = true } if zipExists && ziphashExists { return nil } // Create parent directories. if err := os.MkdirAll(filepath.Dir(zipfile), 0777); err != nil { return err } // Clean up any remaining tempfiles from previous runs. // This is only safe to do because the lock file ensures that their // writers are no longer active. 
tmpPattern := filepath.Base(zipfile) + "*.tmp" if old, err := filepath.Glob(filepath.Join(str.QuoteGlob(filepath.Dir(zipfile)), tmpPattern)); err == nil { for _, path := range old { os.Remove(path) // best effort } } // If the zip file exists, the ziphash file must have been deleted // or lost after a file system crash. Re-hash the zip without downloading. if zipExists { return hashZip(mod, zipfile, ziphashfile) } // From here to the os.Rename call below is functionally almost equivalent to // renameio.WriteToFile, with one key difference: we want to validate the // contents of the file (by hashing it) before we commit it. Because the file // is zip-compressed, we need an actual file — or at least an io.ReaderAt — to // validate it: we can't just tee the stream as we write it. f, err := tempFile(ctx, filepath.Dir(zipfile), filepath.Base(zipfile), 0666) if err != nil { return err } defer func() { if err != nil { f.Close() os.Remove(f.Name()) } }() var unrecoverableErr error err = TryProxies(func(proxy string) error { if unrecoverableErr != nil { return unrecoverableErr } repo := Lookup(ctx, proxy, mod.Path) err := repo.Zip(ctx, f, mod.Version) if err != nil { // Zip may have partially written to f before failing. // (Perhaps the server crashed while sending the file?) // Since we allow fallback on error in some cases, we need to fix up the // file to be empty again for the next attempt. if _, err := f.Seek(0, io.SeekStart); err != nil { unrecoverableErr = err return err } if err := f.Truncate(0); err != nil { unrecoverableErr = err return err } } return err }) if err != nil { return err } // Double-check that the paths within the zip file are well-formed. // // TODO(bcmills): There is a similar check within the Unzip function. Can we eliminate one? fi, err := f.Stat() if err != nil { return err } z, err := zip.NewReader(f, fi.Size()) if err != nil { return err } prefix := mod.Path + "@" + mod.Version + "/" for _, f := range z.File { if !strings.HasPrefix(f.Name, prefix) { return fmt.Errorf("zip for %s has unexpected file %s", prefix[:len(prefix)-1], f.Name) } } if err := f.Close(); err != nil { return err } // Hash the zip file and check the sum before renaming to the final location. if err := hashZip(mod, f.Name(), ziphashfile); err != nil { return err } if err := os.Rename(f.Name(), zipfile); err != nil { return err } // TODO(bcmills): Should we make the .zip and .ziphash files read-only to discourage tampering? return nil } // hashZip reads the zip file opened in f, then writes the hash to ziphashfile, // overwriting that file if it exists. // // If the hash does not match go.sum (or the sumdb if enabled), hashZip returns // an error and does not write ziphashfile. func hashZip(mod module.Version, zipfile, ziphashfile string) (err error) { hash, err := dirhash.HashZip(zipfile, dirhash.DefaultHash) if err != nil { return err } if err := checkModSum(mod, hash); err != nil { return err } hf, err := lockedfile.Create(ziphashfile) if err != nil { return err } defer func() { if closeErr := hf.Close(); err == nil && closeErr != nil { err = closeErr } }() if err := hf.Truncate(int64(len(hash))); err != nil { return err } if _, err := hf.WriteAt([]byte(hash), 0); err != nil { return err } return nil } // makeDirsReadOnly makes a best-effort attempt to remove write permissions for dir // and its transitive contents. 
func makeDirsReadOnly(dir string) { type pathMode struct { path string mode fs.FileMode } var dirs []pathMode // in lexical order filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { if err == nil && d.IsDir() { info, err := d.Info() if err == nil && info.Mode()&0222 != 0 { dirs = append(dirs, pathMode{path, info.Mode()}) } } return nil }) // Run over list backward to chmod children before parents. for i := len(dirs) - 1; i >= 0; i-- { os.Chmod(dirs[i].path, dirs[i].mode&^0222) } } // RemoveAll removes a directory written by Download or Unzip, first applying // any permission changes needed to do so. func RemoveAll(dir string) error { // Module cache has 0555 directories; make them writable in order to remove content. filepath.WalkDir(dir, func(path string, info fs.DirEntry, err error) error { if err != nil { return nil // ignore errors walking in file system } if info.IsDir() { os.Chmod(path, 0777) } return nil }) return robustio.RemoveAll(dir) } var GoSumFile string // path to go.sum; set by package modload var WorkspaceGoSumFiles []string // path to module go.sums in workspace; set by package modload type modSum struct { mod module.Version sum string } var goSum struct { mu sync.Mutex m map[module.Version][]string // content of go.sum file w map[string]map[module.Version][]string // sum file in workspace -> content of that sum file status map[modSum]modSumStatus // state of sums in m overwrite bool // if true, overwrite go.sum without incorporating its contents enabled bool // whether to use go.sum at all } type modSumStatus struct { used, dirty bool } // Reset resets globals in the modfetch package, so previous loads don't affect // contents of go.sum files. func Reset() { GoSumFile = "" WorkspaceGoSumFiles = nil // Uses of lookupCache and downloadCache both can call checkModSum, // which in turn sets the used bit on goSum.status for modules. // Reset them so used can be computed properly. lookupCache = par.Cache[lookupCacheKey, Repo]{} downloadCache = par.ErrCache[module.Version, string]{} // Clear all fields on goSum. It will be initialized later goSum.mu.Lock() goSum.m = nil goSum.w = nil goSum.status = nil goSum.overwrite = false goSum.enabled = false goSum.mu.Unlock() } // initGoSum initializes the go.sum data. // The boolean it returns reports whether the // use of go.sum is now enabled. // The goSum lock must be held. func initGoSum() (bool, error) { if GoSumFile == "" { return false, nil } if goSum.m != nil { return true, nil } goSum.m = make(map[module.Version][]string) goSum.status = make(map[modSum]modSumStatus) goSum.w = make(map[string]map[module.Version][]string) for _, f := range WorkspaceGoSumFiles { goSum.w[f] = make(map[module.Version][]string) _, err := readGoSumFile(goSum.w[f], f) if err != nil { return false, err } } enabled, err := readGoSumFile(goSum.m, GoSumFile) goSum.enabled = enabled return enabled, err } func readGoSumFile(dst map[module.Version][]string, file string) (bool, error) { var ( data []byte err error ) if actualSumFile, ok := fsys.OverlayPath(file); ok { // Don't lock go.sum if it's part of the overlay. // On Plan 9, locking requires chmod, and we don't want to modify any file // in the overlay. See #44700. data, err = os.ReadFile(actualSumFile) } else { data, err = lockedfile.Read(file) } if err != nil && !os.IsNotExist(err) { return false, err } readGoSum(dst, file, data) return true, nil } // emptyGoModHash is the hash of a 1-file tree containing a 0-length go.mod. 
// A bug caused us to write these into go.sum files for non-modules. // We detect and remove them. const emptyGoModHash = "h1:G7mAYYxgmS0lVkHyy2hEOLQCFB0DlQFTMLWggykrydY=" // readGoSum parses data, which is the content of file, // and adds it to goSum.m. The goSum lock must be held. func readGoSum(dst map[module.Version][]string, file string, data []byte) { lineno := 0 for len(data) > 0 { var line []byte lineno++ i := bytes.IndexByte(data, '\n') if i < 0 { line, data = data, nil } else { line, data = data[:i], data[i+1:] } f := strings.Fields(string(line)) if len(f) == 0 { // blank line; skip it continue } if len(f) != 3 { if cfg.CmdName == "mod tidy" { // ignore malformed line so that go mod tidy can fix go.sum continue } else { base.Fatalf("malformed go.sum:\n%s:%d: wrong number of fields %v\n", file, lineno, len(f)) } } if f[2] == emptyGoModHash { // Old bug; drop it. continue } mod := module.Version{Path: f[0], Version: f[1]} dst[mod] = append(dst[mod], f[2]) } } // HaveSum returns true if the go.sum file contains an entry for mod. // The entry's hash must be generated with a known hash algorithm. // mod.Version may have a "/go.mod" suffix to distinguish sums for // .mod and .zip files. func HaveSum(mod module.Version) bool { goSum.mu.Lock() defer goSum.mu.Unlock() inited, err := initGoSum() if err != nil || !inited { return false } for _, goSums := range goSum.w { for _, h := range goSums[mod] { if !strings.HasPrefix(h, "h1:") { continue } if !goSum.status[modSum{mod, h}].dirty { return true } } } for _, h := range goSum.m[mod] { if !strings.HasPrefix(h, "h1:") { continue } if !goSum.status[modSum{mod, h}].dirty { return true } } return false } // RecordedSum returns the sum if the go.sum file contains an entry for mod. // The boolean reports true if an entry was found or // false if no entry found or two conflicting sums are found. // The entry's hash must be generated with a known hash algorithm. // mod.Version may have a "/go.mod" suffix to distinguish sums for // .mod and .zip files. func RecordedSum(mod module.Version) (sum string, ok bool) { goSum.mu.Lock() defer goSum.mu.Unlock() inited, err := initGoSum() foundSum := "" if err != nil || !inited { return "", false } for _, goSums := range goSum.w { for _, h := range goSums[mod] { if !strings.HasPrefix(h, "h1:") { continue } if !goSum.status[modSum{mod, h}].dirty { if foundSum != "" && foundSum != h { // conflicting sums exist return "", false } foundSum = h } } } for _, h := range goSum.m[mod] { if !strings.HasPrefix(h, "h1:") { continue } if !goSum.status[modSum{mod, h}].dirty { if foundSum != "" && foundSum != h { // conflicting sums exist return "", false } foundSum = h } } return foundSum, true } // checkMod checks the given module's checksum and Go version. func checkMod(ctx context.Context, mod module.Version) { // Do the file I/O before acquiring the go.sum lock. ziphash, err := CachePath(ctx, mod, "ziphash") if err != nil { base.Fatalf("verifying %v", module.VersionError(mod, err)) } data, err := lockedfile.Read(ziphash) if err != nil { base.Fatalf("verifying %v", module.VersionError(mod, err)) } data = bytes.TrimSpace(data) if !isValidSum(data) { // Recreate ziphash file from zip file and use that to check the mod sum. 
zip, err := CachePath(ctx, mod, "zip") if err != nil { base.Fatalf("verifying %v", module.VersionError(mod, err)) } err = hashZip(mod, zip, ziphash) if err != nil { base.Fatalf("verifying %v", module.VersionError(mod, err)) } return } h := string(data) if !strings.HasPrefix(h, "h1:") { base.Fatalf("verifying %v", module.VersionError(mod, fmt.Errorf("unexpected ziphash: %q", h))) } if err := checkModSum(mod, h); err != nil { base.Fatalf("%s", err) } } // goModSum returns the checksum for the go.mod contents. func goModSum(data []byte) (string, error) { return dirhash.Hash1([]string{"go.mod"}, func(string) (io.ReadCloser, error) { return io.NopCloser(bytes.NewReader(data)), nil }) } // checkGoMod checks the given module's go.mod checksum; // data is the go.mod content. func checkGoMod(path, version string, data []byte) error { h, err := goModSum(data) if err != nil { return &module.ModuleError{Path: path, Version: version, Err: fmt.Errorf("verifying go.mod: %v", err)} } return checkModSum(module.Version{Path: path, Version: version + "/go.mod"}, h) } // checkModSum checks that the recorded checksum for mod is h. // // mod.Version may have the additional suffix "/go.mod" to request the checksum // for the module's go.mod file only. func checkModSum(mod module.Version, h string) error { // We lock goSum when manipulating it, // but we arrange to release the lock when calling checkSumDB, // so that parallel calls to checkModHash can execute parallel calls // to checkSumDB. // Check whether mod+h is listed in go.sum already. If so, we're done. goSum.mu.Lock() inited, err := initGoSum() if err != nil { goSum.mu.Unlock() return err } done := inited && haveModSumLocked(mod, h) if inited { st := goSum.status[modSum{mod, h}] st.used = true goSum.status[modSum{mod, h}] = st } goSum.mu.Unlock() if done { return nil } // Not listed, so we want to add them. // Consult checksum database if appropriate. if useSumDB(mod) { // Calls base.Fatalf if mismatch detected. if err := checkSumDB(mod, h); err != nil { return err } } // Add mod+h to go.sum, if it hasn't appeared already. if inited { goSum.mu.Lock() addModSumLocked(mod, h) st := goSum.status[modSum{mod, h}] st.dirty = true goSum.status[modSum{mod, h}] = st goSum.mu.Unlock() } return nil } // haveModSumLocked reports whether the pair mod,h is already listed in go.sum. // If it finds a conflicting pair instead, it calls base.Fatalf. // goSum.mu must be locked. func haveModSumLocked(mod module.Version, h string) bool { sumFileName := "go.sum" if strings.HasSuffix(GoSumFile, "go.work.sum") { sumFileName = "go.work.sum" } for _, vh := range goSum.m[mod] { if h == vh { return true } if strings.HasPrefix(vh, "h1:") { base.Fatalf("verifying %s@%s: checksum mismatch\n\tdownloaded: %v\n\t%s: %v"+goSumMismatch, mod.Path, mod.Version, h, sumFileName, vh) } } // Also check workspace sums. foundMatch := false // Check sums from all files in case there are conflicts between // the files. for goSumFile, goSums := range goSum.w { for _, vh := range goSums[mod] { if h == vh { foundMatch = true } else if strings.HasPrefix(vh, "h1:") { base.Fatalf("verifying %s@%s: checksum mismatch\n\tdownloaded: %v\n\t%s: %v"+goSumMismatch, mod.Path, mod.Version, h, goSumFile, vh) } } } return foundMatch } // addModSumLocked adds the pair mod,h to go.sum. // goSum.mu must be locked. 
func addModSumLocked(mod module.Version, h string) { if haveModSumLocked(mod, h) { return } if len(goSum.m[mod]) > 0 { fmt.Fprintf(os.Stderr, "warning: verifying %s@%s: unknown hashes in go.sum: %v; adding %v"+hashVersionMismatch, mod.Path, mod.Version, strings.Join(goSum.m[mod], ", "), h) } goSum.m[mod] = append(goSum.m[mod], h) } // checkSumDB checks the mod, h pair against the Go checksum database. // It calls base.Fatalf if the hash is to be rejected. func checkSumDB(mod module.Version, h string) error { modWithoutSuffix := mod noun := "module" if before, found := strings.CutSuffix(mod.Version, "/go.mod"); found { noun = "go.mod" modWithoutSuffix.Version = before } db, lines, err := lookupSumDB(mod) if err != nil { return module.VersionError(modWithoutSuffix, fmt.Errorf("verifying %s: %v", noun, err)) } have := mod.Path + " " + mod.Version + " " + h prefix := mod.Path + " " + mod.Version + " h1:" for _, line := range lines { if line == have { return nil } if strings.HasPrefix(line, prefix) { return module.VersionError(modWithoutSuffix, fmt.Errorf("verifying %s: checksum mismatch\n\tdownloaded: %v\n\t%s: %v"+sumdbMismatch, noun, h, db, line[len(prefix)-len("h1:"):])) } } return nil } // Sum returns the checksum for the downloaded copy of the given module, // if present in the download cache. func Sum(ctx context.Context, mod module.Version) string { if cfg.GOMODCACHE == "" { // Do not use current directory. return "" } ziphash, err := CachePath(ctx, mod, "ziphash") if err != nil { return "" } data, err := lockedfile.Read(ziphash) if err != nil { return "" } data = bytes.TrimSpace(data) if !isValidSum(data) { return "" } return string(data) } // isValidSum returns true if data is the valid contents of a zip hash file. // Certain critical files are written to disk by first truncating // then writing the actual bytes, so that if the write fails // the corrupt file should contain at least one of the null // bytes written by the truncate operation. func isValidSum(data []byte) bool { if bytes.IndexByte(data, '\000') >= 0 { return false } if len(data) != len("h1:")+base64.StdEncoding.EncodedLen(sha256.Size) { return false } return true } var ErrGoSumDirty = errors.New("updates to go.sum needed, disabled by -mod=readonly") // WriteGoSum writes the go.sum file if it needs to be updated. // // keep is used to check whether a newly added sum should be saved in go.sum. // It should have entries for both module content sums and go.mod sums // (version ends with "/go.mod"). Existing sums will be preserved unless they // have been marked for deletion with TrimGoSum. func WriteGoSum(ctx context.Context, keep map[module.Version]bool, readonly bool) error { goSum.mu.Lock() defer goSum.mu.Unlock() // If we haven't read the go.sum file yet, don't bother writing it. if !goSum.enabled { return nil } // Check whether we need to add sums for which keep[m] is true or remove // unused sums marked with TrimGoSum. If there are no changes to make, // just return without opening go.sum. dirty := false Outer: for m, hs := range goSum.m { for _, h := range hs { st := goSum.status[modSum{m, h}] if st.dirty && (!st.used || keep[m]) { dirty = true break Outer } } } if !dirty { return nil } if readonly { return ErrGoSumDirty } if _, ok := fsys.OverlayPath(GoSumFile); ok { base.Fatalf("go: updates to go.sum needed, but go.sum is part of the overlay specified with -overlay") } // Make a best-effort attempt to acquire the side lock, only to exclude // previous versions of the 'go' command from making simultaneous edits. 
if unlock, err := SideLock(ctx); err == nil { defer unlock() } err := lockedfile.Transform(GoSumFile, func(data []byte) ([]byte, error) { tidyGoSum := tidyGoSum(data, keep) return tidyGoSum, nil }) if err != nil { return fmt.Errorf("updating go.sum: %w", err) } goSum.status = make(map[modSum]modSumStatus) goSum.overwrite = false return nil } // TidyGoSum returns a tidy version of the go.sum file. // A missing go.sum file is treated as if empty. func TidyGoSum(keep map[module.Version]bool) (before, after []byte) { goSum.mu.Lock() defer goSum.mu.Unlock() before, err := lockedfile.Read(GoSumFile) if err != nil && !errors.Is(err, fs.ErrNotExist) { base.Fatalf("reading go.sum: %v", err) } after = tidyGoSum(before, keep) return before, after } // tidyGoSum returns a tidy version of the go.sum file. // The goSum lock must be held. func tidyGoSum(data []byte, keep map[module.Version]bool) []byte { if !goSum.overwrite { // Incorporate any sums added by other processes in the meantime. // Add only the sums that we actually checked: the user may have edited or // truncated the file to remove erroneous hashes, and we shouldn't restore // them without good reason. goSum.m = make(map[module.Version][]string, len(goSum.m)) readGoSum(goSum.m, GoSumFile, data) for ms, st := range goSum.status { if st.used && !sumInWorkspaceModulesLocked(ms.mod) { addModSumLocked(ms.mod, ms.sum) } } } var mods []module.Version for m := range goSum.m { mods = append(mods, m) } module.Sort(mods) var buf bytes.Buffer for _, m := range mods { list := goSum.m[m] sort.Strings(list) str.Uniq(&list) for _, h := range list { st := goSum.status[modSum{m, h}] if (!st.dirty || (st.used && keep[m])) && !sumInWorkspaceModulesLocked(m) { fmt.Fprintf(&buf, "%s %s %s\n", m.Path, m.Version, h) } } } return buf.Bytes() } func sumInWorkspaceModulesLocked(m module.Version) bool { for _, goSums := range goSum.w { if _, ok := goSums[m]; ok { return true } } return false } // TrimGoSum trims go.sum to contain only the modules needed for reproducible // builds. // // keep is used to check whether a sum should be retained in go.mod. It should // have entries for both module content sums and go.mod sums (version ends // with "/go.mod"). func TrimGoSum(keep map[module.Version]bool) { goSum.mu.Lock() defer goSum.mu.Unlock() inited, err := initGoSum() if err != nil { base.Fatalf("%s", err) } if !inited { return } for m, hs := range goSum.m { if !keep[m] { for _, h := range hs { goSum.status[modSum{m, h}] = modSumStatus{used: false, dirty: true} } goSum.overwrite = true } } } const goSumMismatch = ` SECURITY ERROR This download does NOT match an earlier download recorded in go.sum. The bits may have been replaced on the origin server, or an attacker may have intercepted the download attempt. For more information, see 'go help module-auth'. ` const sumdbMismatch = ` SECURITY ERROR This download does NOT match the one reported by the checksum server. The bits may have been replaced on the origin server, or an attacker may have intercepted the download attempt. For more information, see 'go help module-auth'. ` const hashVersionMismatch = ` SECURITY WARNING This download is listed in go.sum, but using an unknown hash algorithm. The download cannot be verified. For more information, see 'go help module-auth'. 
` var HelpModuleAuth = &base.Command{ UsageLine: "module-auth", Short: "module authentication using go.sum", Long: ` When the go command downloads a module zip file or go.mod file into the module cache, it computes a cryptographic hash and compares it with a known value to verify the file hasn't changed since it was first downloaded. Known hashes are stored in a file in the module root directory named go.sum. Hashes may also be downloaded from the checksum database depending on the values of GOSUMDB, GOPRIVATE, and GONOSUMDB. For details, see https://golang.org/ref/mod#authenticating. `, } var HelpPrivate = &base.Command{ UsageLine: "private", Short: "configuration for downloading non-public code", Long: ` The go command defaults to downloading modules from the public Go module mirror at proxy.golang.org. It also defaults to validating downloaded modules, regardless of source, against the public Go checksum database at sum.golang.org. These defaults work well for publicly available source code. The GOPRIVATE environment variable controls which modules the go command considers to be private (not available publicly) and should therefore not use the proxy or checksum database. The variable is a comma-separated list of glob patterns (in the syntax of Go's path.Match) of module path prefixes. For example, GOPRIVATE=*.corp.example.com,rsc.io/private causes the go command to treat as private any module with a path prefix matching either pattern, including git.corp.example.com/xyzzy, rsc.io/private, and rsc.io/private/quux. For fine-grained control over module download and validation, the GONOPROXY and GONOSUMDB environment variables accept the same kind of glob list and override GOPRIVATE for the specific decision of whether to use the proxy and checksum database, respectively. For example, if a company ran a module proxy serving private modules, users would configure go using: GOPRIVATE=*.corp.example.com GOPROXY=proxy.example.com GONOPROXY=none The GOPRIVATE variable is also used to define the "public" and "private" patterns for the GOVCS variable; see 'go help vcs'. For that usage, GOPRIVATE applies even in GOPATH mode. In that case, it matches import paths instead of module paths. The 'go env -w' command (see 'go help env') can be used to set these variables for future go command invocations. For more details, see https://golang.org/ref/mod#private-modules. `, }
go/src/cmd/go/internal/modfetch/fetch.go/0
{ "file_path": "go/src/cmd/go/internal/modfetch/fetch.go", "repo_id": "go", "token_count": 11251 }
137
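hashZip above verifies downloads by hashing the module zip with golang.org/x/mod/sumdb/dirhash, the same API called in the file (dirhash.HashZip with dirhash.DefaultHash). A minimal sketch of computing the go.sum-style "h1:" value for a zip already on disk; the zip path is a placeholder:

package main

import (
    "fmt"
    "log"

    "golang.org/x/mod/sumdb/dirhash"
)

func main() {
    // HashZip hashes the names and contents of the files inside the zip,
    // producing the "h1:" value recorded in go.sum for module content.
    sum, err := dirhash.HashZip("example.com_mod_@v_v1.0.0.zip", dirhash.DefaultHash)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(sum) // prints a value of the form "h1:..."
}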
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file is a lightly modified copy of go/build/syslist_test.go.

package modindex

// knownOS is the list of past, present, and future known GOOS values.
// Do not remove from this list, as it is used for filename matching.
// If you add an entry to this list, look at unixOS, below.
var knownOS = map[string]bool{
    "aix":       true,
    "android":   true,
    "darwin":    true,
    "dragonfly": true,
    "freebsd":   true,
    "hurd":      true,
    "illumos":   true,
    "ios":       true,
    "js":        true,
    "linux":     true,
    "nacl":      true,
    "netbsd":    true,
    "openbsd":   true,
    "plan9":     true,
    "solaris":   true,
    "wasip1":    true,
    "windows":   true,
    "zos":       true,
}

// unixOS is the set of GOOS values matched by the "unix" build tag.
// This is not used for filename matching.
// This list also appears in cmd/dist/build.go.
var unixOS = map[string]bool{
    "aix":       true,
    "android":   true,
    "darwin":    true,
    "dragonfly": true,
    "freebsd":   true,
    "hurd":      true,
    "illumos":   true,
    "ios":       true,
    "linux":     true,
    "netbsd":    true,
    "openbsd":   true,
    "solaris":   true,
}

// knownArch is the list of past, present, and future known GOARCH values.
// Do not remove from this list, as it is used for filename matching.
var knownArch = map[string]bool{
    "386":         true,
    "amd64":       true,
    "amd64p32":    true,
    "arm":         true,
    "armbe":       true,
    "arm64":       true,
    "arm64be":     true,
    "loong64":     true,
    "mips":        true,
    "mipsle":      true,
    "mips64":      true,
    "mips64le":    true,
    "mips64p32":   true,
    "mips64p32le": true,
    "ppc":         true,
    "ppc64":       true,
    "ppc64le":     true,
    "riscv":       true,
    "riscv64":     true,
    "s390":        true,
    "s390x":       true,
    "sparc":       true,
    "sparc64":     true,
    "wasm":        true,
}
go/src/cmd/go/internal/modindex/syslist.go/0
{ "file_path": "go/src/cmd/go/internal/modindex/syslist.go", "repo_id": "go", "token_count": 930 }
138
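These tables exist for filename matching: a file like x_darwin.go (the testdata record above) is implicitly constrained to GOOS=darwin. A simplified sketch in the spirit of that rule; the real matcher only honors the last one or two underscore-separated elements, so this deliberately looser version is illustration only:

package main

import (
    "fmt"
    "strings"
)

// Abbreviated tables; the full lists appear in the file above.
var knownOS = map[string]bool{"darwin": true, "linux": true, "windows": true}
var knownArch = map[string]bool{"amd64": true, "arm64": true, "riscv64": true}

// implicitConstraint reports the GOOS/GOARCH a filename implies, if any.
func implicitConstraint(name string) (goos, goarch string) {
    name = strings.TrimSuffix(name, ".go")
    parts := strings.Split(name, "_")
    for _, p := range parts[1:] { // skip the base name before the first _
        switch {
        case knownOS[p]:
            goos = p
        case knownArch[p]:
            goarch = p
        }
    }
    return goos, goarch
}

func main() {
    fmt.Println(implicitConstraint("x_darwin.go"))        // darwin
    fmt.Println(implicitConstraint("asm_linux_amd64.go")) // linux amd64
}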
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package modload import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "os" "runtime" "strings" "cmd/go/internal/base" "cmd/go/internal/cfg" "cmd/go/internal/gover" "cmd/go/internal/modfetch/codehost" "cmd/go/internal/modinfo" "cmd/go/internal/search" "cmd/internal/pkgpattern" "golang.org/x/mod/module" ) type ListMode int const ( ListU ListMode = 1 << iota ListRetracted ListDeprecated ListVersions ListRetractedVersions ) // ListModules returns a description of the modules matching args, if known, // along with any error preventing additional matches from being identified. // // The returned slice can be nonempty even if the error is non-nil. func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile string) ([]*modinfo.ModulePublic, error) { var reuse map[module.Version]*modinfo.ModulePublic if reuseFile != "" { data, err := os.ReadFile(reuseFile) if err != nil { return nil, err } dec := json.NewDecoder(bytes.NewReader(data)) reuse = make(map[module.Version]*modinfo.ModulePublic) for { var m modinfo.ModulePublic if err := dec.Decode(&m); err != nil { if err == io.EOF { break } return nil, fmt.Errorf("parsing %s: %v", reuseFile, err) } if m.Origin == nil { continue } m.Reuse = true reuse[module.Version{Path: m.Path, Version: m.Version}] = &m if m.Query != "" { reuse[module.Version{Path: m.Path, Version: m.Query}] = &m } } } rs, mods, err := listModules(ctx, LoadModFile(ctx), args, mode, reuse) type token struct{} sem := make(chan token, runtime.GOMAXPROCS(0)) if mode != 0 { for _, m := range mods { if m.Reuse { continue } add := func(m *modinfo.ModulePublic) { sem <- token{} go func() { if mode&ListU != 0 { addUpdate(ctx, m) } if mode&ListVersions != 0 { addVersions(ctx, m, mode&ListRetractedVersions != 0) } if mode&ListRetracted != 0 { addRetraction(ctx, m) } if mode&ListDeprecated != 0 { addDeprecation(ctx, m) } <-sem }() } add(m) if m.Replace != nil { add(m.Replace) } } } // Fill semaphore channel to wait for all tasks to finish. for n := cap(sem); n > 0; n-- { sem <- token{} } if err == nil { requirements = rs // TODO(#61605): The extra ListU clause fixes a problem with Go 1.21rc3 // where "go mod tidy" and "go list -m -u all" fight over whether the go.sum // should be considered up-to-date. The fix for now is to always treat the // go.sum as up-to-date during list -m -u. Probably the right fix is more targeted, // but in general list -u is looking up other checksums in the checksum database // that won't be necessary later, so it makes sense not to write the go.sum back out. 
if !ExplicitWriteGoMod && mode&ListU == 0 { err = commitRequirements(ctx, WriteOpts{}) } } return mods, err } func listModules(ctx context.Context, rs *Requirements, args []string, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) (_ *Requirements, mods []*modinfo.ModulePublic, mgErr error) { if len(args) == 0 { var ms []*modinfo.ModulePublic for _, m := range MainModules.Versions() { if gover.IsToolchain(m.Path) { continue } ms = append(ms, moduleInfo(ctx, rs, m, mode, reuse)) } return rs, ms, nil } needFullGraph := false for _, arg := range args { if strings.Contains(arg, `\`) { base.Fatalf("go: module paths never use backslash") } if search.IsRelativePath(arg) { base.Fatalf("go: cannot use relative path %s to specify module", arg) } if arg == "all" || strings.Contains(arg, "...") { needFullGraph = true if !HasModRoot() { base.Fatalf("go: cannot match %q: %v", arg, ErrNoModRoot) } continue } if path, vers, found := strings.Cut(arg, "@"); found { if vers == "upgrade" || vers == "patch" { if _, ok := rs.rootSelected(path); !ok || rs.pruning == unpruned { needFullGraph = true if !HasModRoot() { base.Fatalf("go: cannot match %q: %v", arg, ErrNoModRoot) } } } continue } if _, ok := rs.rootSelected(arg); !ok || rs.pruning == unpruned { needFullGraph = true if mode&ListVersions == 0 && !HasModRoot() { base.Fatalf("go: cannot match %q without -versions or an explicit version: %v", arg, ErrNoModRoot) } } } var mg *ModuleGraph if needFullGraph { rs, mg, mgErr = expandGraph(ctx, rs) } matchedModule := map[module.Version]bool{} for _, arg := range args { if path, vers, found := strings.Cut(arg, "@"); found { var current string if mg == nil { current, _ = rs.rootSelected(path) } else { current = mg.Selected(path) } if current == "none" && mgErr != nil { if vers == "upgrade" || vers == "patch" { // The module graph is incomplete, so we don't know what version we're // actually upgrading from. // mgErr is already set, so just skip this module. continue } } allowed := CheckAllowed if IsRevisionQuery(path, vers) || mode&ListRetracted != 0 { // Allow excluded and retracted versions if the user asked for a // specific revision or used 'go list -retracted'. allowed = nil } info, err := queryReuse(ctx, path, vers, current, allowed, reuse) if err != nil { var origin *codehost.Origin if info != nil { origin = info.Origin } mods = append(mods, &modinfo.ModulePublic{ Path: path, Version: vers, Error: modinfoError(path, vers, err), Origin: origin, }) continue } // Indicate that m was resolved from outside of rs by passing a nil // *Requirements instead. var noRS *Requirements mod := moduleInfo(ctx, noRS, module.Version{Path: path, Version: info.Version}, mode, reuse) if vers != mod.Version { mod.Query = vers } mod.Origin = info.Origin mods = append(mods, mod) continue } // Module path or pattern. var match func(string) bool if arg == "all" { match = func(p string) bool { return !gover.IsToolchain(p) } } else if strings.Contains(arg, "...") { mp := pkgpattern.MatchPattern(arg) match = func(p string) bool { return mp(p) && !gover.IsToolchain(p) } } else { var v string if mg == nil { var ok bool v, ok = rs.rootSelected(arg) if !ok { // We checked rootSelected(arg) in the earlier args loop, so if there // is no such root we should have loaded a non-nil mg. panic(fmt.Sprintf("internal error: root requirement expected but not found for %v", arg)) } } else { v = mg.Selected(arg) } if v == "none" && mgErr != nil { // mgErr is already set, so just skip this module. 
continue } if v != "none" { mods = append(mods, moduleInfo(ctx, rs, module.Version{Path: arg, Version: v}, mode, reuse)) } else if cfg.BuildMod == "vendor" { // In vendor mode, we can't determine whether a missing module is “a // known dependency” because the module graph is incomplete. // Give a more explicit error message. mods = append(mods, &modinfo.ModulePublic{ Path: arg, Error: modinfoError(arg, "", errors.New("can't resolve module using the vendor directory\n\t(Use -mod=mod or -mod=readonly to bypass.)")), }) } else if mode&ListVersions != 0 { // Don't make the user provide an explicit '@latest' when they're // explicitly asking what the available versions are. Instead, return a // module with version "none", to which we can add the requested list. mods = append(mods, &modinfo.ModulePublic{Path: arg}) } else { mods = append(mods, &modinfo.ModulePublic{ Path: arg, Error: modinfoError(arg, "", errors.New("not a known dependency")), }) } continue } matched := false for _, m := range mg.BuildList() { if match(m.Path) { matched = true if !matchedModule[m] { matchedModule[m] = true mods = append(mods, moduleInfo(ctx, rs, m, mode, reuse)) } } } if !matched { fmt.Fprintf(os.Stderr, "warning: pattern %q matched no module dependencies\n", arg) } } return rs, mods, mgErr } // modinfoError wraps an error to create an error message in // modinfo.ModuleError with minimal redundancy. func modinfoError(path, vers string, err error) *modinfo.ModuleError { var nerr *NoMatchingVersionError var merr *module.ModuleError if errors.As(err, &nerr) { // NoMatchingVersionError contains the query, so we don't mention the // query again in ModuleError. err = &module.ModuleError{Path: path, Err: err} } else if !errors.As(err, &merr) { // If the error does not contain path and version, wrap it in a // module.ModuleError. err = &module.ModuleError{Path: path, Version: vers, Err: err} } return &modinfo.ModuleError{Err: err.Error()} }
go/src/cmd/go/internal/modload/list.go/0
{ "file_path": "go/src/cmd/go/internal/modload/list.go", "repo_id": "go", "token_count": 3518 }
139
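The argument handling above pivots on strings.Cut to split 'go list -m' arguments of the form path@version from bare paths and patterns. A small standalone sketch of that split:

package main

import (
    "fmt"
    "strings"
)

func main() {
    args := []string{"golang.org/x/mod@v0.17.0", "all", "example.com/m@upgrade"}
    for _, arg := range args {
        if path, vers, found := strings.Cut(arg, "@"); found {
            fmt.Printf("module %s, version query %q\n", path, vers)
        } else {
            fmt.Printf("pattern or path %q (no version)\n", arg)
        }
    }
}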
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package par

import "fmt"

// Queue manages a set of work items to be executed in parallel. The number of
// active work items is limited, and excess items are queued sequentially.
type Queue struct {
    maxActive int
    st        chan queueState
}

type queueState struct {
    active  int // number of goroutines processing work; always nonzero when len(backlog) > 0
    backlog []func()
    idle    chan struct{} // if non-nil, closed when active becomes 0
}

// NewQueue returns a Queue that executes up to maxActive items in parallel.
//
// maxActive must be positive.
func NewQueue(maxActive int) *Queue {
    if maxActive < 1 {
        panic(fmt.Sprintf("par.NewQueue called with nonpositive limit (%d)", maxActive))
    }

    q := &Queue{
        maxActive: maxActive,
        st:        make(chan queueState, 1),
    }
    q.st <- queueState{}
    return q
}

// Add adds f as a work item in the queue.
//
// Add returns immediately, but the queue will be marked as non-idle until after
// f (and any subsequently-added work) has completed.
func (q *Queue) Add(f func()) {
    st := <-q.st
    if st.active == q.maxActive {
        st.backlog = append(st.backlog, f)
        q.st <- st
        return
    }
    if st.active == 0 {
        // Mark q as non-idle.
        st.idle = nil
    }
    st.active++
    q.st <- st

    go func() {
        for {
            f()

            st := <-q.st
            if len(st.backlog) == 0 {
                if st.active--; st.active == 0 && st.idle != nil {
                    close(st.idle)
                }
                q.st <- st
                return
            }
            f, st.backlog = st.backlog[0], st.backlog[1:]
            q.st <- st
        }
    }()
}

// Idle returns a channel that will be closed when q has no (active or enqueued)
// work outstanding.
func (q *Queue) Idle() <-chan struct{} {
    st := <-q.st
    defer func() { q.st <- st }()

    if st.idle == nil {
        st.idle = make(chan struct{})
        if st.active == 0 {
            close(st.idle)
        }
    }

    return st.idle
}
go/src/cmd/go/internal/par/queue.go/0
{ "file_path": "go/src/cmd/go/internal/par/queue.go", "repo_id": "go", "token_count": 747 }
140
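A sketch of how a caller can drive the Queue above: enqueue work, then block on Idle() until everything has drained. This is assumed usage for illustration, not taken from cmd/go itself, and the import path only resolves from inside cmd/go since the package is internal:

package main

import (
    "fmt"
    "sync/atomic"

    "cmd/go/internal/par" // internal package; path shown for illustration
)

func main() {
    q := par.NewQueue(4) // at most 4 items run concurrently

    var done atomic.Int64
    for i := 0; i < 100; i++ {
        q.Add(func() { done.Add(1) })
    }

    <-q.Idle() // closed once all queued and active work has completed
    fmt.Println("completed:", done.Load())
}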
// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package scripttest adapts the script engine for use in tests. package scripttest import ( "bufio" "cmd/go/internal/cfg" "cmd/go/internal/script" "errors" "io" "strings" "testing" ) // DefaultCmds returns a set of broadly useful script commands. // // This set includes all of the commands in script.DefaultCmds, // as well as a "skip" command that halts the script and causes the // testing.TB passed to Run to be skipped. func DefaultCmds() map[string]script.Cmd { cmds := script.DefaultCmds() cmds["skip"] = Skip() return cmds } // DefaultConds returns a set of broadly useful script conditions. // // This set includes all of the conditions in script.DefaultConds, // as well as: // // - Conditions of the form "exec:foo" are active when the executable "foo" is // found in the test process's PATH, and inactive when the executable is // not found. // // - "short" is active when testing.Short() is true. // // - "verbose" is active when testing.Verbose() is true. func DefaultConds() map[string]script.Cond { conds := script.DefaultConds() conds["exec"] = CachedExec() conds["short"] = script.BoolCondition("testing.Short()", testing.Short()) conds["verbose"] = script.BoolCondition("testing.Verbose()", testing.Verbose()) return conds } // Run runs the script from the given filename starting at the given initial state. // When the script completes, Run closes the state. func Run(t testing.TB, e *script.Engine, s *script.State, filename string, testScript io.Reader) { t.Helper() err := func() (err error) { log := new(strings.Builder) log.WriteString("\n") // Start output on a new line for consistent indentation. // Defer writing to the test log in case the script engine panics during execution, // but write the log before we write the final "skip" or "FAIL" line. t.Helper() defer func() { t.Helper() if closeErr := s.CloseAndWait(log); err == nil { err = closeErr } if log.Len() > 0 { t.Log(strings.TrimSuffix(log.String(), "\n")) } }() if testing.Verbose() { // Add the environment to the start of the script log. wait, err := script.Env().Run(s) if err != nil { t.Fatal(err) } if wait != nil { stdout, stderr, err := wait(s) if err != nil { t.Fatalf("env: %v\n%s", err, stderr) } if len(stdout) > 0 { s.Logf("%s\n", stdout) } } } return e.Execute(s, filename, bufio.NewReader(testScript), log) }() if skip := (skipError{}); errors.As(err, &skip) { if skip.msg == "" { t.Skip("SKIP") } else { t.Skipf("SKIP: %v", skip.msg) } } if err != nil { t.Errorf("FAIL: %v", err) } } // Skip returns a sentinel error that causes Run to mark the test as skipped. func Skip() script.Cmd { return script.Command( script.CmdUsage{ Summary: "skip the current test", Args: "[msg]", }, func(_ *script.State, args ...string) (script.WaitFunc, error) { if len(args) > 1 { return nil, script.ErrUsage } if len(args) == 0 { return nil, skipError{""} } return nil, skipError{args[0]} }) } type skipError struct { msg string } func (s skipError) Error() string { if s.msg == "" { return "skip" } return s.msg } // CachedExec returns a Condition that reports whether the PATH of the test // binary itself (not the script's current environment) contains the named // executable. func CachedExec() script.Cond { return script.CachedCondition( "<suffix> names an executable in the test binary's PATH", func(name string) (bool, error) { _, err := cfg.LookPath(name) return err == nil, nil }) }
go/src/cmd/go/internal/script/scripttest/scripttest.go/0
{ "file_path": "go/src/cmd/go/internal/script/scripttest/scripttest.go", "repo_id": "go", "token_count": 1372 }
141
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package genflags

import (
    "flag"
    "strings"
    "testing"
)

// ShortTestFlags returns the set of "-test." flag shorthand names that end
// users may pass to 'go test'.
func ShortTestFlags() []string {
    testing.Init()

    var names []string
    flag.VisitAll(func(f *flag.Flag) {
        var name string
        var found bool
        if name, found = strings.CutPrefix(f.Name, "test."); !found {
            return
        }

        switch name {
        case "testlogfile", "paniconexit0", "fuzzcachedir", "fuzzworker", "gocoverdir":
            // These flags are only for use by cmd/go.
        default:
            names = append(names, name)
        }
    })

    return names
}
go/src/cmd/go/internal/test/internal/genflags/testflag.go/0
{ "file_path": "go/src/cmd/go/internal/test/internal/genflags/testflag.go", "repo_id": "go", "token_count": 263 }
142
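The enumeration technique above is reusable outside cmd/go: testing.Init registers the -test.* flags on the default FlagSet, and flag.VisitAll walks everything currently registered. A standalone sketch:

package main

import (
    "flag"
    "fmt"
    "strings"
    "testing"
)

func main() {
    testing.Init() // registers -test.v, -test.run, -test.count, ...
    flag.VisitAll(func(f *flag.Flag) {
        if name, ok := strings.CutPrefix(f.Name, "test."); ok {
            fmt.Printf("-%s\t%s\n", name, f.Usage)
        }
    })
}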
// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package vcstest serves the repository scripts in cmd/go/testdata/vcstest // using the [vcweb] script engine. package vcstest import ( "cmd/go/internal/vcs" "cmd/go/internal/vcweb" "cmd/go/internal/web" "crypto/tls" "crypto/x509" "encoding/pem" "fmt" "internal/testenv" "io" "log" "net/http" "net/http/httptest" "net/url" "os" "path/filepath" "testing" ) var Hosts = []string{ "vcs-test.golang.org", } type Server struct { vcweb *vcweb.Server workDir string HTTP *httptest.Server HTTPS *httptest.Server } // NewServer returns a new test-local vcweb server that serves VCS requests // for modules with paths that begin with "vcs-test.golang.org" using the // scripts in cmd/go/testdata/vcstest. func NewServer() (srv *Server, err error) { if vcs.VCSTestRepoURL != "" { panic("vcs URL hooks already set") } scriptDir := filepath.Join(testenv.GOROOT(nil), "src/cmd/go/testdata/vcstest") workDir, err := os.MkdirTemp("", "vcstest") if err != nil { return nil, err } defer func() { if err != nil { os.RemoveAll(workDir) } }() logger := log.Default() if !testing.Verbose() { logger = log.New(io.Discard, "", log.LstdFlags) } handler, err := vcweb.NewServer(scriptDir, workDir, logger) if err != nil { return nil, err } defer func() { if err != nil { handler.Close() } }() srvHTTP := httptest.NewServer(handler) httpURL, err := url.Parse(srvHTTP.URL) if err != nil { return nil, err } defer func() { if err != nil { srvHTTP.Close() } }() srvHTTPS := httptest.NewTLSServer(handler) httpsURL, err := url.Parse(srvHTTPS.URL) if err != nil { return nil, err } defer func() { if err != nil { srvHTTPS.Close() } }() srv = &Server{ vcweb: handler, workDir: workDir, HTTP: srvHTTP, HTTPS: srvHTTPS, } vcs.VCSTestRepoURL = srv.HTTP.URL vcs.VCSTestHosts = Hosts var interceptors []web.Interceptor for _, host := range Hosts { interceptors = append(interceptors, web.Interceptor{Scheme: "http", FromHost: host, ToHost: httpURL.Host, Client: srv.HTTP.Client()}, web.Interceptor{Scheme: "https", FromHost: host, ToHost: httpsURL.Host, Client: srv.HTTPS.Client()}) } web.EnableTestHooks(interceptors) fmt.Fprintln(os.Stderr, "vcs-test.golang.org rerouted to "+srv.HTTP.URL) fmt.Fprintln(os.Stderr, "https://vcs-test.golang.org rerouted to "+srv.HTTPS.URL) return srv, nil } func (srv *Server) Close() error { if vcs.VCSTestRepoURL != srv.HTTP.URL { panic("vcs URL hooks modified before Close") } vcs.VCSTestRepoURL = "" vcs.VCSTestHosts = nil web.DisableTestHooks() srv.HTTP.Close() srv.HTTPS.Close() err := srv.vcweb.Close() if rmErr := os.RemoveAll(srv.workDir); err == nil { err = rmErr } return err } func (srv *Server) WriteCertificateFile() (string, error) { b := pem.EncodeToMemory(&pem.Block{ Type: "CERTIFICATE", Bytes: srv.HTTPS.Certificate().Raw, }) filename := filepath.Join(srv.workDir, "cert.pem") if err := os.WriteFile(filename, b, 0644); err != nil { return "", err } return filename, nil } // TLSClient returns an http.Client that can talk to the httptest.Server // whose certificate is written to the given file path. 
func TLSClient(certFile string) (*http.Client, error) { client := &http.Client{ Transport: http.DefaultTransport.(*http.Transport).Clone(), } pemBytes, err := os.ReadFile(certFile) if err != nil { return nil, err } certpool := x509.NewCertPool() if !certpool.AppendCertsFromPEM(pemBytes) { return nil, fmt.Errorf("no certificates found in %s", certFile) } client.Transport.(*http.Transport).TLSClientConfig = &tls.Config{ RootCAs: certpool, } return client, nil }
go/src/cmd/go/internal/vcweb/vcstest/vcstest.go
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package web var urlTests = []struct { url string filePath string canonicalURL string // If empty, assume equal to url. wantErr string }{ // Examples from https://blogs.msdn.microsoft.com/ie/2006/12/06/file-uris-in-windows/: { url: `file://laptop/My%20Documents/FileSchemeURIs.doc`, filePath: `\\laptop\My Documents\FileSchemeURIs.doc`, }, { url: `file:///C:/Documents%20and%20Settings/davris/FileSchemeURIs.doc`, filePath: `C:\Documents and Settings\davris\FileSchemeURIs.doc`, }, { url: `file:///D:/Program%20Files/Viewer/startup.htm`, filePath: `D:\Program Files\Viewer\startup.htm`, }, { url: `file:///C:/Program%20Files/Music/Web%20Sys/main.html?REQUEST=RADIO`, filePath: `C:\Program Files\Music\Web Sys\main.html`, canonicalURL: `file:///C:/Program%20Files/Music/Web%20Sys/main.html`, }, { url: `file://applib/products/a-b/abc_9/4148.920a/media/start.swf`, filePath: `\\applib\products\a-b\abc_9\4148.920a\media\start.swf`, }, { url: `file:////applib/products/a%2Db/abc%5F9/4148.920a/media/start.swf`, wantErr: "file URL missing drive letter", }, { url: `C:\Program Files\Music\Web Sys\main.html?REQUEST=RADIO`, wantErr: "non-file URL", }, // The example "file://D:\Program Files\Viewer\startup.htm" errors out in // url.Parse, so we substitute a slash-based path for testing instead. { url: `file://D:/Program Files/Viewer/startup.htm`, wantErr: "file URL encodes volume in host field: too few slashes?", }, // The blog post discourages the use of non-ASCII characters because they // depend on the user's current codepage. However, when we are working with Go // strings we assume UTF-8 encoding, and our url package refuses to encode // URLs to non-ASCII strings. { url: `file:///C:/exampleㄓ.txt`, filePath: `C:\exampleㄓ.txt`, canonicalURL: `file:///C:/example%E3%84%93.txt`, }, { url: `file:///C:/example%E3%84%93.txt`, filePath: `C:\exampleㄓ.txt`, }, // Examples from RFC 8089: // We allow the drive-letter variation from section E.2, because it is // simpler to support than not to. However, we do not generate the shorter // form in the reverse direction. { url: `file:c:/path/to/file`, filePath: `c:\path\to\file`, canonicalURL: `file:///c:/path/to/file`, }, // We encode the UNC share name as the authority following section E.3.1, // because that is what the Microsoft blog post explicitly recommends. { url: `file://host.example.com/Share/path/to/file.txt`, filePath: `\\host.example.com\Share\path\to\file.txt`, }, // We decline the four- and five-slash variations from section E.3.2. // The paths in these URLs would change meaning under path.Clean. { url: `file:////host.example.com/path/to/file`, wantErr: "file URL missing drive letter", }, { url: `file://///host.example.com/path/to/file`, wantErr: "file URL missing drive letter", }, }
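The driver that consumes urlTests lives in the package's shared url test file rather than here. The sketch below is illustrative only: it assumes the package's unexported urlToFilePath helper and the "net/url", "strings", and "testing" imports; any deviation from the real driver is on the sketch, not the table.

// Hypothetical sketch of a consumer of urlTests; assumes
// urlToFilePath(*url.URL) (string, error) is defined elsewhere in package web.
func TestURLToFilePathSketch(t *testing.T) {
	for _, tc := range urlTests {
		u, err := url.Parse(tc.url)
		if err != nil {
			t.Errorf("url.Parse(%q): %v", tc.url, err)
			continue
		}
		path, err := urlToFilePath(u)
		if tc.wantErr != "" {
			// An error case: the conversion must fail with the expected message.
			if err == nil || !strings.Contains(err.Error(), tc.wantErr) {
				t.Errorf("urlToFilePath(%v) = %q, %v; want error containing %q", u, path, err, tc.wantErr)
			}
			continue
		}
		if err != nil || path != tc.filePath {
			t.Errorf("urlToFilePath(%v) = %q, %v; want %q, <nil>", u, path, err, tc.filePath)
		}
	}
}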
go/src/cmd/go/internal/web/url_windows_test.go
// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // go work init package workcmd import ( "context" "path/filepath" "cmd/go/internal/base" "cmd/go/internal/fsys" "cmd/go/internal/gover" "cmd/go/internal/modload" "golang.org/x/mod/modfile" ) var cmdInit = &base.Command{ UsageLine: "go work init [moddirs]", Short: "initialize workspace file", Long: `Init initializes and writes a new go.work file in the current directory, in effect creating a new workspace at the current directory. go work init optionally accepts paths to the workspace modules as arguments. If no arguments are given, an empty workspace with no modules will be created. Each argument path is added to a use directive in the go.work file. The current go version will also be listed in the go.work file. See the workspaces reference at https://go.dev/ref/mod#workspaces for more information. `, Run: runInit, } func init() { base.AddChdirFlag(&cmdInit.Flag) base.AddModCommonFlags(&cmdInit.Flag) } func runInit(ctx context.Context, cmd *base.Command, args []string) { modload.InitWorkfile() modload.ForceUseModules = true gowork := modload.WorkFilePath() if gowork == "" { gowork = filepath.Join(base.Cwd(), "go.work") } if _, err := fsys.Stat(gowork); err == nil { base.Fatalf("go: %s already exists", gowork) } goV := gover.Local() // Use current Go version by default wf := new(modfile.WorkFile) wf.Syntax = new(modfile.FileSyntax) wf.AddGoStmt(goV) workUse(ctx, gowork, wf, args) modload.WriteWorkFile(gowork, wf) }
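As a concrete illustration (the directory names and Go version are hypothetical), running `go work init ./app ./tools` in a directory containing those two modules writes a go.work roughly like:

	go 1.21

	use (
		./app
		./tools
	)

The go directive comes from gover.Local(), and each argument becomes a use entry via workUse.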
go/src/cmd/go/internal/workcmd/init.go
This directory holds Go modules served by a Go module proxy
that runs on localhost during tests, both so that tests avoid
requiring specific network servers and so that they run
significantly faster.

A small go get'able test module can be added here by running

	cd cmd/go/testdata
	go run addmod.go path@vers

where path and vers are the module path and version to add here.

For interactive experimentation using this set of modules, run:

	cd cmd/go
	go test -proxy=localhost:1234 &
	export GOPROXY=http://localhost:1234/mod

and then run go commands as usual.

Modules saved to this directory should be small: a few kilobytes at most.
It is acceptable to edit the archives created by addmod.go to remove
or shorten files. It is also acceptable to write module archives by hand:
they need not be backed by some public git repo.

Each module archive is named path_vers.txt, where slashes in path
have been replaced with underscores. The archive must contain
two files ".info" and ".mod", to be served as the info and mod files
in the proxy protocol (see https://research.swtch.com/vgo-module).
The remaining files are served as the content of the module zip file.
The path@vers prefix required of files in the zip file is added
automatically by the proxy: the files in the archive have names
without the prefix, like plain "go.mod", "x.go", and so on.

See ../addmod.go and ../savedir.go for tools to generate txtar files,
although again it is also fine to write them by hand.
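For illustration, a hand-written archive for a hypothetical module example.com/hello at v1.0.0 would live in example.com_hello_v1.0.0.txt and look roughly like this (the txtar "-- name --" markers separate the files; text before the first marker is a comment):

	example.com/hello v1.0.0
	written by hand

	-- .info --
	{"Version":"v1.0.0"}
	-- .mod --
	module example.com/hello
	-- go.mod --
	module example.com/hello
	-- hello.go --
	package hello

	func Hello() string { return "hello, world" }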
go/src/cmd/go/testdata/mod/README
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "bytes" "flag" "internal/diff" "os" "path/filepath" "strings" "testing" "text/scanner" ) var update = flag.Bool("update", false, "update .golden files") // gofmtFlags looks for a comment of the form // // //gofmt flags // // within the first maxLines lines of the given file, // and returns the flags string, if any. Otherwise it // returns the empty string. func gofmtFlags(filename string, maxLines int) string { f, err := os.Open(filename) if err != nil { return "" // ignore errors - they will be found later } defer f.Close() // initialize scanner var s scanner.Scanner s.Init(f) s.Error = func(*scanner.Scanner, string) {} // ignore errors s.Mode = scanner.GoTokens &^ scanner.SkipComments // want comments // look for //gofmt comment for s.Line <= maxLines { switch s.Scan() { case scanner.Comment: const prefix = "//gofmt " if t := s.TokenText(); strings.HasPrefix(t, prefix) { return strings.TrimSpace(t[len(prefix):]) } case scanner.EOF: return "" } } return "" } func runTest(t *testing.T, in, out string) { // process flags *simplifyAST = false *rewriteRule = "" info, err := os.Lstat(in) if err != nil { t.Error(err) return } for _, flag := range strings.Split(gofmtFlags(in, 20), " ") { elts := strings.SplitN(flag, "=", 2) name := elts[0] value := "" if len(elts) == 2 { value = elts[1] } switch name { case "": // no flags case "-r": *rewriteRule = value case "-s": *simplifyAST = true case "-stdin": // fake flag - pretend input is from stdin info = nil default: t.Errorf("unrecognized flag name: %s", name) } } initParserMode() initRewrite() const maxWeight = 2 << 20 var buf, errBuf bytes.Buffer s := newSequencer(maxWeight, &buf, &errBuf) s.Add(fileWeight(in, info), func(r *reporter) error { return processFile(in, info, nil, r) }) if errBuf.Len() > 0 { t.Logf("%q", errBuf.Bytes()) } if s.GetExitCode() != 0 { t.Fail() } expected, err := os.ReadFile(out) if err != nil { t.Error(err) return } if got := buf.Bytes(); !bytes.Equal(got, expected) { if *update { if in != out { if err := os.WriteFile(out, got, 0666); err != nil { t.Error(err) } return } // in == out: don't accidentally destroy input t.Errorf("WARNING: -update did not rewrite input file %s", in) } t.Errorf("(gofmt %s) != %s (see %s.gofmt)\n%s", in, out, in, diff.Diff("expected", expected, "got", got)) if err := os.WriteFile(in+".gofmt", got, 0666); err != nil { t.Error(err) } } } // TestRewrite processes testdata/*.input files and compares them to the // corresponding testdata/*.golden files. The gofmt flags used to process // a file must be provided via a comment of the form // // //gofmt flags // // in the processed file within the first 20 lines, if any. func TestRewrite(t *testing.T) { // determine input files match, err := filepath.Glob("testdata/*.input") if err != nil { t.Fatal(err) } // add larger examples match = append(match, "gofmt.go", "gofmt_test.go") for _, in := range match { name := filepath.Base(in) t.Run(name, func(t *testing.T) { out := in // for files where input and output are identical if strings.HasSuffix(in, ".input") { out = in[:len(in)-len(".input")] + ".golden" } runTest(t, in, out) if in != out && !t.Failed() { // Check idempotence. runTest(t, out, out) } }) } } // Test case for issue 3961. 
func TestCRLF(t *testing.T) { const input = "testdata/crlf.input" // must contain CR/LF's const golden = "testdata/crlf.golden" // must not contain any CR's data, err := os.ReadFile(input) if err != nil { t.Error(err) } if !bytes.Contains(data, []byte("\r\n")) { t.Errorf("%s contains no CR/LF's", input) } data, err = os.ReadFile(golden) if err != nil { t.Error(err) } if bytes.Contains(data, []byte("\r")) { t.Errorf("%s contains CR's", golden) } } func TestBackupFile(t *testing.T) { dir, err := os.MkdirTemp("", "gofmt_test") if err != nil { t.Fatal(err) } defer os.RemoveAll(dir) name, err := backupFile(filepath.Join(dir, "foo.go"), []byte(" package main"), 0644) if err != nil { t.Fatal(err) } t.Logf("Created: %s", name) }
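To make the //gofmt flag plumbing in this test file concrete, here is a hypothetical testdata pair for the -s (simplify) flag; the //gofmt comment sits within the first 20 lines, where gofmtFlags will find it, and -s elides the redundant inner composite-literal types.

	simplify.input:
		//gofmt -s

		package p

		var m = [][]int{[]int{1}, []int{2}}

	simplify.golden:
		//gofmt -s

		package p

		var m = [][]int{{1}, {2}}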
go/src/cmd/gofmt/gofmt_test.go
// package comment package main import ( "errors" "fmt" "io" "log" "math" ) import ( "fmt" "math" "log" "errors" "io" ) // We reset the line numbering to test that // the formatting works independent of line directives //line :19 import ( "errors" "fmt" "io" "log" "math" "fmt" "math" "log" "errors" "io" ) import ( // a block with comments "errors" "fmt" // for Printf "io" // for Reader "log" // for Fatal "math" ) import ( "fmt" // for Printf "math" "log" // for Fatal "errors" "io" // for Reader ) import ( // for Printf "fmt" "math" // for Fatal "log" "errors" // for Reader "io" ) import ( "errors" "fmt" // for Printf "io" // for Reader "log" // for Fatal "math" "fmt" // for Printf "math" "log" // for Fatal "errors" "io" // for Reader ) import ( "fmt" // for Printf "errors" "io" // for Reader "log" // for Fatal "math" "errors" "fmt" // for Printf "io" // for Reader "log" // for Fatal "math" ) // Test deduping and extended sorting import ( a "A" // aA b "A" // bA1 b "A" // bA2 "B" // B . "B" // .B _ "B" // _b "C" a "D" // aD ) import ( "dedup_by_group" "dedup_by_group" ) import ( "fmt" // for Printf /* comment */ io1 "io" /* comment */ io2 "io" /* comment */ "log" ) import ( "fmt" /* comment */ io1 "io" /* comment */ io2 "io" // hello "math" /* right side */ // end ) import ( "errors" // for New "fmt" /* comment */ io1 "io" /* before */ // after io2 "io" // another // end ) import ( "errors" // for New /* left */ "fmt" /* right */ "log" // for Fatal /* left */ "math" /* right */ ) import /* why */ /* comment here? */ ( /* comment */ "fmt" "math" ) // Reset it again //line :100 // Dedup with different import styles import ( "path" . "path" _ "path" pathpkg "path" ) /* comment */ import ( "fmt" "math" // for Abs // This is a new run "errors" "fmt" ) // End an import declaration in the same line // as the last import. See golang.org/issue/33538. // Note: Must be the last (or 2nd last) line of the file. import ( "fmt" "math" )
go/src/cmd/gofmt/testdata/import.golden
//gofmt -r=x+x->2*x // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Rewriting of expressions containing nodes with associated comments to // expressions without those nodes must also eliminate the associated // comments. package p func f(x int) int { _ = 2 * x // this comment remains in the rewrite _ = 2 * x return 2 * x }
go/src/cmd/gofmt/testdata/rewrite5.golden
//gofmt -stdin /* note: no newline at end of file */ for i := 0; i < 10; i++ { s += i }
go/src/cmd/gofmt/testdata/stdin3.golden
/* Parenthesized type switch expressions originally accepted by gofmt must continue to be rewritten into the correct unparenthesized form. Only type-switches that didn't declare a variable in the type switch type assertion and which contained only "expression-like" (named) types in their cases were permitted to have their type assertion parenthesized by go/parser (due to a weak predicate in the parser). All others were rejected always, either with a syntax error in the type switch header or in the case. See also issue 4470. */ package p func f() { var x interface{} switch x.(type) { // should remain the same } switch x.(type) { // should become: switch x.(type) { } switch x.(type) { // should remain the same case int: } switch x.(type) { // should become: switch x.(type) { case int: } switch x.(type) { // should remain the same case []int: } // Parenthesized (x.(type)) in type switches containing cases // with unnamed (literal) types were never permitted by gofmt; // thus there won't be any code in the wild using this style if // the code was gofmt-ed. /* switch (x.(type)) { case []int: } */ switch t := x.(type) { // should remain the same default: _ = t } // Parenthesized (x.(type)) in type switches declaring a variable // were never permitted by gofmt; thus there won't be any code in // the wild using this style if the code was gofmt-ed. /* switch t := (x.(type)) { default: _ = t } */ }
go/src/cmd/gofmt/testdata/typeswitch.golden
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package bootstrap_test import ( "io" "io/fs" "os" "path/filepath" "strings" ) // overlayDir makes a minimal-overhead copy of srcRoot in which new files may be added. // // TODO: Once we no longer need to support the misc module in GOPATH mode, // factor this function out into a package to reduce duplication. func overlayDir(dstRoot, srcRoot string) error { dstRoot = filepath.Clean(dstRoot) if err := os.MkdirAll(dstRoot, 0777); err != nil { return err } srcRoot, err := filepath.Abs(srcRoot) if err != nil { return err } return filepath.WalkDir(srcRoot, func(srcPath string, entry fs.DirEntry, err error) error { if err != nil || srcPath == srcRoot { return err } if filepath.Base(srcPath) == "testdata" { // We're just building, so no need to copy those. return fs.SkipDir } suffix := strings.TrimPrefix(srcPath, srcRoot) for len(suffix) > 0 && suffix[0] == filepath.Separator { suffix = suffix[1:] } dstPath := filepath.Join(dstRoot, suffix) info, err := entry.Info() if err != nil { // entry.Info can fail if the file is removed mid-walk; don't use a possibly-nil FileInfo below. return err } perm := info.Mode() & os.ModePerm if info.Mode()&os.ModeSymlink != 0 { info, err = os.Stat(srcPath) if err != nil { return err } perm = info.Mode() & os.ModePerm } // Always make copies of directories. // If we add a file in the overlay, we don't want to add it in the original. if info.IsDir() { return os.MkdirAll(dstPath, perm|0200) } // If we can use a hard link, do that instead of copying bytes. // Go builds don't like symlinks in some cases, such as go:embed. if err := os.Link(srcPath, dstPath); err == nil { return nil } // Otherwise, copy the bytes. src, err := os.Open(srcPath) if err != nil { return err } defer src.Close() dst, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm) if err != nil { return err } _, err = io.Copy(dst, src) if closeErr := dst.Close(); err == nil { err = closeErr } return err }) }
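A sketch of typical use from a test; the test name and source path are hypothetical, and the "internal/testenv" and "testing" imports are assumed. New files are safe to create in the overlay, but existing files may be hard links into the original tree, so they must not be modified in place.

func TestOverlaySketch(t *testing.T) {
	overlay := t.TempDir()
	src := filepath.Join(testenv.GOROOT(t), "src", "some", "dir") // hypothetical source tree
	if err := overlayDir(overlay, src); err != nil {
		t.Fatal(err)
	}
	// Adding a new file affects only the overlay, never the original tree.
	extra := filepath.Join(overlay, "extra.go")
	if err := os.WriteFile(extra, []byte("package dir\n"), 0666); err != nil {
		t.Fatal(err)
	}
}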
go/src/cmd/internal/bootstrap_test/overlaydir_test.go
package main import "os" func main() { println(len(os.Args)) }
go/src/cmd/internal/cov/testdata/small.go
// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package moddeps_test import ( "bytes" "encoding/json" "fmt" "internal/testenv" "io" "io/fs" "os" "path/filepath" "slices" "sort" "strings" "sync" "testing" "golang.org/x/mod/module" ) // TestAllDependencies ensures dependencies of all // modules in GOROOT are in a consistent state. // // In short mode, it does a limited quick check and stops there. // In long mode, it also makes a copy of the entire GOROOT tree // and requires network access to perform more thorough checks. // Keep this distinction in mind when adding new checks. // // See issues 36852, 41409, and 43687. // (Also see golang.org/issue/27348.) func TestAllDependencies(t *testing.T) { goBin := testenv.GoToolPath(t) // Ensure that all packages imported within GOROOT // are vendored in the corresponding GOROOT module. // // This property allows offline development within the Go project, and ensures // that all dependency changes are presented in the usual code review process. // // As a quick first-order check, avoid network access and the need to copy the // entire GOROOT tree or explicitly invoke version control to check for changes. // Just check that packages are vendored. (In non-short mode, we go on to also // copy the GOROOT tree and perform more rigorous consistency checks. Jump below // for more details.) for _, m := range findGorootModules(t) { // This short test does NOT ensure that the vendored contents match // the unmodified contents of the corresponding dependency versions. t.Run(m.Path+"(quick)", func(t *testing.T) { t.Logf("module %s in directory %s", m.Path, m.Dir) if m.hasVendor { // Load all of the packages in the module to ensure that their // dependencies are vendored. If any imported package is missing, // 'go list -deps' will fail when attempting to load it. cmd := testenv.Command(t, goBin, "list", "-mod=vendor", "-deps", "./...") cmd.Dir = m.Dir cmd.Env = append(cmd.Environ(), "GO111MODULE=on", "GOWORK=off") cmd.Stderr = new(strings.Builder) _, err := cmd.Output() if err != nil { t.Errorf("%s: %v\n%s", strings.Join(cmd.Args, " "), err, cmd.Stderr) t.Logf("(Run 'go mod vendor' in %s to ensure that dependencies have been vendored.)", m.Dir) } return } // There is no vendor directory, so the module must have no dependencies. // Check that the list of active modules contains only the main module. cmd := testenv.Command(t, goBin, "list", "-mod=readonly", "-m", "all") cmd.Dir = m.Dir cmd.Env = append(cmd.Environ(), "GO111MODULE=on", "GOWORK=off") cmd.Stderr = new(strings.Builder) out, err := cmd.Output() if err != nil { t.Fatalf("%s: %v\n%s", strings.Join(cmd.Args, " "), err, cmd.Stderr) } if strings.TrimSpace(string(out)) != m.Path { t.Errorf("'%s' reported active modules other than %s:\n%s", strings.Join(cmd.Args, " "), m.Path, out) t.Logf("(Run 'go mod tidy' in %s to ensure that no extraneous dependencies were added, or 'go mod vendor' to copy in imported packages.)", m.Dir) } }) } // We now get to the slow, but more thorough part of the test. // Only run it in long test mode. if testing.Short() { return } // Ensure that all modules within GOROOT are tidy, vendored, and bundled. // Ensure that the vendored contents match the unmodified contents of the // corresponding dependency versions. // // The non-short section of this test requires network access and the diff // command. 
// // It makes a temporary copy of the entire GOROOT tree (where it can safely // perform operations that may mutate the tree), executes the same module // maintenance commands that we expect Go developers to run, and then // diffs the potentially modified module copy with the real one in GOROOT. // (We could try to rely on Git to do things differently, but that's not the // path we've chosen at this time. This allows the test to run when the tree // is not checked into Git.) testenv.MustHaveExternalNetwork(t) if haveDiff := func() bool { diff, err := testenv.Command(t, "diff", "--recursive", "--unified", ".", ".").CombinedOutput() if err != nil || len(diff) != 0 { return false } diff, err = testenv.Command(t, "diff", "--recursive", "--unified", ".", "..").CombinedOutput() if err == nil || len(diff) == 0 { return false } return true }(); !haveDiff { // For now, the diff command is a mandatory dependency of this test. // This test will primarily run on longtest builders, since few people // would test the cmd/internal/moddeps package directly, and all.bash // runs tests in short mode. It's fine to skip if diff is unavailable. t.Skip("skipping because a diff command with support for --recursive and --unified flags is unavailable") } // We're going to check the standard modules for tidiness, so we need a usable // GOMODCACHE. If the default directory doesn't exist, use a temporary // directory instead. (That can occur, for example, when running under // run.bash with GO_TEST_SHORT=0: run.bash sets GOPATH=/nonexist-gopath, and // GO_TEST_SHORT=0 causes it to run this portion of the test.) var modcacheEnv []string { out, err := testenv.Command(t, goBin, "env", "GOMODCACHE").Output() if err != nil { t.Fatalf("%s env GOMODCACHE: %v", goBin, err) } modcacheOk := false if gomodcache := string(bytes.TrimSpace(out)); gomodcache != "" { if _, err := os.Stat(gomodcache); err == nil { modcacheOk = true } } if !modcacheOk { modcacheEnv = []string{ "GOMODCACHE=" + t.TempDir(), "GOFLAGS=" + os.Getenv("GOFLAGS") + " -modcacherw", // Allow t.TempDir() to clean up subdirectories. } } } // Build the bundle binary at the golang.org/x/tools // module version specified in GOROOT/src/cmd/go.mod. bundleDir := t.TempDir() r := runner{ Dir: filepath.Join(testenv.GOROOT(t), "src/cmd"), Env: append(os.Environ(), modcacheEnv...), } r.run(t, goBin, "build", "-mod=readonly", "-o", bundleDir, "golang.org/x/tools/cmd/bundle") var gorootCopyDir string for _, m := range findGorootModules(t) { // Create a test-wide GOROOT copy. It can be created once // and reused between subtests whenever they don't fail. // // This is a relatively expensive operation, but it's a prerequisite to // be able to safely run commands like "go mod tidy", "go mod vendor", and // "go generate" on the GOROOT tree content. Those commands may modify the // tree, and we don't want that to happen to the real tree as part of executing // a test. if gorootCopyDir == "" { gorootCopyDir = makeGOROOTCopy(t) } t.Run(m.Path+"(thorough)", func(t *testing.T) { t.Logf("module %s in directory %s", m.Path, m.Dir) defer func() { if t.Failed() { // The test failed, which means it's possible the GOROOT copy // may have been modified. No choice but to reset it for the next // module test case. (This is slow, but it happens only during // test failures.)
gorootCopyDir = "" } }() rel, err := filepath.Rel(testenv.GOROOT(t), m.Dir) if err != nil { t.Fatalf("filepath.Rel(%q, %q): %v", testenv.GOROOT(t), m.Dir, err) } r := runner{ Dir: filepath.Join(gorootCopyDir, rel), Env: append(append(os.Environ(), modcacheEnv...), // Set GOROOT. "GOROOT="+gorootCopyDir, // Add GOROOTcopy/bin and bundleDir to front of PATH. "PATH="+filepath.Join(gorootCopyDir, "bin")+string(filepath.ListSeparator)+ bundleDir+string(filepath.ListSeparator)+os.Getenv("PATH"), "GOWORK=off", ), } goBinCopy := filepath.Join(gorootCopyDir, "bin", "go") r.run(t, goBinCopy, "mod", "tidy") // See issue 43687. r.run(t, goBinCopy, "mod", "verify") // Verify should be a no-op, but test it just in case. r.run(t, goBinCopy, "mod", "vendor") // See issue 36852. pkgs := packagePattern(m.Path) r.run(t, goBinCopy, "generate", `-run=^//go:generate bundle `, pkgs) // See issue 41409. advice := "$ cd " + m.Dir + "\n" + "$ go mod tidy # to remove extraneous dependencies\n" + "$ go mod vendor # to vendor dependencies\n" + "$ go generate -run=bundle " + pkgs + " # to regenerate bundled packages\n" if m.Path == "std" { r.run(t, goBinCopy, "generate", "syscall", "internal/syscall/...") // See issue 43440. advice += "$ go generate syscall internal/syscall/... # to regenerate syscall packages\n" } // TODO(golang.org/issue/43440): Check anything else influenced by dependency versions. diff, err := testenv.Command(t, "diff", "--recursive", "--unified", r.Dir, m.Dir).CombinedOutput() if err != nil || len(diff) != 0 { t.Errorf(`Module %s in %s is not tidy (-want +got): %s To fix it, run: %s (If module %[1]s is definitely tidy, this could mean there's a problem in the go or bundle command.)`, m.Path, m.Dir, diff, advice) } }) } } // packagePattern returns a package pattern that matches all packages // in the module modulePath, and ideally as few others as possible. func packagePattern(modulePath string) string { if modulePath == "std" { return "std" } return modulePath + "/..." } // makeGOROOTCopy makes a temporary copy of the current GOROOT tree. // The goal is to allow the calling test t to safely mutate a GOROOT // copy without also modifying the original GOROOT. // // It copies the entire tree as is, with the exception of the GOROOT/.git // directory, which is skipped, and the GOROOT/{bin,pkg} directories, // which are symlinked. This is done for speed, since a GOROOT tree is // functional without being in a Git repository, and bin and pkg are // deemed safe to share for the purpose of the TestAllDependencies test. func makeGOROOTCopy(t *testing.T) string { t.Helper() gorootCopyDir := t.TempDir() err := filepath.Walk(testenv.GOROOT(t), func(src string, info os.FileInfo, err error) error { if err != nil { return err } if info.IsDir() && src == filepath.Join(testenv.GOROOT(t), ".git") { return filepath.SkipDir } rel, err := filepath.Rel(testenv.GOROOT(t), src) if err != nil { return fmt.Errorf("filepath.Rel(%q, %q): %v", testenv.GOROOT(t), src, err) } dst := filepath.Join(gorootCopyDir, rel) if info.IsDir() && (src == filepath.Join(testenv.GOROOT(t), "bin") || src == filepath.Join(testenv.GOROOT(t), "pkg")) { // If the OS supports symlinks, use them instead // of copying the bin and pkg directories. if err := os.Symlink(src, dst); err == nil { return filepath.SkipDir } } perm := info.Mode() & os.ModePerm if info.Mode()&os.ModeSymlink != 0 { info, err = os.Stat(src) if err != nil { return err } perm = info.Mode() & os.ModePerm } // If it's a directory, make a corresponding directory. 
if info.IsDir() { return os.MkdirAll(dst, perm|0200) } // Copy the file bytes. // We can't create a symlink because the file may get modified; // we need to ensure that only the temporary copy is affected. s, err := os.Open(src) if err != nil { return err } defer s.Close() d, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm) if err != nil { return err } _, err = io.Copy(d, s) if err != nil { d.Close() return err } return d.Close() }) if err != nil { t.Fatal(err) } t.Logf("copied GOROOT from %s to %s", testenv.GOROOT(t), gorootCopyDir) return gorootCopyDir } type runner struct { Dir string Env []string } // run runs the command and requires that it succeeds. func (r runner) run(t *testing.T, args ...string) { t.Helper() cmd := testenv.Command(t, args[0], args[1:]...) cmd.Dir = r.Dir cmd.Env = slices.Clip(r.Env) if r.Dir != "" { cmd.Env = append(cmd.Env, "PWD="+r.Dir) } out, err := cmd.CombinedOutput() if err != nil { t.Logf("> %s\n", strings.Join(args, " ")) t.Fatalf("command failed: %s\n%s", err, out) } } // TestDependencyVersionsConsistent verifies that each module in GOROOT that // requires a given external dependency requires the same version of that // dependency. // // This property allows us to maintain a single release branch of each such // dependency, minimizing the number of backports needed to pull in critical // fixes. It also ensures that any bug detected and fixed in one GOROOT module // (such as "std") is fixed in all other modules (such as "cmd") as well. func TestDependencyVersionsConsistent(t *testing.T) { // Collect the dependencies of all modules in GOROOT, indexed by module path. type requirement struct { Required module.Version Replacement module.Version } seen := map[string]map[requirement][]gorootModule{} // module path → requirement → set of modules with that requirement for _, m := range findGorootModules(t) { if !m.hasVendor { // TestAllDependencies will ensure that the module has no dependencies. continue } // We want this test to be able to run offline and with an empty module // cache, so we verify consistency only for the module versions listed in // vendor/modules.txt. That includes all direct dependencies and all modules // that provide any imported packages. // // It's ok if there are undetected differences in modules that do not // provide imported packages: we will not have to pull in any backports of // fixes to those modules anyway. vendor, err := os.ReadFile(filepath.Join(m.Dir, "vendor", "modules.txt")) if err != nil { t.Error(err) continue } for _, line := range strings.Split(strings.TrimSpace(string(vendor)), "\n") { parts := strings.Fields(line) if len(parts) < 3 || parts[0] != "#" { continue } // This line is of the form "# module version [=> replacement [version]]". var r requirement r.Required.Path = parts[1] r.Required.Version = parts[2] if len(parts) >= 5 && parts[3] == "=>" { r.Replacement.Path = parts[4] if module.CheckPath(r.Replacement.Path) != nil { // If the replacement is a filesystem path (rather than a module path), // we don't know whether the filesystem contents have changed since // the module was last vendored. // // Fortunately, we do not currently use filesystem-local replacements // in GOROOT modules. 
t.Errorf("cannot check consistency for filesystem-local replacement in module %s (%s):\n%s", m.Path, m.Dir, line) } if len(parts) >= 6 { r.Replacement.Version = parts[5] } } if seen[r.Required.Path] == nil { seen[r.Required.Path] = make(map[requirement][]gorootModule) } seen[r.Required.Path][r] = append(seen[r.Required.Path][r], m) } } // Now verify that we saw only one distinct version for each module. for path, versions := range seen { if len(versions) > 1 { t.Errorf("Modules within GOROOT require different versions of %s.", path) for r, mods := range versions { desc := new(strings.Builder) desc.WriteString(r.Required.Version) if r.Replacement.Path != "" { fmt.Fprintf(desc, " => %s", r.Replacement.Path) if r.Replacement.Version != "" { fmt.Fprintf(desc, " %s", r.Replacement.Version) } } for _, m := range mods { t.Logf("%s\trequires %v", m.Path, desc) } } } } } type gorootModule struct { Path string Dir string hasVendor bool } // findGorootModules returns the list of modules found in the GOROOT source tree. func findGorootModules(t *testing.T) []gorootModule { t.Helper() goBin := testenv.GoToolPath(t) goroot.once.Do(func() { // If the root itself is a symlink to a directory, // we want to follow it (see https://go.dev/issue/64375). // Add a trailing separator to force that to happen. root := testenv.GOROOT(t) if !os.IsPathSeparator(root[len(root)-1]) { root += string(filepath.Separator) } goroot.err = filepath.WalkDir(root, func(path string, info fs.DirEntry, err error) error { if err != nil { return err } if info.IsDir() && path != root && (info.Name() == "vendor" || info.Name() == "testdata") { return filepath.SkipDir } if info.IsDir() && path == filepath.Join(testenv.GOROOT(t), "pkg") { // GOROOT/pkg contains generated artifacts, not source code. // // In https://golang.org/issue/37929 it was observed to somehow contain // a module cache, so it is important to skip. (That helps with the // running time of this test anyway.) return filepath.SkipDir } if info.IsDir() && path != root && (strings.HasPrefix(info.Name(), "_") || strings.HasPrefix(info.Name(), ".")) { // _ and . prefixed directories can be used for internal modules // without a vendor directory that don't contribute to the build // but might be used for example as code generators. return filepath.SkipDir } if info.IsDir() || info.Name() != "go.mod" { return nil } dir := filepath.Dir(path) // Use 'go list' to describe the module contained in this directory (but // not its dependencies). cmd := testenv.Command(t, goBin, "list", "-json", "-m") cmd.Dir = dir cmd.Env = append(cmd.Environ(), "GO111MODULE=on", "GOWORK=off") cmd.Stderr = new(strings.Builder) out, err := cmd.Output() if err != nil { return fmt.Errorf("'go list -json -m' in %s: %w\n%s", dir, err, cmd.Stderr) } var m gorootModule if err := json.Unmarshal(out, &m); err != nil { return fmt.Errorf("decoding 'go list -json -m' in %s: %w", dir, err) } if m.Path == "" || m.Dir == "" { return fmt.Errorf("'go list -json -m' in %s failed to populate Path and/or Dir", dir) } if _, err := os.Stat(filepath.Join(dir, "vendor")); err == nil { m.hasVendor = true } goroot.modules = append(goroot.modules, m) return nil }) if goroot.err != nil { return } // knownGOROOTModules is a hard-coded list of modules that are known to exist in GOROOT. // If findGorootModules doesn't find a module, it won't be covered by tests at all, // so make sure at least these modules are found. See issue 46254. If this list // becomes a nuisance to update, can be replaced with len(goroot.modules) check. 
knownGOROOTModules := [...]string{ "std", "cmd", // The "misc" module sometimes exists, but cmd/distpack intentionally removes it. } var seen = make(map[string]bool) // Key is module path. for _, m := range goroot.modules { seen[m.Path] = true } for _, m := range knownGOROOTModules { if !seen[m] { goroot.err = fmt.Errorf("findGorootModules didn't find the well-known module %q", m) break } } sort.Slice(goroot.modules, func(i, j int) bool { return goroot.modules[i].Dir < goroot.modules[j].Dir }) }) if goroot.err != nil { t.Fatal(goroot.err) } return goroot.modules } // goroot caches the list of modules found in the GOROOT source tree. var goroot struct { once sync.Once modules []gorootModule err error }
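To make the vendor/modules.txt parsing in TestDependencyVersionsConsistent concrete, here are hypothetical lines and what the loop extracts from them (only lines whose first field is exactly "#" are considered; the versions shown are made up):

	# golang.org/x/mod v0.14.0
		→ Required = golang.org/x/mod v0.14.0, no Replacement
	# golang.org/x/sys v0.15.0 => golang.org/x/sys v0.15.1
		→ Required = golang.org/x/sys v0.15.0, Replacement = golang.org/x/sys v0.15.1
	## explicit; go 1.20
		→ skipped: the first field is "##", not "#"
	golang.org/x/mod/modfile
		→ skipped: a package line, not a module line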
go/src/cmd/internal/moddeps/moddeps_test.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package obj // go-specific code shared across loaders (5l, 6l, 8l). func Nopout(p *Prog) { p.As = ANOP p.Scond = 0 p.From = Addr{} p.RestArgs = nil p.Reg = 0 p.To = Addr{} }
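A sketch of how an architecture backend might use Nopout; the peephole condition below is hypothetical, but obj.Nopout itself is the function defined above.

// elideSelfMove is a hypothetical peephole step in an architecture backend:
// a register-to-register move whose source and destination are the same
// register has no effect, so erase it in place. Nopout keeps the Prog in
// the instruction list but turns it into an ANOP, which assembles to nothing.
func elideSelfMove(p *obj.Prog, movOp obj.As) {
	if p.As == movOp && p.From.Type == obj.TYPE_REG && p.To.Type == obj.TYPE_REG && p.From.Reg == p.To.Reg {
		obj.Nopout(p)
	}
}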
go/src/cmd/internal/obj/go.go
// cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova. // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package mips import ( "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/sys" "fmt" "log" "sort" ) // ctxt0 holds state while assembling a single function. // Each function gets a fresh ctxt0. // This allows for multiple functions to be safely concurrently assembled. type ctxt0 struct { ctxt *obj.Link newprog obj.ProgAlloc cursym *obj.LSym autosize int32 instoffset int64 pc int64 } // Instruction layout. 
const ( mips64FuncAlign = 8 ) const ( r0iszero = 1 ) type Optab struct { as obj.As a1 uint8 a2 uint8 a3 uint8 type_ int8 size int8 param int16 family sys.ArchFamily // 0 means both sys.MIPS and sys.MIPS64 flag uint8 } const ( // Optab.flag NOTUSETMP = 1 << iota // p expands to multiple instructions, but does NOT use REGTMP ) var optab = []Optab{ {obj.ATEXT, C_LEXT, C_NONE, C_TEXTSIZE, 0, 0, 0, sys.MIPS64, 0}, {obj.ATEXT, C_ADDR, C_NONE, C_TEXTSIZE, 0, 0, 0, 0, 0}, {AMOVW, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, {AMOVV, C_REG, C_NONE, C_REG, 1, 4, 0, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_REG, 12, 8, 0, 0, NOTUSETMP}, {AMOVBU, C_REG, C_NONE, C_REG, 13, 4, 0, 0, 0}, {AMOVWU, C_REG, C_NONE, C_REG, 14, 8, 0, sys.MIPS64, NOTUSETMP}, {ASUB, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, {ASUBV, C_REG, C_REG, C_REG, 2, 4, 0, sys.MIPS64, 0}, {AADD, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, {AADDV, C_REG, C_REG, C_REG, 2, 4, 0, sys.MIPS64, 0}, {AAND, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, {ASUB, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, {ASUBV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.MIPS64, 0}, {AADD, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, {AADDV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.MIPS64, 0}, {AAND, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, {ACMOVN, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, {ANEGW, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, {ANEGV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.MIPS64, 0}, {ASLL, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0}, {ASLL, C_REG, C_REG, C_REG, 9, 4, 0, 0, 0}, {ASLLV, C_REG, C_NONE, C_REG, 9, 4, 0, sys.MIPS64, 0}, {ASLLV, C_REG, C_REG, C_REG, 9, 4, 0, sys.MIPS64, 0}, {ACLO, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0}, {AADDF, C_FREG, C_NONE, C_FREG, 32, 4, 0, 0, 0}, {AADDF, C_FREG, C_REG, C_FREG, 32, 4, 0, 0, 0}, {ACMPEQF, C_FREG, C_REG, C_NONE, 32, 4, 0, 0, 0}, {AABSF, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0, 0}, {AMOVVF, C_FREG, C_NONE, C_FREG, 33, 4, 0, sys.MIPS64, 0}, {AMOVF, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0, 0}, {AMOVD, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0, 0}, {AMOVW, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, {AMOVWU, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, {AMOVV, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, {AMOVBU, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, {AMOVWL, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, {AMOVVL, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, {AMOVWU, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0}, {AMOVV, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, {AMOVBU, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, {AMOVWL, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, {AMOVVL, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, {AMOVWU, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0}, {AMOVV, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, {AMOVBU, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, {AMOVWL, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, {AMOVVL, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0}, {ASC, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, {ASCV, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0}, {AMOVW, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0}, {AMOVWU, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0}, {AMOVV, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0}, {AMOVB, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0}, 
{AMOVBU, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0}, {AMOVWL, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0}, {AMOVVL, C_SEXT, C_NONE, C_REG, 8, 4, REGSB, sys.MIPS64, 0}, {AMOVW, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, {AMOVWU, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, sys.MIPS64, 0}, {AMOVV, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, sys.MIPS64, 0}, {AMOVB, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, {AMOVBU, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, {AMOVWL, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, {AMOVVL, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, sys.MIPS64, 0}, {AMOVW, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, {AMOVWU, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.MIPS64, 0}, {AMOVV, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.MIPS64, 0}, {AMOVB, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, {AMOVBU, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, {AMOVWL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, {AMOVVL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.MIPS64, 0}, {ALL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, {ALLV, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0}, {AMOVWU, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0}, {AMOVV, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0}, {AMOVBU, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0, 0}, {AMOVWU, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, sys.MIPS64, 0}, {AMOVV, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0, 0}, {AMOVBU, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0, 0}, {AMOVW, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, {AMOVWU, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, sys.MIPS64, 0}, {AMOVV, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, {AMOVBU, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, {ASC, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, {AMOVW, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.MIPS, 0}, {AMOVW, C_REG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0}, {AMOVWU, C_REG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0}, {AMOVV, C_REG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0}, {AMOVB, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.MIPS, 0}, {AMOVB, C_REG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0}, {AMOVBU, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.MIPS, 0}, {AMOVBU, C_REG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_TLS, 53, 8, 0, 0, NOTUSETMP}, {AMOVWU, C_REG, C_NONE, C_TLS, 53, 8, 0, sys.MIPS64, NOTUSETMP}, {AMOVV, C_REG, C_NONE, C_TLS, 53, 8, 0, sys.MIPS64, NOTUSETMP}, {AMOVB, C_REG, C_NONE, C_TLS, 53, 8, 0, 0, NOTUSETMP}, {AMOVBU, C_REG, C_NONE, C_TLS, 53, 8, 0, 0, NOTUSETMP}, {AMOVW, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, sys.MIPS64, 0}, {AMOVWU, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, sys.MIPS64, 0}, {AMOVV, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, sys.MIPS64, 0}, {AMOVB, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, sys.MIPS64, 0}, {AMOVBU, C_LEXT, C_NONE, C_REG, 36, 12, REGSB, sys.MIPS64, 0}, {AMOVW, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0, 0}, {AMOVWU, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, sys.MIPS64, 0}, {AMOVV, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, sys.MIPS64, 0}, {AMOVB, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0, 0}, {AMOVBU, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0, 0}, {AMOVW, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0, 0}, {AMOVWU, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, sys.MIPS64, 0}, {AMOVV, C_LOREG, 
C_NONE, C_REG, 36, 12, REGZERO, sys.MIPS64, 0}, {AMOVB, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0, 0}, {AMOVBU, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0, 0}, {AMOVW, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.MIPS, 0}, {AMOVW, C_ADDR, C_NONE, C_REG, 51, 12, 0, sys.MIPS64, 0}, {AMOVWU, C_ADDR, C_NONE, C_REG, 51, 12, 0, sys.MIPS64, 0}, {AMOVV, C_ADDR, C_NONE, C_REG, 51, 12, 0, sys.MIPS64, 0}, {AMOVB, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.MIPS, 0}, {AMOVB, C_ADDR, C_NONE, C_REG, 51, 12, 0, sys.MIPS64, 0}, {AMOVBU, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.MIPS, 0}, {AMOVBU, C_ADDR, C_NONE, C_REG, 51, 12, 0, sys.MIPS64, 0}, {AMOVW, C_TLS, C_NONE, C_REG, 54, 8, 0, 0, NOTUSETMP}, {AMOVWU, C_TLS, C_NONE, C_REG, 54, 8, 0, sys.MIPS64, NOTUSETMP}, {AMOVV, C_TLS, C_NONE, C_REG, 54, 8, 0, sys.MIPS64, NOTUSETMP}, {AMOVB, C_TLS, C_NONE, C_REG, 54, 8, 0, 0, NOTUSETMP}, {AMOVBU, C_TLS, C_NONE, C_REG, 54, 8, 0, 0, NOTUSETMP}, {AMOVW, C_SECON, C_NONE, C_REG, 3, 4, REGSB, sys.MIPS64, 0}, {AMOVV, C_SECON, C_NONE, C_REG, 3, 4, REGSB, sys.MIPS64, 0}, {AMOVW, C_SACON, C_NONE, C_REG, 3, 4, REGSP, 0, 0}, {AMOVV, C_SACON, C_NONE, C_REG, 3, 4, REGSP, sys.MIPS64, 0}, {AMOVW, C_LECON, C_NONE, C_REG, 52, 8, REGSB, sys.MIPS, NOTUSETMP}, {AMOVW, C_LECON, C_NONE, C_REG, 52, 12, REGSB, sys.MIPS64, NOTUSETMP}, {AMOVV, C_LECON, C_NONE, C_REG, 52, 12, REGSB, sys.MIPS64, NOTUSETMP}, {AMOVW, C_LACON, C_NONE, C_REG, 26, 12, REGSP, 0, 0}, {AMOVV, C_LACON, C_NONE, C_REG, 26, 12, REGSP, sys.MIPS64, 0}, {AMOVW, C_ADDCON, C_NONE, C_REG, 3, 4, REGZERO, 0, 0}, {AMOVV, C_ADDCON, C_NONE, C_REG, 3, 4, REGZERO, sys.MIPS64, 0}, {AMOVW, C_ANDCON, C_NONE, C_REG, 3, 4, REGZERO, 0, 0}, {AMOVV, C_ANDCON, C_NONE, C_REG, 3, 4, REGZERO, sys.MIPS64, 0}, {AMOVW, C_STCON, C_NONE, C_REG, 55, 8, 0, 0, NOTUSETMP}, {AMOVV, C_STCON, C_NONE, C_REG, 55, 8, 0, sys.MIPS64, NOTUSETMP}, {AMOVW, C_UCON, C_NONE, C_REG, 24, 4, 0, 0, 0}, {AMOVV, C_UCON, C_NONE, C_REG, 24, 4, 0, sys.MIPS64, 0}, {AMOVW, C_LCON, C_NONE, C_REG, 19, 8, 0, 0, NOTUSETMP}, {AMOVV, C_LCON, C_NONE, C_REG, 19, 8, 0, sys.MIPS64, NOTUSETMP}, {AMOVW, C_HI, C_NONE, C_REG, 20, 4, 0, 0, 0}, {AMOVV, C_HI, C_NONE, C_REG, 20, 4, 0, sys.MIPS64, 0}, {AMOVW, C_LO, C_NONE, C_REG, 20, 4, 0, 0, 0}, {AMOVV, C_LO, C_NONE, C_REG, 20, 4, 0, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_HI, 21, 4, 0, 0, 0}, {AMOVV, C_REG, C_NONE, C_HI, 21, 4, 0, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_LO, 21, 4, 0, 0, 0}, {AMOVV, C_REG, C_NONE, C_LO, 21, 4, 0, sys.MIPS64, 0}, {AMUL, C_REG, C_REG, C_NONE, 22, 4, 0, 0, 0}, {AMUL, C_REG, C_REG, C_REG, 22, 4, 0, 0, 0}, {AMULV, C_REG, C_REG, C_NONE, 22, 4, 0, sys.MIPS64, 0}, {AADD, C_ADD0CON, C_REG, C_REG, 4, 4, 0, 0, 0}, {AADD, C_ADD0CON, C_NONE, C_REG, 4, 4, 0, 0, 0}, {AADD, C_ANDCON, C_REG, C_REG, 10, 8, 0, 0, 0}, {AADD, C_ANDCON, C_NONE, C_REG, 10, 8, 0, 0, 0}, {AADDV, C_ADD0CON, C_REG, C_REG, 4, 4, 0, sys.MIPS64, 0}, {AADDV, C_ADD0CON, C_NONE, C_REG, 4, 4, 0, sys.MIPS64, 0}, {AADDV, C_ANDCON, C_REG, C_REG, 10, 8, 0, sys.MIPS64, 0}, {AADDV, C_ANDCON, C_NONE, C_REG, 10, 8, 0, sys.MIPS64, 0}, {AAND, C_AND0CON, C_REG, C_REG, 4, 4, 0, 0, 0}, {AAND, C_AND0CON, C_NONE, C_REG, 4, 4, 0, 0, 0}, {AAND, C_ADDCON, C_REG, C_REG, 10, 8, 0, 0, 0}, {AAND, C_ADDCON, C_NONE, C_REG, 10, 8, 0, 0, 0}, {AADD, C_UCON, C_REG, C_REG, 25, 8, 0, 0, 0}, {AADD, C_UCON, C_NONE, C_REG, 25, 8, 0, 0, 0}, {AADDV, C_UCON, C_REG, C_REG, 25, 8, 0, sys.MIPS64, 0}, {AADDV, C_UCON, C_NONE, C_REG, 25, 8, 0, sys.MIPS64, 0}, {AAND, C_UCON, C_REG, C_REG, 25, 8, 0, 0, 0}, {AAND, C_UCON, C_NONE, C_REG, 25, 8, 0, 0, 0}, {AADD, C_LCON, 
C_NONE, C_REG, 23, 12, 0, 0, 0}, {AADDV, C_LCON, C_NONE, C_REG, 23, 12, 0, sys.MIPS64, 0}, {AAND, C_LCON, C_NONE, C_REG, 23, 12, 0, 0, 0}, {AADD, C_LCON, C_REG, C_REG, 23, 12, 0, 0, 0}, {AADDV, C_LCON, C_REG, C_REG, 23, 12, 0, sys.MIPS64, 0}, {AAND, C_LCON, C_REG, C_REG, 23, 12, 0, 0, 0}, {ASLL, C_SCON, C_REG, C_REG, 16, 4, 0, 0, 0}, {ASLL, C_SCON, C_NONE, C_REG, 16, 4, 0, 0, 0}, {ASLLV, C_SCON, C_REG, C_REG, 16, 4, 0, sys.MIPS64, 0}, {ASLLV, C_SCON, C_NONE, C_REG, 16, 4, 0, sys.MIPS64, 0}, {ASYSCALL, C_NONE, C_NONE, C_NONE, 5, 4, 0, 0, 0}, {ABEQ, C_REG, C_REG, C_SBRA, 6, 4, 0, 0, 0}, {ABEQ, C_REG, C_NONE, C_SBRA, 6, 4, 0, 0, 0}, {ABLEZ, C_REG, C_NONE, C_SBRA, 6, 4, 0, 0, 0}, {ABFPT, C_NONE, C_NONE, C_SBRA, 6, 8, 0, 0, NOTUSETMP}, {AJMP, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, {AJAL, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, {AJMP, C_NONE, C_NONE, C_ZOREG, 18, 4, REGZERO, 0, 0}, {AJAL, C_NONE, C_NONE, C_ZOREG, 18, 4, REGLINK, 0, 0}, {AMOVW, C_SEXT, C_NONE, C_FREG, 27, 4, REGSB, sys.MIPS64, 0}, {AMOVF, C_SEXT, C_NONE, C_FREG, 27, 4, REGSB, sys.MIPS64, 0}, {AMOVD, C_SEXT, C_NONE, C_FREG, 27, 4, REGSB, sys.MIPS64, 0}, {AMOVW, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, sys.MIPS64, 0}, {AMOVF, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, 0, 0}, {AMOVD, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, 0, 0}, {AMOVW, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, sys.MIPS64, 0}, {AMOVF, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, 0, 0}, {AMOVD, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, 0, 0}, {AMOVW, C_LEXT, C_NONE, C_FREG, 27, 12, REGSB, sys.MIPS64, 0}, {AMOVF, C_LEXT, C_NONE, C_FREG, 27, 12, REGSB, sys.MIPS64, 0}, {AMOVD, C_LEXT, C_NONE, C_FREG, 27, 12, REGSB, sys.MIPS64, 0}, {AMOVW, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, sys.MIPS64, 0}, {AMOVF, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, 0, 0}, {AMOVD, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, 0, 0}, {AMOVW, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, sys.MIPS64, 0}, {AMOVF, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, 0, 0}, {AMOVD, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, 0, 0}, {AMOVF, C_ADDR, C_NONE, C_FREG, 51, 8, 0, sys.MIPS, 0}, {AMOVF, C_ADDR, C_NONE, C_FREG, 51, 12, 0, sys.MIPS64, 0}, {AMOVD, C_ADDR, C_NONE, C_FREG, 51, 8, 0, sys.MIPS, 0}, {AMOVD, C_ADDR, C_NONE, C_FREG, 51, 12, 0, sys.MIPS64, 0}, {AMOVW, C_FREG, C_NONE, C_SEXT, 28, 4, REGSB, sys.MIPS64, 0}, {AMOVF, C_FREG, C_NONE, C_SEXT, 28, 4, REGSB, sys.MIPS64, 0}, {AMOVD, C_FREG, C_NONE, C_SEXT, 28, 4, REGSB, sys.MIPS64, 0}, {AMOVW, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, sys.MIPS64, 0}, {AMOVF, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, 0, 0}, {AMOVD, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, 0, 0}, {AMOVW, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, sys.MIPS64, 0}, {AMOVF, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, 0, 0}, {AMOVD, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, 0, 0}, {AMOVW, C_FREG, C_NONE, C_LEXT, 28, 12, REGSB, sys.MIPS64, 0}, {AMOVF, C_FREG, C_NONE, C_LEXT, 28, 12, REGSB, sys.MIPS64, 0}, {AMOVD, C_FREG, C_NONE, C_LEXT, 28, 12, REGSB, sys.MIPS64, 0}, {AMOVW, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, sys.MIPS64, 0}, {AMOVF, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, 0, 0}, {AMOVD, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, 0, 0}, {AMOVW, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, sys.MIPS64, 0}, {AMOVF, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, 0, 0}, {AMOVD, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, 0, 0}, {AMOVF, C_FREG, C_NONE, C_ADDR, 50, 8, 0, sys.MIPS, 0}, {AMOVF, C_FREG, C_NONE, C_ADDR, 50, 12, 0, sys.MIPS64, 0}, {AMOVD, C_FREG, C_NONE, C_ADDR, 50, 8, 0, sys.MIPS, 0}, {AMOVD, C_FREG, C_NONE, C_ADDR, 50, 12, 0, 
sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_FREG, 30, 4, 0, 0, 0}, {AMOVW, C_FREG, C_NONE, C_REG, 31, 4, 0, 0, 0}, {AMOVV, C_REG, C_NONE, C_FREG, 47, 4, 0, sys.MIPS64, 0}, {AMOVV, C_FREG, C_NONE, C_REG, 48, 4, 0, sys.MIPS64, 0}, {AMOVW, C_ADDCON, C_NONE, C_FREG, 34, 8, 0, sys.MIPS64, 0}, {AMOVW, C_ANDCON, C_NONE, C_FREG, 34, 8, 0, sys.MIPS64, 0}, {AMOVW, C_REG, C_NONE, C_MREG, 37, 4, 0, 0, 0}, {AMOVV, C_REG, C_NONE, C_MREG, 37, 4, 0, sys.MIPS64, 0}, {AMOVW, C_MREG, C_NONE, C_REG, 38, 4, 0, 0, 0}, {AMOVV, C_MREG, C_NONE, C_REG, 38, 4, 0, sys.MIPS64, 0}, {AWORD, C_LCON, C_NONE, C_NONE, 40, 4, 0, 0, 0}, {AMOVW, C_REG, C_NONE, C_FCREG, 41, 4, 0, 0, 0}, {AMOVV, C_REG, C_NONE, C_FCREG, 41, 4, 0, sys.MIPS64, 0}, {AMOVW, C_FCREG, C_NONE, C_REG, 42, 4, 0, 0, 0}, {AMOVV, C_FCREG, C_NONE, C_REG, 42, 4, 0, sys.MIPS64, 0}, {ATEQ, C_SCON, C_REG, C_REG, 15, 4, 0, 0, 0}, {ATEQ, C_SCON, C_NONE, C_REG, 15, 4, 0, 0, 0}, {ACMOVT, C_REG, C_NONE, C_REG, 17, 4, 0, 0, 0}, {AVMOVB, C_SCON, C_NONE, C_WREG, 56, 4, 0, sys.MIPS64, 0}, {AVMOVB, C_ADDCON, C_NONE, C_WREG, 56, 4, 0, sys.MIPS64, 0}, {AVMOVB, C_SOREG, C_NONE, C_WREG, 57, 4, 0, sys.MIPS64, 0}, {AVMOVB, C_WREG, C_NONE, C_SOREG, 58, 4, 0, sys.MIPS64, 0}, {AWSBH, C_REG, C_NONE, C_REG, 59, 4, 0, 0, 0}, {ADSBH, C_REG, C_NONE, C_REG, 59, 4, 0, sys.MIPS64, 0}, {ABREAK, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, /* really CACHE instruction */ {ABREAK, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0}, {ABREAK, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0}, {ABREAK, C_NONE, C_NONE, C_NONE, 5, 4, 0, 0, 0}, {obj.AUNDEF, C_NONE, C_NONE, C_NONE, 49, 4, 0, 0, 0}, {obj.APCDATA, C_LCON, C_NONE, C_LCON, 0, 0, 0, 0, 0}, {obj.AFUNCDATA, C_SCON, C_NONE, C_ADDR, 0, 0, 0, 0, 0}, {obj.ANOP, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0}, {obj.ANOP, C_LCON, C_NONE, C_NONE, 0, 0, 0, 0, 0}, // nop variants, see #40689 {obj.ANOP, C_REG, C_NONE, C_NONE, 0, 0, 0, 0, 0}, {obj.ANOP, C_FREG, C_NONE, C_NONE, 0, 0, 0, 0, 0}, {obj.ADUFFZERO, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, // same as AJMP {obj.ADUFFCOPY, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, // same as AJMP {obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0}, } var oprange [ALAST & obj.AMask][]Optab var xcmp [C_NCLASS][C_NCLASS]bool func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { if ctxt.Retpoline { ctxt.Diag("-spectre=ret not supported on mips") ctxt.Retpoline = false // don't keep printing } p := cursym.Func().Text if p == nil || p.Link == nil { // handle external functions and ELF section symbols return } c := ctxt0{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset + ctxt.Arch.FixedFrameSize)} if oprange[AOR&obj.AMask] == nil { c.ctxt.Diag("mips ops not initialized, call mips.buildop first") } pc := int64(0) p.Pc = pc var m int var o *Optab for p = p.Link; p != nil; p = p.Link { p.Pc = pc o = c.oplook(p) m = int(o.size) if m == 0 { if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { c.ctxt.Diag("zero-width instruction\n%v", p) } continue } pc += int64(m) } c.cursym.Size = pc /* * if any procedure is large enough to * generate a large SBRA branch, then * generate extra passes putting branches * around jmps to fix. this is rare. 
*/ bflag := 1 var otxt int64 var q *obj.Prog for bflag != 0 { bflag = 0 pc = 0 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link { p.Pc = pc o = c.oplook(p) // very large conditional branches if o.type_ == 6 && p.To.Target() != nil { otxt = p.To.Target().Pc - pc if otxt < -(1<<17)+10 || otxt >= (1<<17)-10 { q = c.newprog() q.Link = p.Link p.Link = q q.As = AJMP q.Pos = p.Pos q.To.Type = obj.TYPE_BRANCH q.To.SetTarget(p.To.Target()) p.To.SetTarget(q) q = c.newprog() q.Link = p.Link p.Link = q q.As = AJMP q.Pos = p.Pos q.To.Type = obj.TYPE_BRANCH q.To.SetTarget(q.Link.Link) c.addnop(p.Link) c.addnop(p) bflag = 1 } } m = int(o.size) if m == 0 { if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { c.ctxt.Diag("zero-width instruction\n%v", p) } continue } pc += int64(m) } c.cursym.Size = pc } if c.ctxt.Arch.Family == sys.MIPS64 { pc += -pc & (mips64FuncAlign - 1) } c.cursym.Size = pc /* * lay out the code, emitting code and data relocations. */ c.cursym.Grow(c.cursym.Size) bp := c.cursym.P var i int32 var out [4]uint32 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link { c.pc = p.Pc o = c.oplook(p) if int(o.size) > 4*len(out) { log.Fatalf("out array in span0 is too small, need at least %d for %v", o.size/4, p) } c.asmout(p, o, out[:]) for i = 0; i < int32(o.size/4); i++ { c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i]) bp = bp[4:] } } // Mark nonpreemptible instruction sequences. // We use REGTMP as a scratch register during call injection, // so instruction sequences that use REGTMP are unsafe to // preempt asynchronously. obj.MarkUnsafePoints(c.ctxt, c.cursym.Func().Text, c.newprog, c.isUnsafePoint, c.isRestartable) } // isUnsafePoint returns whether p is an unsafe point. func (c *ctxt0) isUnsafePoint(p *obj.Prog) bool { // If p explicitly uses REGTMP, it's unsafe to preempt, because the // preemption sequence clobbers REGTMP. return p.From.Reg == REGTMP || p.To.Reg == REGTMP || p.Reg == REGTMP } // isRestartable returns whether p is a multi-instruction sequence that, // if preempted, can be restarted. func (c *ctxt0) isRestartable(p *obj.Prog) bool { if c.isUnsafePoint(p) { return false } // If p is a multi-instruction sequence that uses REGTMP, inserted by // the assembler in order to materialize a large constant/offset, we // can restart p (at the start of the instruction sequence) and recompute // the content of REGTMP upon async preemption. Currently, all cases // of assembler-inserted REGTMP fall into this category. // If p doesn't use REGTMP, it can be simply preempted, so we don't // mark it.
o := c.oplook(p) return o.size > 4 && o.flag&NOTUSETMP == 0 } func isint32(v int64) bool { return int64(int32(v)) == v } func isuint32(v uint64) bool { return uint64(uint32(v)) == v } func (c *ctxt0) aclass(a *obj.Addr) int { switch a.Type { case obj.TYPE_NONE: return C_NONE case obj.TYPE_REG: if REG_R0 <= a.Reg && a.Reg <= REG_R31 { return C_REG } if REG_F0 <= a.Reg && a.Reg <= REG_F31 { return C_FREG } if REG_M0 <= a.Reg && a.Reg <= REG_M31 { return C_MREG } if REG_FCR0 <= a.Reg && a.Reg <= REG_FCR31 { return C_FCREG } if REG_W0 <= a.Reg && a.Reg <= REG_W31 { return C_WREG } if a.Reg == REG_LO { return C_LO } if a.Reg == REG_HI { return C_HI } return C_GOK case obj.TYPE_MEM: switch a.Name { case obj.NAME_EXTERN, obj.NAME_STATIC: if a.Sym == nil { break } c.instoffset = a.Offset if a.Sym != nil { // use relocation if a.Sym.Type == objabi.STLSBSS { return C_TLS } return C_ADDR } return C_LEXT case obj.NAME_AUTO: if a.Reg == REGSP { // unset base register for better printing, since // a.Offset is still relative to pseudo-SP. a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset if c.instoffset >= -BIG && c.instoffset < BIG { return C_SAUTO } return C_LAUTO case obj.NAME_PARAM: if a.Reg == REGSP { // unset base register for better printing, since // a.Offset is still relative to pseudo-FP. a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize if c.instoffset >= -BIG && c.instoffset < BIG { return C_SAUTO } return C_LAUTO case obj.NAME_NONE: c.instoffset = a.Offset if c.instoffset == 0 { return C_ZOREG } if c.instoffset >= -BIG && c.instoffset < BIG { return C_SOREG } return C_LOREG } return C_GOK case obj.TYPE_TEXTSIZE: return C_TEXTSIZE case obj.TYPE_CONST, obj.TYPE_ADDR: switch a.Name { case obj.NAME_NONE: c.instoffset = a.Offset if a.Reg != obj.REG_NONE { if -BIG <= c.instoffset && c.instoffset <= BIG { return C_SACON } if isint32(c.instoffset) { return C_LACON } return C_DACON } case obj.NAME_EXTERN, obj.NAME_STATIC: s := a.Sym if s == nil { return C_GOK } c.instoffset = a.Offset if s.Type == objabi.STLSBSS { return C_STCON // address of TLS variable } return C_LECON case obj.NAME_AUTO: if a.Reg == REGSP { // unset base register for better printing, since // a.Offset is still relative to pseudo-SP. a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset if c.instoffset >= -BIG && c.instoffset < BIG { return C_SACON } return C_LACON case obj.NAME_PARAM: if a.Reg == REGSP { // unset base register for better printing, since // a.Offset is still relative to pseudo-FP. 
a.Reg = obj.REG_NONE } c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize if c.instoffset >= -BIG && c.instoffset < BIG { return C_SACON } return C_LACON default: return C_GOK } if c.instoffset >= 0 { if c.instoffset == 0 { return C_ZCON } if c.instoffset <= 0x7fff { return C_SCON } if c.instoffset <= 0xffff { return C_ANDCON } if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && ((instoffset & (1<<31)) == 0) */ return C_UCON } if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) { return C_LCON } return C_LCON // C_DCON } if c.instoffset >= -0x8000 { return C_ADDCON } if c.instoffset&0xffff == 0 && isint32(c.instoffset) { return C_UCON } if isint32(c.instoffset) { return C_LCON } return C_LCON // C_DCON case obj.TYPE_BRANCH: return C_SBRA } return C_GOK } func prasm(p *obj.Prog) { fmt.Printf("%v\n", p) } func (c *ctxt0) oplook(p *obj.Prog) *Optab { if oprange[AOR&obj.AMask] == nil { c.ctxt.Diag("mips ops not initialized, call mips.buildop first") } a1 := int(p.Optab) if a1 != 0 { return &optab[a1-1] } a1 = int(p.From.Class) if a1 == 0 { a1 = c.aclass(&p.From) + 1 p.From.Class = int8(a1) } a1-- a3 := int(p.To.Class) if a3 == 0 { a3 = c.aclass(&p.To) + 1 p.To.Class = int8(a3) } a3-- a2 := C_NONE if p.Reg != obj.REG_NONE { a2 = C_REG } ops := oprange[p.As&obj.AMask] c1 := &xcmp[a1] c3 := &xcmp[a3] for i := range ops { op := &ops[i] if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && (op.family == 0 || c.ctxt.Arch.Family == op.family) { p.Optab = uint16(cap(optab) - cap(ops) + i + 1) return op } } c.ctxt.Diag("illegal combination %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3)) prasm(p) // Turn illegal instruction into an UNDEF, avoid crashing in asmout. return &Optab{obj.AUNDEF, C_NONE, C_NONE, C_NONE, 49, 4, 0, 0, 0} } func cmp(a int, b int) bool { if a == b { return true } switch a { case C_LCON: if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON { return true } case C_ADD0CON: if b == C_ADDCON { return true } fallthrough case C_ADDCON: if b == C_ZCON || b == C_SCON { return true } case C_AND0CON: if b == C_ANDCON { return true } fallthrough case C_ANDCON: if b == C_ZCON || b == C_SCON { return true } case C_UCON: if b == C_ZCON { return true } case C_SCON: if b == C_ZCON { return true } case C_LACON: if b == C_SACON { return true } case C_LBRA: if b == C_SBRA { return true } case C_LEXT: if b == C_SEXT { return true } case C_LAUTO: if b == C_SAUTO { return true } case C_REG: if b == C_ZCON { return r0iszero != 0 /*TypeKind(100016)*/ } case C_LOREG: if b == C_ZOREG || b == C_SOREG { return true } case C_SOREG: if b == C_ZOREG { return true } } return false } type ocmp []Optab func (x ocmp) Len() int { return len(x) } func (x ocmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x ocmp) Less(i, j int) bool { p1 := &x[i] p2 := &x[j] n := int(p1.as) - int(p2.as) if n != 0 { return n < 0 } n = int(p1.a1) - int(p2.a1) if n != 0 { return n < 0 } n = int(p1.a2) - int(p2.a2) if n != 0 { return n < 0 } n = int(p1.a3) - int(p2.a3) if n != 0 { return n < 0 } return false } func opset(a, b0 obj.As) { oprange[a&obj.AMask] = oprange[b0] } func buildop(ctxt *obj.Link) { if oprange[AOR&obj.AMask] != nil { // Already initialized; stop now. // This happens in the cmd/asm tests, // each of which re-initializes the arch. 
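// (For reference: the xcmp table built below records operand-class
// subsumption — xcmp[have][want] is true when an operand classified
// 'have' also satisfies an optab entry expecting 'want', e.g. a C_SCON
// constant matches entries written for C_LCON. oplook's c1/c3 lookups
// rely on exactly this.)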
return } var n int for i := 0; i < C_NCLASS; i++ { for n = 0; n < C_NCLASS; n++ { if cmp(n, i) { xcmp[i][n] = true } } } for n = 0; optab[n].as != obj.AXXX; n++ { } sort.Sort(ocmp(optab[:n])) for i := 0; i < n; i++ { r := optab[i].as r0 := r & obj.AMask start := i for optab[i].as == r { i++ } oprange[r0] = optab[start:i] i-- switch r { default: ctxt.Diag("unknown op in build: %v", r) ctxt.DiagFlush() log.Fatalf("bad code") case AABSF: opset(AMOVFD, r0) opset(AMOVDF, r0) opset(AMOVWF, r0) opset(AMOVFW, r0) opset(AMOVWD, r0) opset(AMOVDW, r0) opset(ANEGF, r0) opset(ANEGD, r0) opset(AABSD, r0) opset(ATRUNCDW, r0) opset(ATRUNCFW, r0) opset(ASQRTF, r0) opset(ASQRTD, r0) case AMOVVF: opset(AMOVVD, r0) opset(AMOVFV, r0) opset(AMOVDV, r0) opset(ATRUNCDV, r0) opset(ATRUNCFV, r0) case AADD: opset(ASGT, r0) opset(ASGTU, r0) opset(AADDU, r0) case AADDV: opset(AADDVU, r0) case AADDF: opset(ADIVF, r0) opset(ADIVD, r0) opset(AMULF, r0) opset(AMULD, r0) opset(ASUBF, r0) opset(ASUBD, r0) opset(AADDD, r0) case AAND: opset(AOR, r0) opset(AXOR, r0) case ABEQ: opset(ABNE, r0) case ABLEZ: opset(ABGEZ, r0) opset(ABGEZAL, r0) opset(ABLTZ, r0) opset(ABLTZAL, r0) opset(ABGTZ, r0) case AMOVB: opset(AMOVH, r0) case AMOVBU: opset(AMOVHU, r0) case AMUL: opset(AREM, r0) opset(AREMU, r0) opset(ADIVU, r0) opset(AMULU, r0) opset(ADIV, r0) opset(AMADD, r0) opset(AMSUB, r0) case AMULV: opset(ADIVV, r0) opset(ADIVVU, r0) opset(AMULVU, r0) opset(AREMV, r0) opset(AREMVU, r0) case ASLL: opset(ASRL, r0) opset(ASRA, r0) opset(AROTR, r0) case ASLLV: opset(ASRAV, r0) opset(ASRLV, r0) opset(AROTRV, r0) case ASUB: opset(ASUBU, r0) opset(ANOR, r0) case ASUBV: opset(ASUBVU, r0) case ASYSCALL: opset(ASYNC, r0) opset(ANOOP, r0) opset(ATLBP, r0) opset(ATLBR, r0) opset(ATLBWI, r0) opset(ATLBWR, r0) case ACMPEQF: opset(ACMPGTF, r0) opset(ACMPGTD, r0) opset(ACMPGEF, r0) opset(ACMPGED, r0) opset(ACMPEQD, r0) case ABFPT: opset(ABFPF, r0) case AMOVWL: opset(AMOVWR, r0) case AMOVVL: opset(AMOVVR, r0) case AVMOVB: opset(AVMOVH, r0) opset(AVMOVW, r0) opset(AVMOVD, r0) case AMOVW, AMOVD, AMOVF, AMOVV, ABREAK, ARFE, AJAL, AJMP, AMOVWU, ALL, ALLV, ASC, ASCV, ANEGW, ANEGV, AWORD, obj.ANOP, obj.ATEXT, obj.AUNDEF, obj.AFUNCDATA, obj.APCDATA, obj.ADUFFZERO, obj.ADUFFCOPY: break case ACMOVN: opset(ACMOVZ, r0) case ACMOVT: opset(ACMOVF, r0) case ACLO: opset(ACLZ, r0) case ATEQ: opset(ATNE, r0) case AWSBH: opset(ASEB, r0) opset(ASEH, r0) case ADSBH: opset(ADSHD, r0) } } } func OP(x uint32, y uint32) uint32 { return x<<3 | y<<0 } func SP(x uint32, y uint32) uint32 { return x<<29 | y<<26 } func BCOND(x uint32, y uint32) uint32 { return x<<19 | y<<16 } func MMU(x uint32, y uint32) uint32 { return SP(2, 0) | 16<<21 | x<<3 | y<<0 } func FPF(x uint32, y uint32) uint32 { return SP(2, 1) | 16<<21 | x<<3 | y<<0 } func FPD(x uint32, y uint32) uint32 { return SP(2, 1) | 17<<21 | x<<3 | y<<0 } func FPW(x uint32, y uint32) uint32 { return SP(2, 1) | 20<<21 | x<<3 | y<<0 } func FPV(x uint32, y uint32) uint32 { return SP(2, 1) | 21<<21 | x<<3 | y<<0 } func OP_RRR(op uint32, r1 int16, r2 int16, r3 int16) uint32 { return op | uint32(r1&31)<<16 | uint32(r2&31)<<21 | uint32(r3&31)<<11 } func OP_IRR(op uint32, i uint32, r2 int16, r3 int16) uint32 { return op | i&0xFFFF | uint32(r2&31)<<21 | uint32(r3&31)<<16 } func OP_SRR(op uint32, s uint32, r2 int16, r3 int16) uint32 { return op | (s&31)<<6 | uint32(r2&31)<<16 | uint32(r3&31)<<11 } func OP_FRRR(op uint32, r1 int16, r2 int16, r3 int16) uint32 { return op | uint32(r1&31)<<16 | uint32(r2&31)<<11 | uint32(r3&31)<<6 } func 
OP_JMP(op uint32, i uint32) uint32 { return op | i&0x3FFFFFF } func OP_VI10(op uint32, df uint32, s10 int32, wd uint32, minor uint32) uint32 { return 0x1e<<26 | (op&7)<<23 | (df&3)<<21 | uint32(s10&0x3FF)<<11 | (wd&31)<<6 | minor&0x3F } func OP_VMI10(s10 int32, rs uint32, wd uint32, minor uint32, df uint32) uint32 { return 0x1e<<26 | uint32(s10&0x3FF)<<16 | (rs&31)<<11 | (wd&31)<<6 | (minor&15)<<2 | df&3 } func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 := uint32(0) o2 := uint32(0) o3 := uint32(0) o4 := uint32(0) add := AADDU if c.ctxt.Arch.Family == sys.MIPS64 { add = AADDVU } switch o.type_ { default: c.ctxt.Diag("unknown type %d %v", o.type_) prasm(p) case 0: /* pseudo ops */ break case 1: /* mov r1,r2 ==> OR r1,r0,r2 */ a := AOR if p.As == AMOVW && c.ctxt.Arch.Family == sys.MIPS64 { // on MIPS64, most of the 32-bit instructions have unpredictable behavior, // but SLL is special that the result is always sign-extended to 64-bit. a = ASLL } o1 = OP_RRR(c.oprrr(a), p.From.Reg, REGZERO, p.To.Reg) case 2: /* add/sub r1,[r2],r3 */ r := p.Reg if p.As == ANEGW || p.As == ANEGV { r = REGZERO } if r == obj.REG_NONE { r = p.To.Reg } o1 = OP_RRR(c.oprrr(p.As), p.From.Reg, r, p.To.Reg) case 3: /* mov $soreg, r ==> or/add $i,o,r */ a := add if o.a1 == C_ANDCON { a = AOR } r := p.From.Reg if r == obj.REG_NONE { r = o.param } v := c.regoff(&p.From) o1 = OP_IRR(c.opirr(a), uint32(v), r, p.To.Reg) case 4: /* add $scon,[r1],r2 */ r := p.Reg if r == obj.REG_NONE { r = p.To.Reg } v := c.regoff(&p.From) o1 = OP_IRR(c.opirr(p.As), uint32(v), r, p.To.Reg) case 5: /* syscall */ o1 = c.oprrr(p.As) case 6: /* beq r1,[r2],sbra */ v := int32(0) if p.To.Target() == nil { v = int32(-4) >> 2 } else { v = int32(p.To.Target().Pc-p.Pc-4) >> 2 } if (v<<16)>>16 != v { c.ctxt.Diag("short branch too far\n%v", p) } o1 = OP_IRR(c.opirr(p.As), uint32(v), p.From.Reg, p.Reg) // for ABFPT and ABFPF only: always fill delay slot with 0 // see comments in func preprocess for details. 
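// (For reference: branch displacements are counted in words past the
// delay slot, so a target 0x100 bytes ahead encodes v = (0x100-4)>>2 = 63,
// and the (v<<16)>>16 comparison above checks that v fits the signed
// 16-bit field. The zero word stored below decodes as sll r0,r0,0,
// the architectural NOP.)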
o2 = 0 case 7: /* mov r, soreg ==> sw o(r) */ r := p.To.Reg if r == obj.REG_NONE { r = o.param } v := c.regoff(&p.To) o1 = OP_IRR(c.opirr(p.As), uint32(v), r, p.From.Reg) case 8: /* mov soreg, r ==> lw o(r) */ r := p.From.Reg if r == obj.REG_NONE { r = o.param } v := c.regoff(&p.From) o1 = OP_IRR(c.opirr(-p.As), uint32(v), r, p.To.Reg) case 9: /* sll r1,[r2],r3 */ r := p.Reg if r == obj.REG_NONE { r = p.To.Reg } o1 = OP_RRR(c.oprrr(p.As), r, p.From.Reg, p.To.Reg) case 10: /* add $con,[r1],r2 ==> mov $con, t; add t,[r1],r2 */ v := c.regoff(&p.From) a := AOR if v < 0 { a = AADDU } o1 = OP_IRR(c.opirr(a), uint32(v), obj.REG_NONE, REGTMP) r := p.Reg if r == obj.REG_NONE { r = p.To.Reg } o2 = OP_RRR(c.oprrr(p.As), REGTMP, r, p.To.Reg) case 11: /* jmp lbra */ v := int32(0) if c.aclass(&p.To) == C_SBRA && p.To.Sym == nil && p.As == AJMP { // use PC-relative branch for short branches // BEQ R0, R0, sbra if p.To.Target() == nil { v = int32(-4) >> 2 } else { v = int32(p.To.Target().Pc-p.Pc-4) >> 2 } if (v<<16)>>16 == v { o1 = OP_IRR(c.opirr(ABEQ), uint32(v), REGZERO, REGZERO) break } } if p.To.Target() == nil { v = int32(p.Pc) >> 2 } else { v = int32(p.To.Target().Pc) >> 2 } o1 = OP_JMP(c.opirr(p.As), uint32(v)) if p.To.Sym == nil { p.To.Sym = c.cursym.Func().Text.From.Sym p.To.Offset = p.To.Target().Pc } rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc) rel.Siz = 4 rel.Sym = p.To.Sym rel.Add = p.To.Offset if p.As == AJAL { rel.Type = objabi.R_CALLMIPS } else { rel.Type = objabi.R_JMPMIPS } case 12: /* movbs r,r */ // NOTE: this case does not use REGTMP. If it ever does, // remove the NOTUSETMP flag in optab. v := 16 if p.As == AMOVB { v = 24 } o1 = OP_SRR(c.opirr(ASLL), uint32(v), p.From.Reg, p.To.Reg) o2 = OP_SRR(c.opirr(ASRA), uint32(v), p.To.Reg, p.To.Reg) case 13: /* movbu r,r */ if p.As == AMOVBU { o1 = OP_IRR(c.opirr(AAND), uint32(0xff), p.From.Reg, p.To.Reg) } else { o1 = OP_IRR(c.opirr(AAND), uint32(0xffff), p.From.Reg, p.To.Reg) } case 14: /* movwu r,r */ // NOTE: this case does not use REGTMP. If it ever does, // remove the NOTUSETMP flag in optab. o1 = OP_SRR(c.opirr(-ASLLV), 0, p.From.Reg, p.To.Reg) o2 = OP_SRR(c.opirr(-ASRLV), 0, p.To.Reg, p.To.Reg) case 15: /* teq $c r,r */ r := p.Reg if r == obj.REG_NONE { r = REGZERO } v := c.regoff(&p.From) /* only use 10 bits of trap code */ o1 = OP_IRR(c.opirr(p.As), (uint32(v)&0x3FF)<<6, r, p.To.Reg) case 16: /* sll $c,[r1],r2 */ r := p.Reg if r == obj.REG_NONE { r = p.To.Reg } v := c.regoff(&p.From) /* OP_SRR will use only the low 5 bits of the shift value */ if v >= 32 && vshift(p.As) { o1 = OP_SRR(c.opirr(-p.As), uint32(v-32), r, p.To.Reg) } else { o1 = OP_SRR(c.opirr(p.As), uint32(v), r, p.To.Reg) } case 17: o1 = OP_RRR(c.oprrr(p.As), REGZERO, p.From.Reg, p.To.Reg) case 18: /* jmp [r1],0(r2) */ r := p.Reg if r == obj.REG_NONE { r = o.param } o1 = OP_RRR(c.oprrr(p.As), obj.REG_NONE, p.To.Reg, r) if p.As == obj.ACALL { rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc) rel.Siz = 0 rel.Type = objabi.R_CALLIND } case 19: /* mov $lcon,r ==> lu+or */ // NOTE: this case does not use REGTMP. If it ever does, // remove the NOTUSETMP flag in optab. 
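// For example (illustrative): $0x12345678 assembles as lui rt,0x1234
// followed by ori rt,rt,0x5678 — OP_IRR masks its immediate to the low
// 16 bits, so passing the full v to the second instruction keeps 0x5678.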
v := c.regoff(&p.From) o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), REGZERO, p.To.Reg) o2 = OP_IRR(c.opirr(AOR), uint32(v), p.To.Reg, p.To.Reg) case 20: /* mov lo/hi,r */ a := OP(2, 0) /* mfhi */ if p.From.Reg == REG_LO { a = OP(2, 2) /* mflo */ } o1 = OP_RRR(a, REGZERO, REGZERO, p.To.Reg) case 21: /* mov r,lo/hi */ a := OP(2, 1) /* mthi */ if p.To.Reg == REG_LO { a = OP(2, 3) /* mtlo */ } o1 = OP_RRR(a, REGZERO, p.From.Reg, REGZERO) case 22: /* mul r1,r2 [r3]*/ if p.To.Reg != obj.REG_NONE { r := p.Reg if r == obj.REG_NONE { r = p.To.Reg } a := SP(3, 4) | 2 /* mul */ o1 = OP_RRR(a, p.From.Reg, r, p.To.Reg) } else { o1 = OP_RRR(c.oprrr(p.As), p.From.Reg, p.Reg, REGZERO) } case 23: /* add $lcon,r1,r2 ==> lu+or+add */ v := c.regoff(&p.From) o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), REGZERO, REGTMP) o2 = OP_IRR(c.opirr(AOR), uint32(v), REGTMP, REGTMP) r := p.Reg if r == obj.REG_NONE { r = p.To.Reg } o3 = OP_RRR(c.oprrr(p.As), REGTMP, r, p.To.Reg) case 24: /* mov $ucon,r ==> lu r */ v := c.regoff(&p.From) o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), REGZERO, p.To.Reg) case 25: /* add/and $ucon,[r1],r2 ==> lu $con,t; add t,[r1],r2 */ v := c.regoff(&p.From) o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), REGZERO, REGTMP) r := p.Reg if r == obj.REG_NONE { r = p.To.Reg } o2 = OP_RRR(c.oprrr(p.As), REGTMP, r, p.To.Reg) case 26: /* mov $lsext/auto/oreg,r ==> lu+or+add */ v := c.regoff(&p.From) o1 = OP_IRR(c.opirr(ALUI), uint32(v>>16), REGZERO, REGTMP) o2 = OP_IRR(c.opirr(AOR), uint32(v), REGTMP, REGTMP) r := p.From.Reg if r == obj.REG_NONE { r = o.param } o3 = OP_RRR(c.oprrr(add), REGTMP, r, p.To.Reg) case 27: /* mov [sl]ext/auto/oreg,fr ==> lwc1 o(r) */ a := -AMOVF if p.As == AMOVD { a = -AMOVD } r := p.From.Reg if r == obj.REG_NONE { r = o.param } v := c.regoff(&p.From) switch o.size { case 12: o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), REGZERO, REGTMP) o2 = OP_RRR(c.oprrr(add), r, REGTMP, REGTMP) o3 = OP_IRR(c.opirr(a), uint32(v), REGTMP, p.To.Reg) case 4: o1 = OP_IRR(c.opirr(a), uint32(v), r, p.To.Reg) } case 28: /* mov fr,[sl]ext/auto/oreg ==> swc1 o(r) */ a := AMOVF if p.As == AMOVD { a = AMOVD } r := p.To.Reg if r == obj.REG_NONE { r = o.param } v := c.regoff(&p.To) switch o.size { case 12: o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), REGZERO, REGTMP) o2 = OP_RRR(c.oprrr(add), r, REGTMP, REGTMP) o3 = OP_IRR(c.opirr(a), uint32(v), REGTMP, p.From.Reg) case 4: o1 = OP_IRR(c.opirr(a), uint32(v), r, p.From.Reg) } case 30: /* movw r,fr */ a := SP(2, 1) | (4 << 21) /* mtc1 */ o1 = OP_RRR(a, p.From.Reg, obj.REG_NONE, p.To.Reg) case 31: /* movw fr,r */ a := SP(2, 1) | (0 << 21) /* mtc1 */ o1 = OP_RRR(a, p.To.Reg, obj.REG_NONE, p.From.Reg) case 32: /* fadd fr1,[fr2],fr3 */ r := p.Reg if r == obj.REG_NONE { r = p.To.Reg } o1 = OP_FRRR(c.oprrr(p.As), p.From.Reg, r, p.To.Reg) case 33: /* fabs fr1, fr3 */ o1 = OP_FRRR(c.oprrr(p.As), obj.REG_NONE, p.From.Reg, p.To.Reg) case 34: /* mov $con,fr ==> or/add $i,t; mov t,fr */ a := AADDU if o.a1 == C_ANDCON { a = AOR } v := c.regoff(&p.From) o1 = OP_IRR(c.opirr(a), uint32(v), obj.REG_NONE, REGTMP) o2 = OP_RRR(SP(2, 1)|(4<<21), REGTMP, obj.REG_NONE, p.To.Reg) /* mtc1 */ case 35: /* mov r,lext/auto/oreg ==> sw o(REGTMP) */ r := p.To.Reg if r == obj.REG_NONE { r = o.param } v := c.regoff(&p.To) o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), REGZERO, REGTMP) o2 = OP_RRR(c.oprrr(add), r, REGTMP, REGTMP) o3 = OP_IRR(c.opirr(p.As), uint32(v), REGTMP, p.From.Reg) case 36: /* mov lext/auto/oreg,r ==> lw o(REGTMP) */ r := p.From.Reg if r == obj.REG_NONE { r = o.param } v 
:= c.regoff(&p.From) o1 = OP_IRR(c.opirr(ALUI), uint32((v+1<<15)>>16), REGZERO, REGTMP) o2 = OP_RRR(c.oprrr(add), r, REGTMP, REGTMP) o3 = OP_IRR(c.opirr(-p.As), uint32(v), REGTMP, p.To.Reg) case 37: /* movw r,mr */ a := SP(2, 0) | (4 << 21) /* mtc0 */ if p.As == AMOVV { a = SP(2, 0) | (5 << 21) /* dmtc0 */ } o1 = OP_RRR(a, p.From.Reg, obj.REG_NONE, p.To.Reg) case 38: /* movw mr,r */ a := SP(2, 0) | (0 << 21) /* mfc0 */ if p.As == AMOVV { a = SP(2, 0) | (1 << 21) /* dmfc0 */ } o1 = OP_RRR(a, p.To.Reg, obj.REG_NONE, p.From.Reg) case 40: /* word */ o1 = uint32(c.regoff(&p.From)) case 41: /* movw f,fcr */ o1 = OP_RRR(SP(2, 1)|(6<<21), p.From.Reg, obj.REG_NONE, p.To.Reg) /* mtcc1 */ case 42: /* movw fcr,r */ o1 = OP_RRR(SP(2, 1)|(2<<21), p.To.Reg, obj.REG_NONE, p.From.Reg) /* mfcc1 */ case 47: /* movv r,fr */ a := SP(2, 1) | (5 << 21) /* dmtc1 */ o1 = OP_RRR(a, p.From.Reg, obj.REG_NONE, p.To.Reg) case 48: /* movv fr,r */ a := SP(2, 1) | (1 << 21) /* dmtc1 */ o1 = OP_RRR(a, p.To.Reg, obj.REG_NONE, p.From.Reg) case 49: /* undef */ o1 = 52 /* trap -- teq r0, r0 */ /* relocation operations */ case 50: /* mov r,addr ==> lu + add REGSB, REGTMP + sw o(REGTMP) */ o1 = OP_IRR(c.opirr(ALUI), 0, REGZERO, REGTMP) rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc) rel.Siz = 4 rel.Sym = p.To.Sym rel.Add = p.To.Offset rel.Type = objabi.R_ADDRMIPSU o2 = OP_IRR(c.opirr(p.As), 0, REGTMP, p.From.Reg) rel2 := obj.Addrel(c.cursym) rel2.Off = int32(c.pc + 4) rel2.Siz = 4 rel2.Sym = p.To.Sym rel2.Add = p.To.Offset rel2.Type = objabi.R_ADDRMIPS if o.size == 12 { o3 = o2 o2 = OP_RRR(c.oprrr(AADDVU), REGSB, REGTMP, REGTMP) rel2.Off += 4 } case 51: /* mov addr,r ==> lu + add REGSB, REGTMP + lw o(REGTMP) */ o1 = OP_IRR(c.opirr(ALUI), 0, REGZERO, REGTMP) rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc) rel.Siz = 4 rel.Sym = p.From.Sym rel.Add = p.From.Offset rel.Type = objabi.R_ADDRMIPSU o2 = OP_IRR(c.opirr(-p.As), 0, REGTMP, p.To.Reg) rel2 := obj.Addrel(c.cursym) rel2.Off = int32(c.pc + 4) rel2.Siz = 4 rel2.Sym = p.From.Sym rel2.Add = p.From.Offset rel2.Type = objabi.R_ADDRMIPS if o.size == 12 { o3 = o2 o2 = OP_RRR(c.oprrr(AADDVU), REGSB, REGTMP, REGTMP) rel2.Off += 4 } case 52: /* mov $lext, r ==> lu + add REGSB, r + add */ // NOTE: this case does not use REGTMP. If it ever does, // remove the NOTUSETMP flag in optab. o1 = OP_IRR(c.opirr(ALUI), 0, REGZERO, p.To.Reg) rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc) rel.Siz = 4 rel.Sym = p.From.Sym rel.Add = p.From.Offset rel.Type = objabi.R_ADDRMIPSU o2 = OP_IRR(c.opirr(add), 0, p.To.Reg, p.To.Reg) rel2 := obj.Addrel(c.cursym) rel2.Off = int32(c.pc + 4) rel2.Siz = 4 rel2.Sym = p.From.Sym rel2.Add = p.From.Offset rel2.Type = objabi.R_ADDRMIPS if o.size == 12 { o3 = o2 o2 = OP_RRR(c.oprrr(AADDVU), REGSB, p.To.Reg, p.To.Reg) rel2.Off += 4 } case 53: /* mov r, tlsvar ==> rdhwr + sw o(r3) */ // clobbers R3 ! // load thread pointer with RDHWR, R3 is used for fast kernel emulation on Linux // NOTE: this case does not use REGTMP. If it ever does, // remove the NOTUSETMP flag in optab. o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3 o2 = OP_IRR(c.opirr(p.As), 0, REG_R3, p.From.Reg) rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc + 4) rel.Siz = 4 rel.Sym = p.To.Sym rel.Add = p.To.Offset rel.Type = objabi.R_ADDRMIPSTLS case 54: /* mov tlsvar, r ==> rdhwr + lw o(r3) */ // clobbers R3 ! // NOTE: this case does not use REGTMP. If it ever does, // remove the NOTUSETMP flag in optab. 
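// (Decoded: opcode 037 = SPECIAL3 in bits 31:26, function 073 = RDHWR in
// bits 5:0, rd = 29 in bits 15:11 selects the UserLocal hardware register
// that Linux maps to the thread pointer, and rt = 3 in bits 20:16 names
// R3 as the destination — hence "rdhwr $29, r3".)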
o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3 o2 = OP_IRR(c.opirr(-p.As), 0, REG_R3, p.To.Reg) rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc + 4) rel.Siz = 4 rel.Sym = p.From.Sym rel.Add = p.From.Offset rel.Type = objabi.R_ADDRMIPSTLS case 55: /* mov $tlsvar, r ==> rdhwr + add */ // clobbers R3 ! // NOTE: this case does not use REGTMP. If it ever does, // remove the NOTUSETMP flag in optab. o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3 o2 = OP_IRR(c.opirr(add), 0, REG_R3, p.To.Reg) rel := obj.Addrel(c.cursym) rel.Off = int32(c.pc + 4) rel.Siz = 4 rel.Sym = p.From.Sym rel.Add = p.From.Offset rel.Type = objabi.R_ADDRMIPSTLS case 56: /* vmov{b,h,w,d} $scon, wr */ v := c.regoff(&p.From) o1 = OP_VI10(110, c.twobitdf(p.As), v, uint32(p.To.Reg), 7) case 57: /* vld $soreg, wr */ v := c.lsoffset(p.As, c.regoff(&p.From)) o1 = OP_VMI10(v, uint32(p.From.Reg), uint32(p.To.Reg), 8, c.twobitdf(p.As)) case 58: /* vst wr, $soreg */ v := c.lsoffset(p.As, c.regoff(&p.To)) o1 = OP_VMI10(v, uint32(p.To.Reg), uint32(p.From.Reg), 9, c.twobitdf(p.As)) case 59: o1 = OP_RRR(c.oprrr(p.As), p.From.Reg, REGZERO, p.To.Reg) } out[0] = o1 out[1] = o2 out[2] = o3 out[3] = o4 } func (c *ctxt0) vregoff(a *obj.Addr) int64 { c.instoffset = 0 c.aclass(a) return c.instoffset } func (c *ctxt0) regoff(a *obj.Addr) int32 { return int32(c.vregoff(a)) } func (c *ctxt0) oprrr(a obj.As) uint32 { switch a { case AADD: return OP(4, 0) case AADDU: return OP(4, 1) case ASGT: return OP(5, 2) case ASGTU: return OP(5, 3) case AAND: return OP(4, 4) case AOR: return OP(4, 5) case AXOR: return OP(4, 6) case ASUB: return OP(4, 2) case ASUBU, ANEGW: return OP(4, 3) case ANOR: return OP(4, 7) case ASLL: return OP(0, 4) case ASRL: return OP(0, 6) case ASRA: return OP(0, 7) case AROTR: return OP(8, 6) case ASLLV: return OP(2, 4) case ASRLV: return OP(2, 6) case ASRAV: return OP(2, 7) case AROTRV: return OP(10, 6) case AADDV: return OP(5, 4) case AADDVU: return OP(5, 5) case ASUBV: return OP(5, 6) case ASUBVU, ANEGV: return OP(5, 7) case AREM, ADIV: return OP(3, 2) case AREMU, ADIVU: return OP(3, 3) case AMUL: return OP(3, 0) case AMULU: return OP(3, 1) case AREMV, ADIVV: return OP(3, 6) case AREMVU, ADIVVU: return OP(3, 7) case AMULV: return OP(3, 4) case AMULVU: return OP(3, 5) case AJMP: return OP(1, 0) case AJAL: return OP(1, 1) case ABREAK: return OP(1, 5) case ASYSCALL: return OP(1, 4) case ATLBP: return MMU(1, 0) case ATLBR: return MMU(0, 1) case ATLBWI: return MMU(0, 2) case ATLBWR: return MMU(0, 6) case ARFE: return MMU(2, 0) case ADIVF: return FPF(0, 3) case ADIVD: return FPD(0, 3) case AMULF: return FPF(0, 2) case AMULD: return FPD(0, 2) case ASUBF: return FPF(0, 1) case ASUBD: return FPD(0, 1) case AADDF: return FPF(0, 0) case AADDD: return FPD(0, 0) case ATRUNCFV: return FPF(1, 1) case ATRUNCDV: return FPD(1, 1) case ATRUNCFW: return FPF(1, 5) case ATRUNCDW: return FPD(1, 5) case AMOVFV: return FPF(4, 5) case AMOVDV: return FPD(4, 5) case AMOVVF: return FPV(4, 0) case AMOVVD: return FPV(4, 1) case AMOVFW: return FPF(4, 4) case AMOVDW: return FPD(4, 4) case AMOVWF: return FPW(4, 0) case AMOVDF: return FPD(4, 0) case AMOVWD: return FPW(4, 1) case AMOVFD: return FPF(4, 1) case AABSF: return FPF(0, 5) case AABSD: return FPD(0, 5) case AMOVF: return FPF(0, 6) case AMOVD: return FPD(0, 6) case ANEGF: return FPF(0, 7) case ANEGD: return FPD(0, 7) case ACMPEQF: return FPF(6, 2) case ACMPEQD: return FPD(6, 2) case ACMPGTF: return FPF(7, 4) case ACMPGTD: return FPD(7, 4) case ACMPGEF: return FPF(7, 6) case 
ACMPGED: return FPD(7, 6) case ASQRTF: return FPF(0, 4) case ASQRTD: return FPD(0, 4) case ASYNC: return OP(1, 7) case ANOOP: return 0 case ACMOVN: return OP(1, 3) case ACMOVZ: return OP(1, 2) case ACMOVT: return OP(0, 1) | (1 << 16) case ACMOVF: return OP(0, 1) | (0 << 16) case ACLO: return SP(3, 4) | OP(4, 1) case ACLZ: return SP(3, 4) | OP(4, 0) case AMADD: return SP(3, 4) | OP(0, 0) case AMSUB: return SP(3, 4) | OP(0, 4) case AWSBH: return SP(3, 7) | OP(20, 0) case ADSBH: return SP(3, 7) | OP(20, 4) case ADSHD: return SP(3, 7) | OP(44, 4) case ASEB: return SP(3, 7) | OP(132, 0) case ASEH: return SP(3, 7) | OP(196, 0) } if a < 0 { c.ctxt.Diag("bad rrr opcode -%v", -a) } else { c.ctxt.Diag("bad rrr opcode %v", a) } return 0 } func (c *ctxt0) opirr(a obj.As) uint32 { switch a { case AADD: return SP(1, 0) case AADDU: return SP(1, 1) case ASGT: return SP(1, 2) case ASGTU: return SP(1, 3) case AAND: return SP(1, 4) case AOR: return SP(1, 5) case AXOR: return SP(1, 6) case ALUI: return SP(1, 7) case ASLL: return OP(0, 0) case ASRL: return OP(0, 2) case ASRA: return OP(0, 3) case AROTR: return OP(0, 2) | 1<<21 case AADDV: return SP(3, 0) case AADDVU: return SP(3, 1) case AJMP: return SP(0, 2) case AJAL, obj.ADUFFZERO, obj.ADUFFCOPY: return SP(0, 3) case ABEQ: return SP(0, 4) case -ABEQ: return SP(2, 4) /* likely */ case ABNE: return SP(0, 5) case -ABNE: return SP(2, 5) /* likely */ case ABGEZ: return SP(0, 1) | BCOND(0, 1) case -ABGEZ: return SP(0, 1) | BCOND(0, 3) /* likely */ case ABGEZAL: return SP(0, 1) | BCOND(2, 1) case -ABGEZAL: return SP(0, 1) | BCOND(2, 3) /* likely */ case ABGTZ: return SP(0, 7) case -ABGTZ: return SP(2, 7) /* likely */ case ABLEZ: return SP(0, 6) case -ABLEZ: return SP(2, 6) /* likely */ case ABLTZ: return SP(0, 1) | BCOND(0, 0) case -ABLTZ: return SP(0, 1) | BCOND(0, 2) /* likely */ case ABLTZAL: return SP(0, 1) | BCOND(2, 0) case -ABLTZAL: return SP(0, 1) | BCOND(2, 2) /* likely */ case ABFPT: return SP(2, 1) | (257 << 16) case -ABFPT: return SP(2, 1) | (259 << 16) /* likely */ case ABFPF: return SP(2, 1) | (256 << 16) case -ABFPF: return SP(2, 1) | (258 << 16) /* likely */ case AMOVB, AMOVBU: return SP(5, 0) case AMOVH, AMOVHU: return SP(5, 1) case AMOVW, AMOVWU: return SP(5, 3) case AMOVV: return SP(7, 7) case AMOVF: return SP(7, 1) case AMOVD: return SP(7, 5) case AMOVWL: return SP(5, 2) case AMOVWR: return SP(5, 6) case AMOVVL: return SP(5, 4) case AMOVVR: return SP(5, 5) case ABREAK: return SP(5, 7) case -AMOVWL: return SP(4, 2) case -AMOVWR: return SP(4, 6) case -AMOVVL: return SP(3, 2) case -AMOVVR: return SP(3, 3) case -AMOVB: return SP(4, 0) case -AMOVBU: return SP(4, 4) case -AMOVH: return SP(4, 1) case -AMOVHU: return SP(4, 5) case -AMOVW: return SP(4, 3) case -AMOVWU: return SP(4, 7) case -AMOVV: return SP(6, 7) case -AMOVF: return SP(6, 1) case -AMOVD: return SP(6, 5) case ASLLV: return OP(7, 0) case ASRLV: return OP(7, 2) case ASRAV: return OP(7, 3) case AROTRV: return OP(7, 2) | 1<<21 case -ASLLV: return OP(7, 4) case -ASRLV: return OP(7, 6) case -ASRAV: return OP(7, 7) case -AROTRV: return OP(7, 6) | 1<<21 case ATEQ: return OP(6, 4) case ATNE: return OP(6, 6) case -ALL: return SP(6, 0) case -ALLV: return SP(6, 4) case ASC: return SP(7, 0) case ASCV: return SP(7, 4) } if a < 0 { c.ctxt.Diag("bad irr opcode -%v", -a) } else { c.ctxt.Diag("bad irr opcode %v", a) } return 0 } func vshift(a obj.As) bool { switch a { case ASLLV, ASRLV, ASRAV, AROTRV: return true } return false } // MSA Two-bit Data Format Field Encoding func (c *ctxt0) twobitdf(a obj.As) 
uint32 { switch a { case AVMOVB: return 0 case AVMOVH: return 1 case AVMOVW: return 2 case AVMOVD: return 3 default: c.ctxt.Diag("unsupported data format %v", a) } return 0 } // MSA Load/Store offset have to be multiple of size of data format func (c *ctxt0) lsoffset(a obj.As, o int32) int32 { var mod int32 switch a { case AVMOVB: mod = 1 case AVMOVH: mod = 2 case AVMOVW: mod = 4 case AVMOVD: mod = 8 default: c.ctxt.Diag("unsupported instruction:%v", a) } if o%mod != 0 { c.ctxt.Diag("invalid offset for %v: %d is not a multiple of %d", a, o, mod) } return o / mod }
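// classifyConstSketch is an illustrative summary (an assumption for
// exposition, not used by the assembler) of the constant-class buckets
// that aclass above assigns to TYPE_CONST operands that have no base
// register or symbol. It reuses isint32/isuint32 from this file.
func classifyConstSketch(v int64) string {
	switch {
	case v == 0:
		return "C_ZCON"
	case v > 0 && v <= 0x7fff:
		return "C_SCON" // fits a signed 16-bit immediate
	case v > 0 && v <= 0xffff:
		return "C_ANDCON" // fits an unsigned 16-bit immediate (logical ops)
	case v < 0 && v >= -0x8000:
		return "C_ADDCON" // negative, fits a signed 16-bit immediate
	case v&0xffff == 0 && (isint32(v) || isuint32(uint64(v))):
		return "C_UCON" // low half zero: loadable with a single LUI
	default:
		return "C_LCON" // needs a multi-instruction sequence
	}
}

// For example, classifyConstSketch(0x8000) == "C_ANDCON",
// classifyConstSketch(0x10000) == "C_UCON", and
// classifyConstSketch(0x12345678) == "C_LCON".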
go/src/cmd/internal/obj/mips/asm0.go/0
{ "file_path": "go/src/cmd/internal/obj/mips/asm0.go", "repo_id": "go", "token_count": 29918 }
156
// cmd/9l/noop.c, cmd/9l/pass.c, cmd/9l/span.c from Vita Nuova. // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package ppc64 import ( "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" "cmd/internal/sys" "internal/abi" "internal/buildcfg" "log" "math" "math/bits" "strings" ) // Test if this value can encoded as a mask for // li -1, rx; rlic rx,rx,sh,mb. // Masks can also extend from the msb and wrap to // the lsb too. That is, the valid masks are 32 bit strings // of the form: 0..01..10..0 or 1..10..01..1 or 1...1 func isPPC64DoublewordRotateMask(v64 int64) bool { // Isolate rightmost 1 (if none 0) and add. v := uint64(v64) vp := (v & -v) + v // Likewise, for the wrapping case. vn := ^v vpn := (vn & -vn) + vn return (v&vp == 0 || vn&vpn == 0) && v != 0 } // Encode a doubleword rotate mask into mb (mask begin) and // me (mask end, inclusive). Note, POWER ISA labels bits in // big endian order. func encodePPC64RLDCMask(mask int64) (mb, me int) { // Determine boundaries and then decode them mb = bits.LeadingZeros64(uint64(mask)) me = 64 - bits.TrailingZeros64(uint64(mask)) mbn := bits.LeadingZeros64(^uint64(mask)) men := 64 - bits.TrailingZeros64(^uint64(mask)) // Check for a wrapping mask (e.g bits at 0 and 63) if mb == 0 && me == 64 { // swap the inverted values mb, me = men, mbn } // Note, me is inclusive. return mb, me - 1 } // Is this a symbol which should never have a TOC prologue generated? // These are special functions which should not have a TOC regeneration // prologue. func isNOTOCfunc(name string) bool { switch { case name == "runtime.duffzero": return true case name == "runtime.duffcopy": return true case strings.HasPrefix(name, "runtime.elf_"): return true default: return false } } // Try converting FMOVD/FMOVS to XXSPLTIDP. If it is converted, // return true. 
func convertFMOVtoXXSPLTIDP(p *obj.Prog) bool { if p.From.Type != obj.TYPE_FCONST || buildcfg.GOPPC64 < 10 { return false } v := p.From.Val.(float64) if float64(float32(v)) != v { return false } // Secondly, is this value a normal value? ival := int64(math.Float32bits(float32(v))) isDenorm := ival&0x7F800000 == 0 && ival&0x007FFFFF != 0 if !isDenorm { p.As = AXXSPLTIDP p.From.Type = obj.TYPE_CONST p.From.Offset = ival // Convert REG_Fx into equivalent REG_VSx p.To.Reg = REG_VS0 + (p.To.Reg & 31) } return !isDenorm } func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { p.From.Class = 0 p.To.Class = 0 c := ctxt9{ctxt: ctxt, newprog: newprog} // Rewrite BR/BL to symbol as TYPE_BRANCH. switch p.As { case ABR, ABL, obj.ARET, obj.ADUFFZERO, obj.ADUFFCOPY: if p.To.Sym != nil { p.To.Type = obj.TYPE_BRANCH } } // Rewrite float constants to values stored in memory. switch p.As { case AFMOVS: if p.From.Type == obj.TYPE_FCONST && !convertFMOVtoXXSPLTIDP(p) { f32 := float32(p.From.Val.(float64)) p.From.Type = obj.TYPE_MEM p.From.Sym = ctxt.Float32Sym(f32) p.From.Name = obj.NAME_EXTERN p.From.Offset = 0 } case AFMOVD: if p.From.Type == obj.TYPE_FCONST { f64 := p.From.Val.(float64) // Constant not needed in memory for float +/- 0 if f64 != 0 && !convertFMOVtoXXSPLTIDP(p) { p.From.Type = obj.TYPE_MEM p.From.Sym = ctxt.Float64Sym(f64) p.From.Name = obj.NAME_EXTERN p.From.Offset = 0 } } case AMOVW, AMOVWZ: // Note, for backwards compatibility, MOVW $const, Rx and MOVWZ $const, Rx are identical. if p.From.Type == obj.TYPE_CONST && p.From.Offset != 0 && p.From.Offset&0xFFFF == 0 { // This is a constant shifted 16 bits to the left, convert it to ADDIS/ORIS $const,... p.As = AADDIS // Use ORIS for large constants which should not be sign extended. if p.From.Offset >= 0x80000000 { p.As = AORIS } p.Reg = REG_R0 p.From.Offset >>= 16 } case AMOVD: // Skip this opcode if it is not a constant load. if p.From.Type != obj.TYPE_CONST || p.From.Name != obj.NAME_NONE || p.From.Reg != 0 { break } // 32b constants (signed and unsigned) can be generated via 1 or 2 instructions. They can be assembled directly. isS32 := int64(int32(p.From.Offset)) == p.From.Offset isU32 := uint64(uint32(p.From.Offset)) == uint64(p.From.Offset) // If prefixed instructions are supported, a 34b signed constant can be generated by one pli instruction. isS34 := pfxEnabled && (p.From.Offset<<30)>>30 == p.From.Offset // Try converting MOVD $const,Rx into ADDIS/ORIS $s32>>16,R0,Rx switch { case isS32 && p.From.Offset&0xFFFF == 0 && p.From.Offset != 0: p.As = AADDIS p.From.Offset >>= 16 p.Reg = REG_R0 case isU32 && p.From.Offset&0xFFFF == 0 && p.From.Offset != 0: p.As = AORIS p.From.Offset >>= 16 p.Reg = REG_R0 case isS32 || isU32 || isS34: // The assembler can generate this opcode in 1 (on Power10) or 2 opcodes. // Otherwise, see if the large constant can be generated with 2 instructions. If not, load it from memory. default: // Is this a shifted 16b constant? If so, rewrite it to avoid a creating and loading a constant. 
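// For example (illustrative): MOVD $0x456700000000, R5 is rewritten as
// MOVD $0x4567, R5; SLD $32, R5. A doubleword rotate mask such as
// $0x00FFFF0000000000 instead becomes MOVD $-1, R5 followed by an RLDIC
// whose fields come from encodePPC64RLDCMask (mb = 8, me = 23 in POWER's
// big-endian bit numbering, so sh = ^me&63 = 40). The mask test itself
// uses the (v&-v)+v trick: adding the isolated lowest set bit clears a
// contiguous run of ones, so v&vp == 0 exactly when the ones form one
// block (the ^v case catches masks that wrap around the word).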
val := p.From.Offset shift := bits.TrailingZeros64(uint64(val)) mask := int64(0xFFFF) << shift if val&mask == val || (val>>(shift+16) == -1 && (val>>shift)<<shift == val) { // Rewrite this value into MOVD $const>>shift, Rto; SLD $shift, Rto q := obj.Appendp(p, c.newprog) q.As = ASLD q.From.SetConst(int64(shift)) q.To = p.To p.From.Offset >>= shift p = q } else if isPPC64DoublewordRotateMask(val) { // This constant is a mask value, generate MOVD $-1, Rto; RLDIC Rto, ^me, mb, Rto mb, me := encodePPC64RLDCMask(val) q := obj.Appendp(p, c.newprog) q.As = ARLDC q.AddRestSourceConst((^int64(me)) & 0x3F) q.AddRestSourceConst(int64(mb)) q.From = p.To q.To = p.To p.From.Offset = -1 p = q } else { // Load the constant from memory. p.From.Type = obj.TYPE_MEM p.From.Sym = ctxt.Int64Sym(p.From.Offset) p.From.Name = obj.NAME_EXTERN p.From.Offset = 0 } } } switch p.As { // Rewrite SUB constants into ADD. case ASUBC: if p.From.Type == obj.TYPE_CONST { p.From.Offset = -p.From.Offset p.As = AADDC } case ASUBCCC: if p.From.Type == obj.TYPE_CONST { p.From.Offset = -p.From.Offset p.As = AADDCCC } case ASUB: if p.From.Type != obj.TYPE_CONST { break } // Rewrite SUB $const,... into ADD $-const,... p.From.Offset = -p.From.Offset p.As = AADD // This is now an ADD opcode, try simplifying it below. fallthrough // Rewrite ADD/OR/XOR/ANDCC $const,... forms into ADDIS/ORIS/XORIS/ANDISCC case AADD: // Don't rewrite if this is not adding a constant value, or is not an int32 if p.From.Type != obj.TYPE_CONST || p.From.Offset == 0 || int64(int32(p.From.Offset)) != p.From.Offset { break } if p.From.Offset&0xFFFF == 0 { // The constant can be added using ADDIS p.As = AADDIS p.From.Offset >>= 16 } else if buildcfg.GOPPC64 >= 10 { // Let the assembler generate paddi for large constants. break } else if (p.From.Offset < -0x8000 && int64(int32(p.From.Offset)) == p.From.Offset) || (p.From.Offset > 0xFFFF && p.From.Offset < 0x7FFF8000) { // For a constant x, 0xFFFF (UINT16_MAX) < x < 0x7FFF8000 or -0x80000000 (INT32_MIN) <= x < -0x8000 (INT16_MIN) // This is not done for 0x7FFF < x < 0x10000; the assembler will generate a slightly faster instruction sequence. // // The constant x can be rewritten as ADDIS + ADD as follows: // ADDIS $x>>16 + (x>>15)&1, rX, rY // ADD $int64(int16(x)), rY, rY // The range is slightly asymmetric as 0x7FFF8000 and above overflow the sign bit, whereas for // negative values, this would happen with constant values between -1 and -32768 which can // assemble into a single addi. is := p.From.Offset>>16 + (p.From.Offset>>15)&1 i := int64(int16(p.From.Offset)) p.As = AADDIS p.From.Offset = is q := obj.Appendp(p, c.newprog) q.As = AADD q.From.SetConst(i) q.Reg = p.To.Reg q.To = p.To p = q } case AOR: if p.From.Type == obj.TYPE_CONST && uint64(p.From.Offset)&0xFFFFFFFF0000FFFF == 0 && p.From.Offset != 0 { p.As = AORIS p.From.Offset >>= 16 } case AXOR: if p.From.Type == obj.TYPE_CONST && uint64(p.From.Offset)&0xFFFFFFFF0000FFFF == 0 && p.From.Offset != 0 { p.As = AXORIS p.From.Offset >>= 16 } case AANDCC: if p.From.Type == obj.TYPE_CONST && uint64(p.From.Offset)&0xFFFFFFFF0000FFFF == 0 && p.From.Offset != 0 { p.As = AANDISCC p.From.Offset >>= 16 } // To maintain backwards compatibility, we accept some 4 argument usage of // several opcodes which was likely not intended, but did work. These are not // added to optab to avoid the chance this behavior might be used with newer // instructions. 
// // Rewrite argument ordering like "ADDEX R3, $3, R4, R5" into // "ADDEX R3, R4, $3, R5" case AVSHASIGMAW, AVSHASIGMAD, AADDEX, AXXSLDWI, AXXPERMDI: if len(p.RestArgs) == 2 && p.Reg == 0 && p.RestArgs[0].Addr.Type == obj.TYPE_CONST && p.RestArgs[1].Addr.Type == obj.TYPE_REG { p.Reg = p.RestArgs[1].Addr.Reg p.RestArgs = p.RestArgs[:1] } } if c.ctxt.Headtype == objabi.Haix { c.rewriteToUseTOC(p) } else if c.ctxt.Flag_dynlink { c.rewriteToUseGot(p) } } // Rewrite p, if necessary, to access a symbol using its TOC anchor. // This code is for AIX only. func (c *ctxt9) rewriteToUseTOC(p *obj.Prog) { if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP { return } if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO { // ADUFFZERO/ADUFFCOPY is considered as an ABL except in dynamic // link where it should be an indirect call. if !c.ctxt.Flag_dynlink { return } // ADUFFxxx $offset // becomes // MOVD runtime.duffxxx@TOC, R12 // ADD $offset, R12 // MOVD R12, LR // BL (LR) var sym *obj.LSym if p.As == obj.ADUFFZERO { sym = c.ctxt.Lookup("runtime.duffzero") } else { sym = c.ctxt.Lookup("runtime.duffcopy") } // Retrieve or create the TOC anchor. symtoc := c.ctxt.LookupInit("TOC."+sym.Name, func(s *obj.LSym) { s.Type = objabi.SDATA s.Set(obj.AttrDuplicateOK, true) s.Set(obj.AttrStatic, true) c.ctxt.Data = append(c.ctxt.Data, s) s.WriteAddr(c.ctxt, 0, 8, sym, 0) }) offset := p.To.Offset p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Name = obj.NAME_TOCREF p.From.Sym = symtoc p.To.Type = obj.TYPE_REG p.To.Reg = REG_R12 p.To.Name = obj.NAME_NONE p.To.Offset = 0 p.To.Sym = nil p1 := obj.Appendp(p, c.newprog) p1.As = AADD p1.From.Type = obj.TYPE_CONST p1.From.Offset = offset p1.To.Type = obj.TYPE_REG p1.To.Reg = REG_R12 p2 := obj.Appendp(p1, c.newprog) p2.As = AMOVD p2.From.Type = obj.TYPE_REG p2.From.Reg = REG_R12 p2.To.Type = obj.TYPE_REG p2.To.Reg = REG_LR p3 := obj.Appendp(p2, c.newprog) p3.As = obj.ACALL p3.To.Type = obj.TYPE_REG p3.To.Reg = REG_LR } var source *obj.Addr if p.From.Name == obj.NAME_EXTERN || p.From.Name == obj.NAME_STATIC { if p.From.Type == obj.TYPE_ADDR { if p.As == ADWORD { // ADWORD $sym doesn't need TOC anchor return } if p.As != AMOVD { c.ctxt.Diag("do not know how to handle TYPE_ADDR in %v", p) return } if p.To.Type != obj.TYPE_REG { c.ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v", p) return } } else if p.From.Type != obj.TYPE_MEM { c.ctxt.Diag("do not know how to handle %v without TYPE_MEM", p) return } source = &p.From } else if p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC { if p.To.Type != obj.TYPE_MEM { c.ctxt.Diag("do not know how to handle %v without TYPE_MEM", p) return } if source != nil { c.ctxt.Diag("cannot handle symbols on both sides in %v", p) return } source = &p.To } else { return } if source.Sym == nil { c.ctxt.Diag("do not know how to handle nil symbol in %v", p) return } if source.Sym.Type == objabi.STLSBSS { return } // Retrieve or create the TOC anchor. 
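// A TOC anchor "TOC.<name>" is an 8-byte data symbol initialized with the
// address of <name>; indirecting through it means the code only needs a
// single TOC-relative load from R2 to obtain the address of any symbol.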
symtoc := c.ctxt.LookupInit("TOC."+source.Sym.Name, func(s *obj.LSym) { s.Type = objabi.SDATA s.Set(obj.AttrDuplicateOK, true) s.Set(obj.AttrStatic, true) c.ctxt.Data = append(c.ctxt.Data, s) s.WriteAddr(c.ctxt, 0, 8, source.Sym, 0) }) if source.Type == obj.TYPE_ADDR { // MOVD $sym, Rx becomes MOVD symtoc, Rx // MOVD $sym+<off>, Rx becomes MOVD symtoc, Rx; ADD <off>, Rx p.From.Type = obj.TYPE_MEM p.From.Sym = symtoc p.From.Name = obj.NAME_TOCREF if p.From.Offset != 0 { q := obj.Appendp(p, c.newprog) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = p.From.Offset p.From.Offset = 0 q.To = p.To } return } // MOVx sym, Ry becomes MOVD symtoc, REGTMP; MOVx (REGTMP), Ry // MOVx Ry, sym becomes MOVD symtoc, REGTMP; MOVx Ry, (REGTMP) // An addition may be inserted between the two MOVs if there is an offset. q := obj.Appendp(p, c.newprog) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Sym = symtoc q.From.Name = obj.NAME_TOCREF q.To.Type = obj.TYPE_REG q.To.Reg = REGTMP q = obj.Appendp(q, c.newprog) q.As = p.As q.From = p.From q.To = p.To if p.From.Name != obj.NAME_NONE { q.From.Type = obj.TYPE_MEM q.From.Reg = REGTMP q.From.Name = obj.NAME_NONE q.From.Sym = nil } else if p.To.Name != obj.NAME_NONE { q.To.Type = obj.TYPE_MEM q.To.Reg = REGTMP q.To.Name = obj.NAME_NONE q.To.Sym = nil } else { c.ctxt.Diag("unreachable case in rewriteToUseTOC with %v", p) } obj.Nopout(p) } // Rewrite p, if necessary, to access global data via the global offset table. func (c *ctxt9) rewriteToUseGot(p *obj.Prog) { if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO { // ADUFFxxx $offset // becomes // MOVD runtime.duffxxx@GOT, R12 // ADD $offset, R12 // MOVD R12, LR // BL (LR) var sym *obj.LSym if p.As == obj.ADUFFZERO { sym = c.ctxt.LookupABI("runtime.duffzero", obj.ABIInternal) } else { sym = c.ctxt.LookupABI("runtime.duffcopy", obj.ABIInternal) } offset := p.To.Offset p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Name = obj.NAME_GOTREF p.From.Sym = sym p.To.Type = obj.TYPE_REG p.To.Reg = REG_R12 p.To.Name = obj.NAME_NONE p.To.Offset = 0 p.To.Sym = nil p1 := obj.Appendp(p, c.newprog) p1.As = AADD p1.From.Type = obj.TYPE_CONST p1.From.Offset = offset p1.To.Type = obj.TYPE_REG p1.To.Reg = REG_R12 p2 := obj.Appendp(p1, c.newprog) p2.As = AMOVD p2.From.Type = obj.TYPE_REG p2.From.Reg = REG_R12 p2.To.Type = obj.TYPE_REG p2.To.Reg = REG_LR p3 := obj.Appendp(p2, c.newprog) p3.As = obj.ACALL p3.To.Type = obj.TYPE_REG p3.To.Reg = REG_LR } // We only care about global data: NAME_EXTERN means a global // symbol in the Go sense, and p.Sym.Local is true for a few // internally defined symbols. 
if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() { // MOVD $sym, Rx becomes MOVD sym@GOT, Rx // MOVD $sym+<off>, Rx becomes MOVD sym@GOT, Rx; ADD <off>, Rx if p.As != AMOVD { c.ctxt.Diag("do not know how to handle TYPE_ADDR in %v with -dynlink", p) } if p.To.Type != obj.TYPE_REG { c.ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v with -dynlink", p) } p.From.Type = obj.TYPE_MEM p.From.Name = obj.NAME_GOTREF if p.From.Offset != 0 { q := obj.Appendp(p, c.newprog) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = p.From.Offset q.To = p.To p.From.Offset = 0 } } if p.GetFrom3() != nil && p.GetFrom3().Name == obj.NAME_EXTERN { c.ctxt.Diag("don't know how to handle %v with -dynlink", p) } var source *obj.Addr // MOVx sym, Ry becomes MOVD sym@GOT, REGTMP; MOVx (REGTMP), Ry // MOVx Ry, sym becomes MOVD sym@GOT, REGTMP; MOVx Ry, (REGTMP) // An addition may be inserted between the two MOVs if there is an offset. if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() { if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() { c.ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p) } source = &p.From } else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() { source = &p.To } else { return } if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP { return } if source.Sym.Type == objabi.STLSBSS { return } if source.Type != obj.TYPE_MEM { c.ctxt.Diag("don't know how to handle %v with -dynlink", p) } p1 := obj.Appendp(p, c.newprog) p2 := obj.Appendp(p1, c.newprog) p1.As = AMOVD p1.From.Type = obj.TYPE_MEM p1.From.Sym = source.Sym p1.From.Name = obj.NAME_GOTREF p1.To.Type = obj.TYPE_REG p1.To.Reg = REGTMP p2.As = p.As p2.From = p.From p2.To = p.To if p.From.Name == obj.NAME_EXTERN { p2.From.Reg = REGTMP p2.From.Name = obj.NAME_NONE p2.From.Sym = nil } else if p.To.Name == obj.NAME_EXTERN { p2.To.Reg = REGTMP p2.To.Name = obj.NAME_NONE p2.To.Sym = nil } else { return } obj.Nopout(p) } func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { // TODO(minux): add morestack short-cuts with small fixed frame-size. if cursym.Func().Text == nil || cursym.Func().Text.Link == nil { return } c := ctxt9{ctxt: ctxt, cursym: cursym, newprog: newprog} p := c.cursym.Func().Text textstksiz := p.To.Offset if textstksiz == -8 { // Compatibility hack. 
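// (A declared frame size of $-8 is the historical way to request a
// NOFRAME function; normalize it to the NoFrame attribute with size 0.)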
p.From.Sym.Set(obj.AttrNoFrame, true) textstksiz = 0 } if textstksiz%8 != 0 { c.ctxt.Diag("frame size %d not a multiple of 8", textstksiz) } if p.From.Sym.NoFrame() { if textstksiz != 0 { c.ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz) } } c.cursym.Func().Args = p.To.Val.(int32) c.cursym.Func().Locals = int32(textstksiz) /* * find leaf subroutines * expand RET * expand BECOME pseudo */ var q *obj.Prog var q1 *obj.Prog for p := c.cursym.Func().Text; p != nil; p = p.Link { switch p.As { /* too hard, just leave alone */ case obj.ATEXT: q = p p.Mark |= LABEL | LEAF | SYNC if p.Link != nil { p.Link.Mark |= LABEL } case ANOR: q = p if p.To.Type == obj.TYPE_REG { if p.To.Reg == REGZERO { p.Mark |= LABEL | SYNC } } case ALWAR, ALBAR, ASTBCCC, ASTWCCC, AEIEIO, AICBI, AISYNC, ATLBIE, ATLBIEL, ASLBIA, ASLBIE, ASLBMFEE, ASLBMFEV, ASLBMTE, ADCBF, ADCBI, ADCBST, ADCBT, ADCBTST, ADCBZ, ASYNC, ATLBSYNC, APTESYNC, ALWSYNC, ATW, AWORD, ARFI, ARFCI, ARFID, AHRFID: q = p p.Mark |= LABEL | SYNC continue case AMOVW, AMOVWZ, AMOVD: q = p if p.From.Reg >= REG_SPECIAL || p.To.Reg >= REG_SPECIAL { p.Mark |= LABEL | SYNC } continue case AFABS, AFABSCC, AFADD, AFADDCC, AFCTIW, AFCTIWCC, AFCTIWZ, AFCTIWZCC, AFDIV, AFDIVCC, AFMADD, AFMADDCC, AFMOVD, AFMOVDU, /* case AFMOVDS: */ AFMOVS, AFMOVSU, /* case AFMOVSD: */ AFMSUB, AFMSUBCC, AFMUL, AFMULCC, AFNABS, AFNABSCC, AFNEG, AFNEGCC, AFNMADD, AFNMADDCC, AFNMSUB, AFNMSUBCC, AFRSP, AFRSPCC, AFSUB, AFSUBCC: q = p p.Mark |= FLOAT continue case ABL, ABCL, obj.ADUFFZERO, obj.ADUFFCOPY: c.cursym.Func().Text.Mark &^= LEAF fallthrough case ABC, ABEQ, ABGE, ABGT, ABLE, ABLT, ABNE, ABR, ABVC, ABVS: p.Mark |= BRANCH q = p q1 = p.To.Target() if q1 != nil { // NOPs are not removed due to #40689. if q1.Mark&LEAF == 0 { q1.Mark |= LABEL } } else { p.Mark |= LABEL } q1 = p.Link if q1 != nil { q1.Mark |= LABEL } continue case AFCMPO, AFCMPU: q = p p.Mark |= FCMP | FLOAT continue case obj.ARET: q = p if p.Link != nil { p.Link.Mark |= LABEL } continue case obj.ANOP: // NOPs are not removed due to // #40689 continue default: q = p continue } } autosize := int32(0) var p1 *obj.Prog var p2 *obj.Prog for p := c.cursym.Func().Text; p != nil; p = p.Link { o := p.As switch o { case obj.ATEXT: autosize = int32(textstksiz) if p.Mark&LEAF != 0 && autosize == 0 { // A leaf function with no locals has no frame. p.From.Sym.Set(obj.AttrNoFrame, true) } if !p.From.Sym.NoFrame() { // If there is a stack frame at all, it includes // space to save the LR. autosize += int32(c.ctxt.Arch.FixedFrameSize) } if p.Mark&LEAF != 0 && autosize < abi.StackSmall { // A leaf function with a small stack can be marked // NOSPLIT, avoiding a stack check. p.From.Sym.Set(obj.AttrNoSplit, true) } p.To.Offset = int64(autosize) q = p if NeedTOCpointer(c.ctxt) && !isNOTOCfunc(c.cursym.Name) { // When compiling Go into PIC, without PCrel support, all functions must start // with instructions to load the TOC pointer into r2: // // addis r2, r12, .TOC.-func@ha // addi r2, r2, .TOC.-func@l+4 // // We could probably skip this prologue in some situations // but it's a bit subtle. However, it is both safe and // necessary to leave the prologue off duffzero and // duffcopy as we rely on being able to jump to a specific // instruction offset for them. // // These are AWORDS because there is no (afaict) way to // generate the addis instruction except as part of the // load of a large constant, and in that case there is no // way to use r12 as the source. 
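// Decoded, the two words below are addis r2,r12,0 (0x3c4c0000) and
// addi r2,r2,0 (0x38420000); the 8-byte R_ADDRPOWER_PCREL relocation
// fills their immediate halves with the high-adjusted and low parts
// of .TOC.-func.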
// // Note that the same condition is tested in // putelfsym in cmd/link/internal/ld/symtab.go // where we set the st_other field to indicate // the presence of these instructions. q = obj.Appendp(q, c.newprog) q.As = AWORD q.Pos = p.Pos q.From.Type = obj.TYPE_CONST q.From.Offset = 0x3c4c0000 q = obj.Appendp(q, c.newprog) q.As = AWORD q.Pos = p.Pos q.From.Type = obj.TYPE_CONST q.From.Offset = 0x38420000 rel := obj.Addrel(c.cursym) rel.Off = 0 rel.Siz = 8 rel.Sym = c.ctxt.Lookup(".TOC.") rel.Type = objabi.R_ADDRPOWER_PCREL } if !c.cursym.Func().Text.From.Sym.NoSplit() { q = c.stacksplit(q, autosize) // emit split check } if autosize != 0 { var prologueEnd *obj.Prog // Save the link register and update the SP. MOVDU is used unless // the frame size is too large. The link register must be saved // even for non-empty leaf functions so that traceback works. if autosize >= -BIG && autosize <= BIG { // Use MOVDU to adjust R1 when saving R31, if autosize is small. q = obj.Appendp(q, c.newprog) q.As = AMOVD q.Pos = p.Pos q.From.Type = obj.TYPE_REG q.From.Reg = REG_LR q.To.Type = obj.TYPE_REG q.To.Reg = REGTMP prologueEnd = q q = obj.Appendp(q, c.newprog) q.As = AMOVDU q.Pos = p.Pos q.From.Type = obj.TYPE_REG q.From.Reg = REGTMP q.To.Type = obj.TYPE_MEM q.To.Offset = int64(-autosize) q.To.Reg = REGSP q.Spadj = autosize } else { // Frame size is too large for a MOVDU instruction. // Store link register before decrementing SP, so if a signal comes // during the execution of the function prologue, the traceback // code will not see a half-updated stack frame. // This sequence is not async preemptible, as if we open a frame // at the current SP, it will clobber the saved LR. q = obj.Appendp(q, c.newprog) q.As = AMOVD q.Pos = p.Pos q.From.Type = obj.TYPE_REG q.From.Reg = REG_LR q.To.Type = obj.TYPE_REG q.To.Reg = REG_R29 // REGTMP may be used to synthesize large offset in the next instruction q = c.ctxt.StartUnsafePoint(q, c.newprog) q = obj.Appendp(q, c.newprog) q.As = AMOVD q.Pos = p.Pos q.From.Type = obj.TYPE_REG q.From.Reg = REG_R29 q.To.Type = obj.TYPE_MEM q.To.Offset = int64(-autosize) q.To.Reg = REGSP prologueEnd = q q = obj.Appendp(q, c.newprog) q.As = AADD q.Pos = p.Pos q.From.Type = obj.TYPE_CONST q.From.Offset = int64(-autosize) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = +autosize q = c.ctxt.EndUnsafePoint(q, c.newprog, -1) } prologueEnd.Pos = prologueEnd.Pos.WithXlogue(src.PosPrologueEnd) } else if c.cursym.Func().Text.Mark&LEAF == 0 { // A very few functions that do not return to their caller // (e.g. gogo) are not identified as leaves but still have // no frame. c.cursym.Func().Text.Mark |= LEAF } if c.cursym.Func().Text.Mark&LEAF != 0 { c.cursym.Set(obj.AttrLeaf, true) break } if NeedTOCpointer(c.ctxt) { q = obj.Appendp(q, c.newprog) q.As = AMOVD q.Pos = p.Pos q.From.Type = obj.TYPE_REG q.From.Reg = REG_R2 q.To.Type = obj.TYPE_MEM q.To.Reg = REGSP q.To.Offset = 24 } if c.cursym.Func().Text.From.Sym.Wrapper() { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // // MOVD g_panic(g), R22 // CMP R22, $0 // BEQ end // MOVD panic_argp(R22), R23 // ADD $(autosize+8), R1, R24 // CMP R23, R24 // BNE end // ADD $8, R1, R25 // MOVD R25, panic_argp(R22) // end: // NOP // // The NOP is needed to give the jumps somewhere to land. // It is a liblink NOP, not a ppc64 NOP: it encodes to 0 instruction bytes. 
			q = obj.Appendp(q, c.newprog)
			q.As = AMOVD
			q.From.Type = obj.TYPE_MEM
			q.From.Reg = REGG
			q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
			q.To.Type = obj.TYPE_REG
			q.To.Reg = REG_R22

			q = obj.Appendp(q, c.newprog)
			q.As = ACMP
			q.From.Type = obj.TYPE_REG
			q.From.Reg = REG_R22
			q.To.Type = obj.TYPE_CONST
			q.To.Offset = 0

			q = obj.Appendp(q, c.newprog)
			q.As = ABEQ
			q.To.Type = obj.TYPE_BRANCH
			p1 = q

			q = obj.Appendp(q, c.newprog)
			q.As = AMOVD
			q.From.Type = obj.TYPE_MEM
			q.From.Reg = REG_R22
			q.From.Offset = 0 // Panic.argp
			q.To.Type = obj.TYPE_REG
			q.To.Reg = REG_R23

			q = obj.Appendp(q, c.newprog)
			q.As = AADD
			q.From.Type = obj.TYPE_CONST
			q.From.Offset = int64(autosize) + c.ctxt.Arch.FixedFrameSize
			q.Reg = REGSP
			q.To.Type = obj.TYPE_REG
			q.To.Reg = REG_R24

			q = obj.Appendp(q, c.newprog)
			q.As = ACMP
			q.From.Type = obj.TYPE_REG
			q.From.Reg = REG_R23
			q.To.Type = obj.TYPE_REG
			q.To.Reg = REG_R24

			q = obj.Appendp(q, c.newprog)
			q.As = ABNE
			q.To.Type = obj.TYPE_BRANCH
			p2 = q

			q = obj.Appendp(q, c.newprog)
			q.As = AADD
			q.From.Type = obj.TYPE_CONST
			q.From.Offset = c.ctxt.Arch.FixedFrameSize
			q.Reg = REGSP
			q.To.Type = obj.TYPE_REG
			q.To.Reg = REG_R25

			q = obj.Appendp(q, c.newprog)
			q.As = AMOVD
			q.From.Type = obj.TYPE_REG
			q.From.Reg = REG_R25
			q.To.Type = obj.TYPE_MEM
			q.To.Reg = REG_R22
			q.To.Offset = 0 // Panic.argp

			q = obj.Appendp(q, c.newprog)
			q.As = obj.ANOP
			p1.To.SetTarget(q)
			p2.To.SetTarget(q)
		}

	case obj.ARET:
		if p.From.Type == obj.TYPE_CONST {
			c.ctxt.Diag("using BECOME (%v) is not supported!", p)
			break
		}

		retTarget := p.To.Sym

		if c.cursym.Func().Text.Mark&LEAF != 0 {
			if autosize == 0 {
				p.As = ABR
				p.From = obj.Addr{}
				if retTarget == nil {
					p.To.Type = obj.TYPE_REG
					p.To.Reg = REG_LR
				} else {
					p.To.Type = obj.TYPE_BRANCH
					p.To.Sym = retTarget
				}
				p.Mark |= BRANCH
				break
			}

			p.As = AADD
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = int64(autosize)
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REGSP
			p.Spadj = -autosize

			q = c.newprog()
			q.As = ABR
			q.Pos = p.Pos
			if retTarget == nil {
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REG_LR
			} else {
				q.To.Type = obj.TYPE_BRANCH
				q.To.Sym = retTarget
			}
			q.Mark |= BRANCH
			q.Spadj = +autosize

			q.Link = p.Link
			p.Link = q
			break
		}

		p.As = AMOVD
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = 0
		p.From.Reg = REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REGTMP

		q = c.newprog()
		q.As = AMOVD
		q.Pos = p.Pos
		q.From.Type = obj.TYPE_REG
		q.From.Reg = REGTMP
		q.To.Type = obj.TYPE_REG
		q.To.Reg = REG_LR

		q.Link = p.Link
		p.Link = q
		p = q

		if false {
			// Debug bad returns
			q = c.newprog()
			q.As = AMOVD
			q.Pos = p.Pos
			q.From.Type = obj.TYPE_MEM
			q.From.Offset = 0
			q.From.Reg = REGTMP
			q.To.Type = obj.TYPE_REG
			q.To.Reg = REGTMP

			q.Link = p.Link
			p.Link = q
			p = q
		}
		prev := p
		if autosize != 0 {
			q = c.newprog()
			q.As = AADD
			q.Pos = p.Pos
			q.From.Type = obj.TYPE_CONST
			q.From.Offset = int64(autosize)
			q.To.Type = obj.TYPE_REG
			q.To.Reg = REGSP
			q.Spadj = -autosize

			q.Link = p.Link
			prev.Link = q
			prev = q
		}

		q1 = c.newprog()
		q1.As = ABR
		q1.Pos = p.Pos
		if retTarget == nil {
			q1.To.Type = obj.TYPE_REG
			q1.To.Reg = REG_LR
		} else {
			q1.To.Type = obj.TYPE_BRANCH
			q1.To.Sym = retTarget
		}
		q1.Mark |= BRANCH
		q1.Spadj = +autosize

		q1.Link = q.Link
		prev.Link = q1

	case AADD:
		if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST {
			p.Spadj = int32(-p.From.Offset)
		}

	case AMOVDU:
		if p.To.Type == obj.TYPE_MEM && p.To.Reg == REGSP {
			p.Spadj = int32(-p.To.Offset)
		}
		if p.From.Type == obj.TYPE_MEM && p.From.Reg == REGSP {
			p.Spadj = int32(-p.From.Offset)
		}

	case obj.AGETCALLERPC:
		if cursym.Leaf() {
			/* MOVD LR, Rd */
			p.As = AMOVD
			p.From.Type = obj.TYPE_REG
			p.From.Reg = REG_LR
		} else {
			/* MOVD (RSP), Rd */
			p.As = AMOVD
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = REGSP
		}
	}

	if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.Spadj == 0 && p.As != ACMPU {
		f := c.cursym.Func()
		if f.FuncFlag&abi.FuncFlagSPWrite == 0 {
			c.cursym.Func().FuncFlag |= abi.FuncFlagSPWrite
			if ctxt.Debugvlog || !ctxt.IsAsm {
				ctxt.Logf("auto-SPWRITE: %s %v\n", c.cursym.Name, p)
				if !ctxt.IsAsm {
					ctxt.Diag("invalid auto-SPWRITE in non-assembly")
					ctxt.DiagFlush()
					log.Fatalf("bad SPWRITE")
				}
			}
		}
	}
	}
}

/*
// instruction scheduling
	if(debug['Q'] == 0)
		return;

	curtext = nil;
	q = nil;	// p - 1
	q1 = firstp;	// top of block
	o = 0;		// count of instructions
	for(p = firstp; p != nil; p = p1) {
		p1 = p->link;
		o++;
		if(p->mark & NOSCHED){
			if(q1 != p){
				sched(q1, q);
			}
			for(; p != nil; p = p->link){
				if(!(p->mark & NOSCHED))
					break;
				q = p;
			}
			p1 = p;
			q1 = p;
			o = 0;
			continue;
		}
		if(p->mark & (LABEL|SYNC)) {
			if(q1 != p)
				sched(q1, q);
			q1 = p;
			o = 1;
		}
		if(p->mark & (BRANCH|SYNC)) {
			sched(q1, p);
			q1 = p1;
			o = 0;
		}
		if(o >= NSCHED) {
			sched(q1, p);
			q1 = p1;
			o = 0;
		}
		q = p;
	}
*/

func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
	if c.ctxt.Flag_maymorestack != "" {
		if c.ctxt.Flag_shared || c.ctxt.Flag_dynlink {
			// See the call to morestack for why these are
			// complicated to support.
			c.ctxt.Diag("maymorestack with -shared or -dynlink is not supported")
		}

		// Spill arguments. This has to happen before we open
		// any more frame space.
		p = c.cursym.Func().SpillRegisterArgs(p, c.newprog)

		// Save LR and REGCTXT
		frameSize := 8 + c.ctxt.Arch.FixedFrameSize

		// MOVD LR, REGTMP
		p = obj.Appendp(p, c.newprog)
		p.As = AMOVD
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_LR
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REGTMP

		// MOVDU REGTMP, -16(SP)
		p = obj.Appendp(p, c.newprog)
		p.As = AMOVDU
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REGTMP
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = -frameSize
		p.To.Reg = REGSP
		p.Spadj = int32(frameSize)

		// MOVD REGCTXT, 8(SP)
		p = obj.Appendp(p, c.newprog)
		p.As = AMOVD
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REGCTXT
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 8
		p.To.Reg = REGSP

		// BL maymorestack
		p = obj.Appendp(p, c.newprog)
		p.As = ABL
		p.To.Type = obj.TYPE_BRANCH
		// See ../x86/obj6.go
		p.To.Sym = c.ctxt.LookupABI(c.ctxt.Flag_maymorestack, c.cursym.ABI())

		// Restore LR and REGCTXT

		// MOVD 8(SP), REGCTXT
		p = obj.Appendp(p, c.newprog)
		p.As = AMOVD
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = 8
		p.From.Reg = REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REGCTXT

		// MOVD 0(SP), REGTMP
		p = obj.Appendp(p, c.newprog)
		p.As = AMOVD
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = 0
		p.From.Reg = REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REGTMP

		// MOVD REGTMP, LR
		p = obj.Appendp(p, c.newprog)
		p.As = AMOVD
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REGTMP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_LR

		// ADD $16, SP
		p = obj.Appendp(p, c.newprog)
		p.As = AADD
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = frameSize
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REGSP
		p.Spadj = -int32(frameSize)

		// Unspill arguments.
		p = c.cursym.Func().UnspillRegisterArgs(p, c.newprog)
	}

	// save entry point, but skipping the two instructions setting R2 in shared mode and maymorestack
	startPred := p

	// MOVD	g_stackguard(g), R22
	p = obj.Appendp(p, c.newprog)
	p.As = AMOVD
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REGG
	p.From.Offset = 2 * int64(c.ctxt.Arch.PtrSize) // G.stackguard0
	if c.cursym.CFunc() {
		p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1
	}
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_R22

	// Mark the stack bound check and morestack call async nonpreemptible.
	// If we get preempted here, when resumed the preemption request is
	// cleared, but we'll still call morestack, which will double the stack
	// unnecessarily. See issue #35470.
	p = c.ctxt.StartUnsafePoint(p, c.newprog)

	var q *obj.Prog
	if framesize <= abi.StackSmall {
		// small stack: SP < stackguard
		//	CMP	stackguard, SP
		p = obj.Appendp(p, c.newprog)
		p.As = ACMPU
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R22
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REGSP
	} else {
		// large stack: SP-framesize < stackguard-StackSmall
		offset := int64(framesize) - abi.StackSmall
		if framesize > abi.StackBig {
			// Such a large stack we need to protect against underflow.
			// The runtime guarantees SP > objabi.StackBig, but
			// framesize is large enough that SP-framesize may
			// underflow, causing a direct comparison with the
			// stack guard to incorrectly succeed. We explicitly
			// guard against underflow.
			//
			//	CMPU	SP, $(framesize-StackSmall)
			//	BLT	label-of-call-to-morestack
			if offset <= 0xffff {
				p = obj.Appendp(p, c.newprog)
				p.As = ACMPU
				p.From.Type = obj.TYPE_REG
				p.From.Reg = REGSP
				p.To.Type = obj.TYPE_CONST
				p.To.Offset = offset
			} else {
				// Constant is too big for CMPU.
				p = obj.Appendp(p, c.newprog)
				p.As = AMOVD
				p.From.Type = obj.TYPE_CONST
				p.From.Offset = offset
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_R23

				p = obj.Appendp(p, c.newprog)
				p.As = ACMPU
				p.From.Type = obj.TYPE_REG
				p.From.Reg = REGSP
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_R23
			}

			p = obj.Appendp(p, c.newprog)
			q = p
			p.As = ABLT
			p.To.Type = obj.TYPE_BRANCH
		}

		// Check against the stack guard. We've ensured this won't underflow.
		//	ADD  $-(framesize-StackSmall), SP, R4
		//	CMPU stackguard, R4
		p = obj.Appendp(p, c.newprog)
		p.As = AADD
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = -offset
		p.Reg = REGSP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R23

		p = obj.Appendp(p, c.newprog)
		p.As = ACMPU
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R22
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R23
	}

	// q1: BLT	done
	p = obj.Appendp(p, c.newprog)
	q1 := p
	p.As = ABLT
	p.To.Type = obj.TYPE_BRANCH

	p = obj.Appendp(p, c.newprog)
	p.As = obj.ANOP // zero-width place holder

	if q != nil {
		q.To.SetTarget(p)
	}

	// Spill the register args that could be clobbered by the
	// morestack code.
	spill := c.cursym.Func().SpillRegisterArgs(p, c.newprog)

	// MOVD LR, R5
	p = obj.Appendp(spill, c.newprog)
	p.As = AMOVD
	p.From.Type = obj.TYPE_REG
	p.From.Reg = REG_LR
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_R5

	p = c.ctxt.EmitEntryStackMap(c.cursym, p, c.newprog)

	var morestacksym *obj.LSym
	if c.cursym.CFunc() {
		morestacksym = c.ctxt.Lookup("runtime.morestackc")
	} else if !c.cursym.Func().Text.From.Sym.NeedCtxt() {
		morestacksym = c.ctxt.Lookup("runtime.morestack_noctxt")
	} else {
		morestacksym = c.ctxt.Lookup("runtime.morestack")
	}

	if NeedTOCpointer(c.ctxt) {
		// In PPC64 PIC code, R2 is used as TOC pointer derived from R12
		// which is the address of function entry point when entering
		// the function. We need to preserve R2 across call to morestack.
		// Fortunately, in shared mode, 8(SP) and 16(SP) are reserved in
		// the caller's frame, but not used (0(SP) is caller's saved LR,
		// 24(SP) is caller's saved R2). Use 8(SP) to save this function's R2.

		// MOVD R2, 8(SP)
		p = obj.Appendp(p, c.newprog)
		p.As = AMOVD
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R2
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = REGSP
		p.To.Offset = 8
	}

	if c.ctxt.Flag_dynlink {
		// Avoid calling morestack via a PLT when dynamically linking. The
		// PLT stubs generated by the system linker on ppc64le include "std r2,
		// 24(r1)" to save the TOC pointer in their callers' stack
		// frame. Unfortunately (and necessarily) morestack is called before
		// the function that calls it sets up its frame and so the PLT ends
		// up smashing the saved TOC pointer for its caller's caller.
		//
		// According to the ABI documentation there is a mechanism to avoid
		// the TOC save that the PLT stub does (put a R_PPC64_TOCSAVE
		// relocation on the nop after the call to morestack) but at the time
		// of writing it is not supported at all by gold and my attempt to
		// use it with ld.bfd caused an internal linker error. So this hack
		// seems preferable.

		// MOVD $runtime.morestack(SB), R12
		p = obj.Appendp(p, c.newprog)
		p.As = AMOVD
		p.From.Type = obj.TYPE_MEM
		p.From.Sym = morestacksym
		p.From.Name = obj.NAME_GOTREF
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R12

		// MOVD R12, LR
		p = obj.Appendp(p, c.newprog)
		p.As = AMOVD
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R12
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_LR

		// BL LR
		p = obj.Appendp(p, c.newprog)
		p.As = obj.ACALL
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_LR
	} else {
		// BL	runtime.morestack(SB)
		p = obj.Appendp(p, c.newprog)
		p.As = ABL
		p.To.Type = obj.TYPE_BRANCH
		p.To.Sym = morestacksym
	}

	if NeedTOCpointer(c.ctxt) {
		// MOVD 8(SP), R2
		p = obj.Appendp(p, c.newprog)
		p.As = AMOVD
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REGSP
		p.From.Offset = 8
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
	}

	// The instructions which unspill regs should be preemptible.
	p = c.ctxt.EndUnsafePoint(p, c.newprog, -1)
	unspill := c.cursym.Func().UnspillRegisterArgs(p, c.newprog)

	// BR	start
	p = obj.Appendp(unspill, c.newprog)
	p.As = ABR
	p.To.Type = obj.TYPE_BRANCH
	p.To.SetTarget(startPred.Link)

	// placeholder for q1's jump target
	p = obj.Appendp(p, c.newprog)
	p.As = obj.ANOP // zero-width place holder
	q1.To.SetTarget(p)

	return p
}

// MMA accumulator to/from instructions are slightly ambiguous since
// the argument represents both source and destination, specified as
// an accumulator. It is treated as a unary destination to simplify
// the code generation in ppc64map.
var unaryDst = map[obj.As]bool{
	AXXSETACCZ: true,
	AXXMTACC:   true,
	AXXMFACC:   true,
}

var Linkppc64 = obj.LinkArch{
	Arch:           sys.ArchPPC64,
	Init:           buildop,
	Preprocess:     preprocess,
	Assemble:       span9,
	Progedit:       progedit,
	UnaryDst:       unaryDst,
	DWARFRegisters: PPC64DWARFRegisters,
}

var Linkppc64le = obj.LinkArch{
	Arch:           sys.ArchPPC64LE,
	Init:           buildop,
	Preprocess:     preprocess,
	Assemble:       span9,
	Progedit:       progedit,
	UnaryDst:       unaryDst,
	DWARFRegisters: PPC64DWARFRegisters,
}
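// Editor's illustrative sketch (not part of the original file): the guard
// comparisons emitted by stacksplit above amount to the following plain Go
// predicate, assuming the abi.StackSmall and abi.StackBig constants:
//
//	// needMorestack mirrors the CMPU/BLT sequences: a single unsigned
//	// compare for small frames, plus an explicit underflow guard for
//	// frames larger than abi.StackBig.
//	func needMorestack(sp, stackguard uint64, framesize int64) bool {
//		if framesize <= abi.StackSmall {
//			return sp < stackguard // small stack: SP < stackguard
//		}
//		offset := uint64(framesize) - abi.StackSmall
//		if framesize > abi.StackBig && sp < offset {
//			return true // SP-offset would wrap; take the morestack path
//		}
//		return sp-offset < stackguard // large stack: SP-offset < stackguard
//	}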
go/src/cmd/internal/obj/ppc64/obj9.go
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package s390x

import (
	"math/bits"
)

// RotateParams represents the immediates required for a "rotate
// then ... selected bits instruction".
//
// The Start and End values are the indexes that represent
// the masked region. They are inclusive and are in big-
// endian order (bit 0 is the MSB, bit 63 is the LSB). They
// may wrap around.
//
// Some examples:
//
//	Masked region             | Start | End
//	--------------------------+-------+----
//	0x00_00_00_00_00_00_00_0f | 60    | 63
//	0xf0_00_00_00_00_00_00_00 | 0     | 3
//	0xf0_00_00_00_00_00_00_0f | 60    | 3
//
// The Amount value represents the amount to rotate the
// input left by. Note that this rotation is performed
// before the masked region is used.
type RotateParams struct {
	Start  uint8 // big-endian start bit index [0..63]
	End    uint8 // big-endian end bit index [0..63]
	Amount uint8 // amount to rotate left
}

// NewRotateParams creates a set of parameters representing a
// rotation left by the amount provided and a selection of the bits
// between the provided start and end indexes (inclusive).
//
// The start and end indexes and the rotation amount must all
// be in the range 0-63 inclusive or this function will panic.
func NewRotateParams(start, end, amount uint8) RotateParams {
	if start&^63 != 0 {
		panic("start out of bounds")
	}
	if end&^63 != 0 {
		panic("end out of bounds")
	}
	if amount&^63 != 0 {
		panic("amount out of bounds")
	}
	return RotateParams{
		Start:  start,
		End:    end,
		Amount: amount,
	}
}

// RotateLeft generates a new set of parameters with the rotation amount
// increased by the given value. The selected bits are left unchanged.
func (r RotateParams) RotateLeft(amount uint8) RotateParams {
	r.Amount += amount
	r.Amount &= 63
	return r
}

// OutMask provides a mask representing the selected bits.
func (r RotateParams) OutMask() uint64 {
	// Note: z must be unsigned for bootstrap compiler
	z := uint8(63-r.End+r.Start) & 63 // number of zero bits in mask
	return bits.RotateLeft64(^uint64(0)<<z, -int(r.Start))
}

// InMask provides a mask representing the selected bits relative
// to the source value (i.e. pre-rotation).
func (r RotateParams) InMask() uint64 {
	return bits.RotateLeft64(r.OutMask(), -int(r.Amount))
}

// OutMerge tries to generate a new set of parameters representing
// the intersection between the selected bits and the provided mask.
// If the intersection is unrepresentable (0 or not contiguous) nil
// will be returned.
func (r RotateParams) OutMerge(mask uint64) *RotateParams {
	mask &= r.OutMask()
	if mask == 0 {
		return nil
	}

	// normalize the mask so that the set bits are left aligned
	o := bits.LeadingZeros64(^mask)
	mask = bits.RotateLeft64(mask, o)
	z := bits.LeadingZeros64(mask)
	mask = bits.RotateLeft64(mask, z)

	// check that the normalized mask is contiguous
	l := bits.LeadingZeros64(^mask)
	if l+bits.TrailingZeros64(mask) != 64 {
		return nil
	}

	// update start and end positions (rotation amount remains the same)
	r.Start = uint8(o+z) & 63
	r.End = (r.Start + uint8(l) - 1) & 63
	return &r
}

// InMerge tries to generate a new set of parameters representing
// the intersection between the selected bits and the provided mask
// as applied to the source value (i.e. pre-rotation).
// If the intersection is unrepresentable (0 or not contiguous) nil
// will be returned.
func (r RotateParams) InMerge(mask uint64) *RotateParams {
	return r.OutMerge(bits.RotateLeft64(mask, int(r.Amount)))
}

func (RotateParams) CanBeAnSSAAux() {}
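// Editor's illustrative sketch (not part of the original file): a worked
// example of the helpers above, using only the definitions in this package.
// For a wrap-around selection Start=60, End=3 rotated left by 8:
//
//	r := NewRotateParams(60, 3, 8)
//	r.OutMask() // 0xf00000000000000f: big-endian bits 60..63 and 0..3
//	r.InMask()  // == bits.RotateLeft64(r.OutMask(), -8), the pre-rotation bits
//
//	// Narrowing the selection to the low nibble stays contiguous:
//	r.OutMerge(0x0f) // &RotateParams{Start: 60, End: 63, Amount: 8}
//
//	// A mask that leaves two separated bits set is rejected:
//	r.OutMerge(0x9000000000000000) // nil: intersection is not contiguous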
go/src/cmd/internal/obj/s390x/rotate.go
// Code generated by x86avxgen. DO NOT EDIT. package x86 // VEX instructions that come in two forms: // VTHING xmm2/m128, xmmV, xmm1 // VTHING ymm2/m256, ymmV, ymm1 // // The opcode array in the corresponding Optab entry // should contain the (VEX prefixes, opcode byte) pair // for each of the two forms. // For example, the entries for VPXOR are: // // VPXOR xmm2/m128, xmmV, xmm1 // VEX.NDS.128.66.0F.WIG EF /r // // VPXOR ymm2/m256, ymmV, ymm1 // VEX.NDS.256.66.0F.WIG EF /r // // Produce this optab entry: // // {AVPXOR, yvex_xy3, Pavx, opBytes{vex128|vex66|vex0F|vexWIG, 0xEF, vex256|vex66|vex0F|vexWIG, 0xEF}} // // VEX requires at least 2 bytes inside opBytes: // - VEX prefixes (vex-prefixed constants) // - Opcode byte // // EVEX instructions extend VEX form variety: // VTHING zmm2/m512, zmmV, zmm1 -- implicit K0 (merging) // VTHING zmm2/m512, zmmV, K, zmm1 -- explicit K mask (can't use K0) // // EVEX requires at least 3 bytes inside opBytes: // - EVEX prefixes (evex-prefixed constants); similar to VEX // - Displacement multiplier info (scale / broadcast scale) // - Opcode byte; similar to VEX // // Both VEX and EVEX instructions may have opdigit (opcode extension) byte // which follows the primary opcode byte. // Because it can only have value of 0-7, it is written in octal notation. // // x86.csv can be very useful for figuring out proper [E]VEX parts. var _yandnl = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yml, Yrl, Yrl}}, } var _ybextrl = []ytab{ {zcase: Zvex_v_rm_r, zoffset: 2, args: argList{Yrl, Yml, Yrl}}, } var _yblsil = []ytab{ {zcase: Zvex_rm_r_vo, zoffset: 3, args: argList{Yml, Yrl}}, } var _ykaddb = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yk, Yk, Yk}}, } var _ykmovb = []ytab{ {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yk, Ym}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yk, Yrl}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ykm, Yk}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yrl, Yk}}, } var _yknotb = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yk, Yk}}, } var _ykshiftlb = []ytab{ {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yu8, Yk, Yk}}, } var _yrorxl = []ytab{ {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yml, Yrl}}, {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yml, Yrl}}, } var _yv4fmaddps = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Ym, YzrMulti4, Yzr}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Ym, YzrMulti4, Yknot0, Yzr}}, } var _yv4fmaddss = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Ym, YxrEvexMulti4, YxrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Ym, YxrEvexMulti4, Yknot0, YxrEvex}}, } var _yvaddpd = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr, Yyr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}}, } var _yvaddsd = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, } var 
_yvaddsubpd = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr, Yyr}}, } var _yvaesdec = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr, Yyr}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YymEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yzm, Yzr, Yzr}}, } var _yvaesimc = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, } var _yvaeskeygenassist = []ytab{ {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yxm, Yxr}}, {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yxm, Yxr}}, } var _yvalignd = []ytab{ {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, Yknot0, YxrEvex}}, {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YymEvex, YyrEvex, Yknot0, YyrEvex}}, {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr, Yzr}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yzr, Yknot0, Yzr}}, } var _yvandnpd = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr, Yyr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}}, } var _yvblendmpd = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}}, } var _yvblendpd = []ytab{ {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yxr, Yxr}}, {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yym, Yyr, Yyr}}, } var _yvblendvpd = []ytab{ {zcase: Zvex_hr_rm_v_r, zoffset: 2, args: argList{Yxr, Yxm, Yxr, Yxr}}, {zcase: Zvex_hr_rm_v_r, zoffset: 2, args: argList{Yyr, Yym, Yyr, Yyr}}, } var _yvbroadcastf128 = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yyr}}, } var _yvbroadcastf32x2 = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, Yzr}}, } var _yvbroadcastf32x4 = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Ym, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Ym, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Ym, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Ym, Yknot0, Yzr}}, } var _yvbroadcastf32x8 = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Ym, 
Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Ym, Yknot0, Yzr}}, } var _yvbroadcasti32x2 = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, Yzr}}, } var _yvbroadcastsd = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yyr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, Yzr}}, } var _yvbroadcastss = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yyr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, Yzr}}, } var _yvcmppd = []ytab{ {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yxr, Yxr}}, {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yym, Yyr, Yyr}}, {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr, Yk}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yzr, Yknot0, Yk}}, {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex, Yk}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, Yknot0, Yk}}, {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex, Yk}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YymEvex, YyrEvex, Yknot0, Yk}}, } var _yvcmpsd = []ytab{ {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yxr, Yxr}}, {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex, Yk}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, Yknot0, Yk}}, } var _yvcomisd = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YxmEvex, YxrEvex}}, } var _yvcompresspd = []ytab{ {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, YxmEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxmEvex}}, {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YyrEvex, YymEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, YymEvex}}, {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{Yzr, Yzm}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{Yzr, Yknot0, Yzm}}, } var _yvcvtdq2pd = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yyr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, 
Yknot0, Yzr}}, } var _yvcvtdq2ps = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, Yzr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YyrEvex}}, } var _yvcvtpd2dq = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, YyrEvex}}, } var _yvcvtpd2dqx = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, } var _yvcvtpd2dqy = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yxr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YxrEvex}}, } var _yvcvtpd2qq = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, Yzr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YyrEvex}}, } var _yvcvtpd2udqx = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, } var _yvcvtpd2udqy = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YxrEvex}}, } var _yvcvtph2ps = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yyr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, Yzr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, } var _yvcvtps2ph = []ytab{ {zcase: Zvex_i_r_rm, zoffset: 0, args: argList{Yu8, Yxr, Yxm}}, {zcase: Zvex_i_r_rm, zoffset: 2, args: argList{Yi8, Yxr, Yxm}}, {zcase: Zvex_i_r_rm, zoffset: 0, args: argList{Yu8, Yyr, Yxm}}, {zcase: Zvex_i_r_rm, zoffset: 2, args: argList{Yi8, Yyr, Yxm}}, {zcase: Zevex_i_r_rm, zoffset: 0, args: argList{Yu8, Yzr, YymEvex}}, {zcase: Zevex_i_r_k_rm, zoffset: 3, args: argList{Yu8, Yzr, Yknot0, YymEvex}}, {zcase: Zevex_i_r_rm, zoffset: 0, args: argList{Yu8, YxrEvex, YxmEvex}}, {zcase: Zevex_i_r_k_rm, zoffset: 3, args: argList{Yu8, YxrEvex, Yknot0, YxmEvex}}, {zcase: Zevex_i_r_rm, zoffset: 0, args: argList{Yu8, YyrEvex, YxmEvex}}, {zcase: Zevex_i_r_k_rm, zoffset: 3, args: argList{Yu8, YyrEvex, Yknot0, YxmEvex}}, } var _yvcvtps2qq = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, Yzr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, {zcase: 
Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, } var _yvcvtsd2si = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yrl}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YxmEvex, Yrl}}, } var _yvcvtsd2usil = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YxmEvex, Yrl}}, } var _yvcvtsi2sdl = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yml, Yxr, Yxr}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yml, YxrEvex, YxrEvex}}, } var _yvcvtudq2pd = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, Yzr}}, } var _yvcvtusi2sdl = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yml, YxrEvex, YxrEvex}}, } var _yvdppd = []ytab{ {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yxr, Yxr}}, } var _yvexp2pd = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, Yzr}}, } var _yvexpandpd = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, Yzr}}, } var _yvextractf128 = []ytab{ {zcase: Zvex_i_r_rm, zoffset: 0, args: argList{Yu8, Yyr, Yxm}}, {zcase: Zvex_i_r_rm, zoffset: 2, args: argList{Yi8, Yyr, Yxm}}, } var _yvextractf32x4 = []ytab{ {zcase: Zevex_i_r_rm, zoffset: 0, args: argList{Yu8, YyrEvex, YxmEvex}}, {zcase: Zevex_i_r_k_rm, zoffset: 3, args: argList{Yu8, YyrEvex, Yknot0, YxmEvex}}, {zcase: Zevex_i_r_rm, zoffset: 0, args: argList{Yu8, Yzr, YxmEvex}}, {zcase: Zevex_i_r_k_rm, zoffset: 3, args: argList{Yu8, Yzr, Yknot0, YxmEvex}}, } var _yvextractf32x8 = []ytab{ {zcase: Zevex_i_r_rm, zoffset: 0, args: argList{Yu8, Yzr, YymEvex}}, {zcase: Zevex_i_r_k_rm, zoffset: 3, args: argList{Yu8, Yzr, Yknot0, YymEvex}}, } var _yvextractps = []ytab{ {zcase: Zvex_i_r_rm, zoffset: 0, args: argList{Yu8, Yxr, Yml}}, {zcase: Zvex_i_r_rm, zoffset: 2, args: argList{Yi8, Yxr, Yml}}, {zcase: Zevex_i_r_rm, zoffset: 3, args: argList{Yu8, YxrEvex, Yml}}, } var _yvfixupimmpd = []ytab{ {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr, Yzr}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yzr, Yknot0, Yzr}}, {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, Yknot0, YxrEvex}}, {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YymEvex, YyrEvex, Yknot0, YyrEvex}}, } var _yvfixupimmsd = []ytab{ {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, Yknot0, YxrEvex}}, } var _yvfpclasspdx = []ytab{ 
{zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YxmEvex, Yk}}, {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, Yknot0, Yk}}, } var _yvfpclasspdy = []ytab{ {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YymEvex, Yk}}, {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YymEvex, Yknot0, Yk}}, } var _yvfpclasspdz = []ytab{ {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, Yzm, Yk}}, {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yknot0, Yk}}, } var _yvgatherdpd = []ytab{ {zcase: Zvex_v_rm_r, zoffset: 2, args: argList{Yxr, Yxvm, Yxr}}, {zcase: Zvex_v_rm_r, zoffset: 2, args: argList{Yyr, Yxvm, Yyr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxvmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxvmEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YyvmEvex, Yknot0, Yzr}}, } var _yvgatherdps = []ytab{ {zcase: Zvex_v_rm_r, zoffset: 2, args: argList{Yxr, Yxvm, Yxr}}, {zcase: Zvex_v_rm_r, zoffset: 2, args: argList{Yyr, Yyvm, Yyr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxvmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YyvmEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzvm, Yknot0, Yzr}}, } var _yvgatherpf0dpd = []ytab{ {zcase: Zevex_k_rmo, zoffset: 4, args: argList{Yknot0, YyvmEvex}}, } var _yvgatherpf0dps = []ytab{ {zcase: Zevex_k_rmo, zoffset: 4, args: argList{Yknot0, Yzvm}}, } var _yvgatherqps = []ytab{ {zcase: Zvex_v_rm_r, zoffset: 2, args: argList{Yxr, Yxvm, Yxr}}, {zcase: Zvex_v_rm_r, zoffset: 2, args: argList{Yxr, Yyvm, Yxr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxvmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YyvmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzvm, Yknot0, YyrEvex}}, } var _yvgetexpsd = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, } var _yvgetmantpd = []ytab{ {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yknot0, Yzr}}, {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex}}, {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, } var _yvgf2p8affineinvqb = []ytab{ {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yxr, Yxr}}, {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yym, Yyr, Yyr}}, {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, Yknot0, YxrEvex}}, {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YymEvex, YyrEvex, Yknot0, YyrEvex}}, {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr, Yzr}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yzr, Yknot0, Yzr}}, } var _yvinsertf128 = []ytab{ {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yyr, Yyr}}, } var _yvinsertf32x4 = []ytab{ {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YyrEvex, Yknot0, YyrEvex}}, {zcase: Zevex_i_rm_v_r, zoffset: 0, args: 
argList{Yu8, YxmEvex, Yzr, Yzr}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, Yzr, Yknot0, Yzr}}, } var _yvinsertf32x8 = []ytab{ {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YymEvex, Yzr, Yzr}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YymEvex, Yzr, Yknot0, Yzr}}, } var _yvinsertps = []ytab{ {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yxr, Yxr}}, {zcase: Zevex_i_rm_v_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, YxrEvex}}, } var _yvlddqu = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yyr}}, } var _yvldmxcsr = []ytab{ {zcase: Zvex_rm_v_ro, zoffset: 3, args: argList{Ym}}, } var _yvmaskmovdqu = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxr, Yxr}}, } var _yvmaskmovpd = []ytab{ {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yxr, Ym}}, {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yyr, Yyr, Ym}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yxr, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yyr, Yyr}}, } var _yvmovapd = []ytab{ {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yxm}}, {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yyr, Yym}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr}}, {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, YxmEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxmEvex}}, {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YyrEvex, YymEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, YymEvex}}, {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{Yzr, Yzm}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{Yzr, Yknot0, Yzm}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, Yzr}}, } var _yvmovd = []ytab{ {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yml}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yml, Yxr}}, {zcase: Zevex_r_v_rm, zoffset: 3, args: argList{YxrEvex, Yml}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yml, YxrEvex}}, } var _yvmovddup = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, Yzr}}, } var _yvmovdqa = []ytab{ {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yxm}}, {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yyr, Yym}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr}}, } var _yvmovdqa32 = []ytab{ {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, YxmEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxmEvex}}, {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YyrEvex, YymEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: 
argList{YyrEvex, Yknot0, YymEvex}}, {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{Yzr, Yzm}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{Yzr, Yknot0, Yzm}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YymEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yzm, Yknot0, Yzr}}, } var _yvmovhlps = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxr, Yxr, Yxr}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YxrEvex, YxrEvex, YxrEvex}}, } var _yvmovhpd = []ytab{ {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Ym}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yxr, Yxr}}, {zcase: Zevex_r_v_rm, zoffset: 3, args: argList{YxrEvex, Ym}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Ym, YxrEvex, YxrEvex}}, } var _yvmovmskpd = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxr, Yrl}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yyr, Yrl}}, } var _yvmovntdq = []ytab{ {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Ym}}, {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yyr, Ym}}, {zcase: Zevex_r_v_rm, zoffset: 3, args: argList{YxrEvex, Ym}}, {zcase: Zevex_r_v_rm, zoffset: 3, args: argList{YyrEvex, Ym}}, {zcase: Zevex_r_v_rm, zoffset: 3, args: argList{Yzr, Ym}}, } var _yvmovntdqa = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yyr}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Ym, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Ym, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Ym, Yzr}}, } var _yvmovq = []ytab{ {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yml}}, {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yxm}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yml, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, {zcase: Zevex_r_v_rm, zoffset: 3, args: argList{YxrEvex, Yml}}, {zcase: Zevex_r_v_rm, zoffset: 3, args: argList{YxrEvex, YxmEvex}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yml, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YxmEvex, YxrEvex}}, } var _yvmovsd = []ytab{ {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Yxr, Yxr}}, {zcase: Zvex_r_v_rm, zoffset: 2, args: argList{Yxr, Ym}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Ym, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxr, Yxr, Yxr}}, {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_r_v_k_rm, zoffset: 3, args: argList{YxrEvex, YxrEvex, Yknot0, YxrEvex}}, {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, Ym}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, Ym}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Ym, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Ym, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxrEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxrEvex, YxrEvex, Yknot0, YxrEvex}}, } var _yvpbroadcastb = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yyr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yrl, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yrl, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: 
argList{Yrl, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yrl, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yrl, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{Yrl, Yknot0, Yzr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, Yzr}}, {zcase: Zevex_rm_k_r, zoffset: 3, args: argList{YxmEvex, Yknot0, Yzr}}, } var _yvpbroadcastmb2q = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yk, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yk, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yk, Yzr}}, } var _yvpclmulqdq = []ytab{ {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yxm, Yxr, Yxr}}, {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yym, Yyr, Yyr}}, {zcase: Zevex_i_rm_v_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_i_rm_v_r, zoffset: 3, args: argList{Yu8, YymEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_i_rm_v_r, zoffset: 3, args: argList{Yu8, Yzm, Yzr, Yzr}}, } var _yvpcmpb = []ytab{ {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex, Yk}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, YxrEvex, Yknot0, Yk}}, {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex, Yk}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YymEvex, YyrEvex, Yknot0, Yk}}, {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr, Yk}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yzr, Yknot0, Yk}}, } var _yvpcmpeqb = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr, Yyr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, Yk}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, Yk}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, Yk}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, Yk}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yk}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yk}}, } var _yvperm2f128 = []ytab{ {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yym, Yyr, Yyr}}, } var _yvpermd = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr, Yyr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}}, } var _yvpermilpd = []ytab{ {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yxm, Yxr}}, {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yxm, Yxr}}, {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yym, Yyr}}, {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yym, Yyr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr, Yyr}}, {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex}}, {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, {zcase: Zevex_i_rm_k_r, 
zoffset: 3, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yknot0, Yzr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}}, } var _yvpermpd = []ytab{ {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yu8, Yym, Yyr}}, {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yknot0, Yzr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}}, } var _yvpermq = []ytab{ {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yym, Yyr}}, {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yym, Yyr}}, {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yknot0, Yzr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}}, } var _yvpextrw = []ytab{ {zcase: Zvex_i_r_rm, zoffset: 0, args: argList{Yu8, Yxr, Yml}}, {zcase: Zvex_i_r_rm, zoffset: 2, args: argList{Yi8, Yxr, Yml}}, {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yxr, Yrl}}, {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yxr, Yrl}}, {zcase: Zevex_i_r_rm, zoffset: 3, args: argList{Yu8, YxrEvex, Yml}}, {zcase: Zevex_i_rm_r, zoffset: 3, args: argList{Yu8, YxrEvex, Yrl}}, } var _yvpinsrb = []ytab{ {zcase: Zvex_i_rm_v_r, zoffset: 2, args: argList{Yu8, Yml, Yxr, Yxr}}, {zcase: Zevex_i_rm_v_r, zoffset: 3, args: argList{Yu8, Yml, YxrEvex, YxrEvex}}, } var _yvpmovb2m = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YxrEvex, Yk}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{YyrEvex, Yk}}, {zcase: Zevex_rm_v_r, zoffset: 3, args: argList{Yzr, Yk}}, } var _yvpmovdb = []ytab{ {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, YxmEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxmEvex}}, {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YyrEvex, YxmEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, YxmEvex}}, {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{Yzr, YxmEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{Yzr, Yknot0, YxmEvex}}, } var _yvpmovdw = []ytab{ {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{YxrEvex, YxmEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxmEvex}}, {zcase: 
Zevex_r_v_rm, zoffset: 0, args: argList{YyrEvex, YxmEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, YxmEvex}}, {zcase: Zevex_r_v_rm, zoffset: 0, args: argList{Yzr, YymEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{Yzr, Yknot0, YymEvex}}, } var _yvprold = []ytab{ {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex}}, {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, Yzm, Yknot0, Yzr}}, } var _yvpscatterdd = []ytab{ {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxvmEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, YyvmEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{Yzr, Yknot0, Yzvm}}, } var _yvpscatterdq = []ytab{ {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxvmEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, YxvmEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{Yzr, Yknot0, YyvmEvex}}, } var _yvpscatterqd = []ytab{ {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YxvmEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YxrEvex, Yknot0, YyvmEvex}}, {zcase: Zevex_r_k_rm, zoffset: 3, args: argList{YyrEvex, Yknot0, Yzvm}}, } var _yvpshufbitqmb = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, Yk}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, Yk}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, Yk}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, Yk}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yk}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yk}}, } var _yvpshufd = []ytab{ {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yxm, Yxr}}, {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yxm, Yxr}}, {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yym, Yyr}}, {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yym, Yyr}}, {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex}}, {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, {zcase: Zevex_i_rm_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, {zcase: Zevex_i_rm_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yknot0, Yzr}}, } var _yvpslld = []ytab{ {zcase: Zvex_i_rm_vo, zoffset: 0, args: argList{Yu8, Yxr, Yxr}}, {zcase: Zvex_i_rm_vo, zoffset: 3, args: argList{Yi8, Yxr, Yxr}}, {zcase: Zvex_i_rm_vo, zoffset: 0, args: argList{Yu8, Yyr, Yyr}}, {zcase: Zvex_i_rm_vo, zoffset: 3, args: argList{Yi8, Yyr, Yyr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yyr, Yyr}}, {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex}}, {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, {zcase: Zevex_i_rm_k_vo, 
zoffset: 4, args: argList{Yu8, Yzm, Yknot0, Yzr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YyrEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, Yzr, Yzr}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, Yzr, Yknot0, Yzr}}, } var _yvpslldq = []ytab{ {zcase: Zvex_i_rm_vo, zoffset: 0, args: argList{Yu8, Yxr, Yxr}}, {zcase: Zvex_i_rm_vo, zoffset: 3, args: argList{Yi8, Yxr, Yxr}}, {zcase: Zvex_i_rm_vo, zoffset: 0, args: argList{Yu8, Yyr, Yyr}}, {zcase: Zvex_i_rm_vo, zoffset: 3, args: argList{Yi8, Yyr, Yyr}}, {zcase: Zevex_i_rm_vo, zoffset: 4, args: argList{Yu8, YxmEvex, YxrEvex}}, {zcase: Zevex_i_rm_vo, zoffset: 4, args: argList{Yu8, YymEvex, YyrEvex}}, {zcase: Zevex_i_rm_vo, zoffset: 4, args: argList{Yu8, Yzm, Yzr}}, } var _yvpsraq = []ytab{ {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, YxmEvex, YxrEvex}}, {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, YxmEvex, Yknot0, YxrEvex}}, {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex}}, {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, YymEvex, Yknot0, YyrEvex}}, {zcase: Zevex_i_rm_vo, zoffset: 0, args: argList{Yu8, Yzm, Yzr}}, {zcase: Zevex_i_rm_k_vo, zoffset: 4, args: argList{Yu8, Yzm, Yknot0, Yzr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YyrEvex, Yknot0, YyrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, Yzr, Yzr}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, Yzr, Yknot0, Yzr}}, } var _yvptest = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr}}, {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yym, Yyr}}, } var _yvrcpss = []ytab{ {zcase: Zvex_rm_v_r, zoffset: 2, args: argList{Yxm, Yxr, Yxr}}, } var _yvroundpd = []ytab{ {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yxm, Yxr}}, {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yxm, Yxr}}, {zcase: Zvex_i_rm_r, zoffset: 0, args: argList{Yu8, Yym, Yyr}}, {zcase: Zvex_i_rm_r, zoffset: 2, args: argList{Yi8, Yym, Yyr}}, } var _yvscalefpd = []ytab{ {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{Yzm, Yzr, Yzr}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{Yzm, Yzr, Yknot0, Yzr}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YxmEvex, YxrEvex, YxrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YxmEvex, YxrEvex, Yknot0, YxrEvex}}, {zcase: Zevex_rm_v_r, zoffset: 0, args: argList{YymEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_rm_v_k_r, zoffset: 3, args: argList{YymEvex, YyrEvex, Yknot0, YyrEvex}}, } var _yvshuff32x4 = []ytab{ {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, YymEvex, YyrEvex, YyrEvex}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, YymEvex, YyrEvex, Yknot0, YyrEvex}}, {zcase: Zevex_i_rm_v_r, zoffset: 0, args: argList{Yu8, Yzm, Yzr, Yzr}}, {zcase: Zevex_i_rm_v_k_r, zoffset: 3, args: argList{Yu8, Yzm, Yzr, Yknot0, Yzr}}, } var _yvzeroall = []ytab{ {zcase: Zvex, zoffset: 2, args: argList{}}, } var avxOptab = [...]Optab{ {as: AANDNL, ytab: _yandnl, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F38 | vexW0, 
0xF2, }}, {as: AANDNQ, ytab: _yandnl, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F38 | vexW1, 0xF2, }}, {as: ABEXTRL, ytab: _ybextrl, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F38 | vexW0, 0xF7, }}, {as: ABEXTRQ, ytab: _ybextrl, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F38 | vexW1, 0xF7, }}, {as: ABLSIL, ytab: _yblsil, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F38 | vexW0, 0xF3, 03, }}, {as: ABLSIQ, ytab: _yblsil, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F38 | vexW1, 0xF3, 03, }}, {as: ABLSMSKL, ytab: _yblsil, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F38 | vexW0, 0xF3, 02, }}, {as: ABLSMSKQ, ytab: _yblsil, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F38 | vexW1, 0xF3, 02, }}, {as: ABLSRL, ytab: _yblsil, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F38 | vexW0, 0xF3, 01, }}, {as: ABLSRQ, ytab: _yblsil, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F38 | vexW1, 0xF3, 01, }}, {as: ABZHIL, ytab: _ybextrl, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F38 | vexW0, 0xF5, }}, {as: ABZHIQ, ytab: _ybextrl, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F38 | vexW1, 0xF5, }}, {as: AKADDB, ytab: _ykaddb, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex66 | vex0F | vexW0, 0x4A, }}, {as: AKADDD, ytab: _ykaddb, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex66 | vex0F | vexW1, 0x4A, }}, {as: AKADDQ, ytab: _ykaddb, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex0F | vexW1, 0x4A, }}, {as: AKADDW, ytab: _ykaddb, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex0F | vexW0, 0x4A, }}, {as: AKANDB, ytab: _ykaddb, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex66 | vex0F | vexW0, 0x41, }}, {as: AKANDD, ytab: _ykaddb, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex66 | vex0F | vexW1, 0x41, }}, {as: AKANDNB, ytab: _ykaddb, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex66 | vex0F | vexW0, 0x42, }}, {as: AKANDND, ytab: _ykaddb, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex66 | vex0F | vexW1, 0x42, }}, {as: AKANDNQ, ytab: _ykaddb, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex0F | vexW1, 0x42, }}, {as: AKANDNW, ytab: _ykaddb, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex0F | vexW0, 0x42, }}, {as: AKANDQ, ytab: _ykaddb, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex0F | vexW1, 0x41, }}, {as: AKANDW, ytab: _ykaddb, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex0F | vexW0, 0x41, }}, {as: AKMOVB, ytab: _ykmovb, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x91, avxEscape | vex128 | vex66 | vex0F | vexW0, 0x93, avxEscape | vex128 | vex66 | vex0F | vexW0, 0x90, avxEscape | vex128 | vex66 | vex0F | vexW0, 0x92, }}, {as: AKMOVD, ytab: _ykmovb, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW1, 0x91, avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x93, avxEscape | vex128 | vex66 | vex0F | vexW1, 0x90, avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x92, }}, {as: AKMOVQ, ytab: _ykmovb, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW1, 0x91, avxEscape | vex128 | vexF2 | vex0F | vexW1, 0x93, avxEscape | vex128 | vex0F | vexW1, 0x90, avxEscape | vex128 | vexF2 | vex0F | vexW1, 0x92, }}, {as: AKMOVW, ytab: _ykmovb, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x91, avxEscape | vex128 | vex0F | vexW0, 0x93, avxEscape | vex128 | vex0F | vexW0, 0x90, avxEscape | vex128 | vex0F | vexW0, 0x92, }}, {as: AKNOTB, ytab: _yknotb, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x44, }}, {as: AKNOTD, 
		ytab: _yknotb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW1, 0x44,
	}},
	{as: AKNOTQ, ytab: _yknotb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW1, 0x44,
	}},
	{as: AKNOTW, ytab: _yknotb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x44,
	}},
	{as: AKORB, ytab: _ykaddb, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x45,
	}},
	{as: AKORD, ytab: _ykaddb, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex66 | vex0F | vexW1, 0x45,
	}},
	{as: AKORQ, ytab: _ykaddb, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex0F | vexW1, 0x45,
	}},
	{as: AKORTESTB, ytab: _yknotb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x98,
	}},
	{as: AKORTESTD, ytab: _yknotb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW1, 0x98,
	}},
	{as: AKORTESTQ, ytab: _yknotb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW1, 0x98,
	}},
	{as: AKORTESTW, ytab: _yknotb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x98,
	}},
	{as: AKORW, ytab: _ykaddb, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex0F | vexW0, 0x45,
	}},
	{as: AKSHIFTLB, ytab: _ykshiftlb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x32,
	}},
	{as: AKSHIFTLD, ytab: _ykshiftlb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x33,
	}},
	{as: AKSHIFTLQ, ytab: _ykshiftlb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW1, 0x33,
	}},
	{as: AKSHIFTLW, ytab: _ykshiftlb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW1, 0x32,
	}},
	{as: AKSHIFTRB, ytab: _ykshiftlb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x30,
	}},
	{as: AKSHIFTRD, ytab: _ykshiftlb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x31,
	}},
	{as: AKSHIFTRQ, ytab: _ykshiftlb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW1, 0x31,
	}},
	{as: AKSHIFTRW, ytab: _ykshiftlb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW1, 0x30,
	}},
	{as: AKTESTB, ytab: _yknotb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x99,
	}},
	{as: AKTESTD, ytab: _yknotb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW1, 0x99,
	}},
	{as: AKTESTQ, ytab: _yknotb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW1, 0x99,
	}},
	{as: AKTESTW, ytab: _yknotb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x99,
	}},
	{as: AKUNPCKBW, ytab: _ykaddb, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x4B,
	}},
	{as: AKUNPCKDQ, ytab: _ykaddb, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex0F | vexW1, 0x4B,
	}},
	{as: AKUNPCKWD, ytab: _ykaddb, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex0F | vexW0, 0x4B,
	}},
	{as: AKXNORB, ytab: _ykaddb, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x46,
	}},
	{as: AKXNORD, ytab: _ykaddb, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex66 | vex0F | vexW1, 0x46,
	}},
	{as: AKXNORQ, ytab: _ykaddb, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex0F | vexW1, 0x46,
	}},
	{as: AKXNORW, ytab: _ykaddb, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex0F | vexW0, 0x46,
	}},
	{as: AKXORB, ytab: _ykaddb, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x47,
	}},
	{as: AKXORD, ytab: _ykaddb, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex66 | vex0F | vexW1, 0x47,
	}},
	{as: AKXORQ, ytab: _ykaddb, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex0F |
		vexW1, 0x47,
	}},
	{as: AKXORW, ytab: _ykaddb, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex0F | vexW0, 0x47,
	}},
	{as: AMULXL, ytab: _yandnl, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F38 | vexW0, 0xF6,
	}},
	{as: AMULXQ, ytab: _yandnl, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F38 | vexW1, 0xF6,
	}},
	{as: APDEPL, ytab: _yandnl, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F38 | vexW0, 0xF5,
	}},
	{as: APDEPQ, ytab: _yandnl, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F38 | vexW1, 0xF5,
	}},
	{as: APEXTL, ytab: _yandnl, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF3 | vex0F38 | vexW0, 0xF5,
	}},
	{as: APEXTQ, ytab: _yandnl, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF3 | vex0F38 | vexW1, 0xF5,
	}},
	{as: ARORXL, ytab: _yrorxl, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F3A | vexW0, 0xF0,
	}},
	{as: ARORXQ, ytab: _yrorxl, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F3A | vexW1, 0xF0,
	}},
	{as: ASARXL, ytab: _ybextrl, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF3 | vex0F38 | vexW0, 0xF7,
	}},
	{as: ASARXQ, ytab: _ybextrl, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF3 | vex0F38 | vexW1, 0xF7,
	}},
	{as: ASHLXL, ytab: _ybextrl, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xF7,
	}},
	{as: ASHLXQ, ytab: _ybextrl, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xF7,
	}},
	{as: ASHRXL, ytab: _ybextrl, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F38 | vexW0, 0xF7,
	}},
	{as: ASHRXQ, ytab: _ybextrl, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F38 | vexW1, 0xF7,
	}},
	{as: AV4FMADDPS, ytab: _yv4fmaddps, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evexF2 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x9A,
	}},
	{as: AV4FMADDSS, ytab: _yv4fmaddss, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evexF2 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x9B,
	}},
	{as: AV4FNMADDPS, ytab: _yv4fmaddps, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evexF2 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0xAA,
	}},
	{as: AV4FNMADDSS, ytab: _yv4fmaddss, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evexF2 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0xAB,
	}},
	{as: AVADDPD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x58,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x58,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x58,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x58,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x58,
	}},
	{as: AVADDPS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x58,
		avxEscape | vex256 | vex0F | vexW0, 0x58,
		avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x58,
		avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x58,
		avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x58,
	}},
	{as: AVADDSD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x58,
		avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x58,
	}},
	{as: AVADDSS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x58,
		avxEscape | evex128 | evexF3 |
evex0F | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x58, }}, {as: AVADDSUBPD, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD0, avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD0, }}, {as: AVADDSUBPS, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF2 | vex0F | vexW0, 0xD0, avxEscape | vex256 | vexF2 | vex0F | vexW0, 0xD0, }}, {as: AVAESDEC, ytab: _yvaesdec, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xDE, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xDE, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16, 0xDE, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32, 0xDE, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64, 0xDE, }}, {as: AVAESDECLAST, ytab: _yvaesdec, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xDF, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xDF, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16, 0xDF, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32, 0xDF, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64, 0xDF, }}, {as: AVAESENC, ytab: _yvaesdec, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xDC, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xDC, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16, 0xDC, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32, 0xDC, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64, 0xDC, }}, {as: AVAESENCLAST, ytab: _yvaesdec, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xDD, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xDD, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16, 0xDD, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32, 0xDD, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64, 0xDD, }}, {as: AVAESIMC, ytab: _yvaesimc, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xDB, }}, {as: AVAESKEYGENASSIST, ytab: _yvaeskeygenassist, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0xDF, }}, {as: AVALIGND, ytab: _yvalignd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x03, avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x03, avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x03, }}, {as: AVALIGNQ, ytab: _yvalignd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x03, avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x03, avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x03, }}, {as: AVANDNPD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x55, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x55, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x55, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x55, avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x55, }}, {as: AVANDNPS, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x55, avxEscape | vex256 | vex0F | vexW0, 0x55, avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x55, avxEscape | evex256 | evex0F | 
evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x55, avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x55, }}, {as: AVANDPD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x54, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x54, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x54, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x54, avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x54, }}, {as: AVANDPS, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x54, avxEscape | vex256 | vex0F | vexW0, 0x54, avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x54, avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x54, avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x54, }}, {as: AVBLENDMPD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x65, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x65, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x65, }}, {as: AVBLENDMPS, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x65, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x65, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x65, }}, {as: AVBLENDPD, ytab: _yvblendpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x0D, avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x0D, }}, {as: AVBLENDPS, ytab: _yvblendpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x0C, avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x0C, }}, {as: AVBLENDVPD, ytab: _yvblendvpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x4B, avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x4B, }}, {as: AVBLENDVPS, ytab: _yvblendvpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x4A, avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x4A, }}, {as: AVBROADCASTF128, ytab: _yvbroadcastf128, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x1A, }}, {as: AVBROADCASTF32X2, ytab: _yvbroadcastf32x2, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x19, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x19, }}, {as: AVBROADCASTF32X4, ytab: _yvbroadcastf32x4, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x1A, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x1A, }}, {as: AVBROADCASTF32X8, ytab: _yvbroadcastf32x8, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x1B, }}, {as: AVBROADCASTF64X2, ytab: _yvbroadcastf32x4, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x1A, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x1A, }}, {as: AVBROADCASTF64X4, ytab: _yvbroadcastf32x8, prefix: Pavx, op: opBytes{ avxEscape | 
evex512 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x1B, }}, {as: AVBROADCASTI128, ytab: _yvbroadcastf128, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x5A, }}, {as: AVBROADCASTI32X2, ytab: _yvbroadcasti32x2, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x59, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x59, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x59, }}, {as: AVBROADCASTI32X4, ytab: _yvbroadcastf32x4, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x5A, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x5A, }}, {as: AVBROADCASTI32X8, ytab: _yvbroadcastf32x8, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x5B, }}, {as: AVBROADCASTI64X2, ytab: _yvbroadcastf32x4, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x5A, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x5A, }}, {as: AVBROADCASTI64X4, ytab: _yvbroadcastf32x8, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x5B, }}, {as: AVBROADCASTSD, ytab: _yvbroadcastsd, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x19, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x19, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x19, }}, {as: AVBROADCASTSS, ytab: _yvbroadcastss, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x18, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x18, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x18, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x18, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x18, }}, {as: AVCMPPD, ytab: _yvcmppd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xC2, avxEscape | vex256 | vex66 | vex0F | vexW0, 0xC2, avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled, 0xC2, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8, 0xC2, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8, 0xC2, }}, {as: AVCMPPS, ytab: _yvcmppd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0xC2, avxEscape | vex256 | vex0F | vexW0, 0xC2, avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled, 0xC2, avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4, 0xC2, avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4, 0xC2, }}, {as: AVCMPSD, ytab: _yvcmpsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF2 | vex0F | vexW0, 0xC2, avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexSaeEnabled, 0xC2, }}, {as: AVCMPSS, ytab: _yvcmpsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF3 | vex0F | vexW0, 0xC2, avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexSaeEnabled, 0xC2, }}, {as: AVCOMISD, ytab: _yvcomisd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x2F, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8 | evexSaeEnabled, 0x2F, }}, {as: AVCOMISS, ytab: _yvcomisd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x2F, avxEscape | 
evex128 | evex0F | evexW0, evexN4 | evexSaeEnabled, 0x2F, }}, {as: AVCOMPRESSPD, ytab: _yvcompresspd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x8A, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x8A, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x8A, }}, {as: AVCOMPRESSPS, ytab: _yvcompresspd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x8A, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x8A, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x8A, }}, {as: AVCVTDQ2PD, ytab: _yvcvtdq2pd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF3 | vex0F | vexW0, 0xE6, avxEscape | vex256 | vexF3 | vex0F | vexW0, 0xE6, avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN8 | evexBcstN4 | evexZeroingEnabled, 0xE6, avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xE6, avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xE6, }}, {as: AVCVTDQ2PS, ytab: _yvcvtdq2ps, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x5B, avxEscape | vex256 | vex0F | vexW0, 0x5B, avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x5B, avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5B, avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x5B, }}, {as: AVCVTPD2DQ, ytab: _yvcvtpd2dq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evexF2 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xE6, }}, {as: AVCVTPD2DQX, ytab: _yvcvtpd2dqx, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF2 | vex0F | vexW0, 0xE6, avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xE6, }}, {as: AVCVTPD2DQY, ytab: _yvcvtpd2dqy, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vexF2 | vex0F | vexW0, 0xE6, avxEscape | evex256 | evexF2 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xE6, }}, {as: AVCVTPD2PS, ytab: _yvcvtpd2dq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x5A, }}, {as: AVCVTPD2PSX, ytab: _yvcvtpd2dqx, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x5A, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x5A, }}, {as: AVCVTPD2PSY, ytab: _yvcvtpd2dqy, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex66 | vex0F | vexW0, 0x5A, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x5A, }}, {as: AVCVTPD2QQ, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x7B, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x7B, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x7B, }}, {as: AVCVTPD2UDQ, ytab: _yvcvtpd2dq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x79, }}, {as: AVCVTPD2UDQX, ytab: _yvcvtpd2udqx, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 
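	// 0x79 is VCVTPD2UDQ. The assembler's X/Y-suffixed mnemonics select the
	// 128- vs 256-bit source form, since the destination register alone does
	// not determine the operand size for these narrowing conversions.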
0x79, }}, {as: AVCVTPD2UDQY, ytab: _yvcvtpd2udqy, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x79, }}, {as: AVCVTPD2UQQ, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x79, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x79, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x79, }}, {as: AVCVTPH2PS, ytab: _yvcvtph2ps, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x13, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x13, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexSaeEnabled | evexZeroingEnabled, 0x13, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x13, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x13, }}, {as: AVCVTPS2DQ, ytab: _yvcvtdq2ps, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x5B, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x5B, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x5B, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5B, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x5B, }}, {as: AVCVTPS2PD, ytab: _yvcvtph2ps, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x5A, avxEscape | vex256 | vex0F | vexW0, 0x5A, avxEscape | evex512 | evex0F | evexW0, evexN32 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x5A, avxEscape | evex128 | evex0F | evexW0, evexN8 | evexBcstN4 | evexZeroingEnabled, 0x5A, avxEscape | evex256 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5A, }}, {as: AVCVTPS2PH, ytab: _yvcvtps2ph, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x1D, avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x1D, avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN32 | evexSaeEnabled | evexZeroingEnabled, 0x1D, avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN8 | evexZeroingEnabled, 0x1D, avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x1D, }}, {as: AVCVTPS2QQ, ytab: _yvcvtps2qq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x7B, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN8 | evexBcstN4 | evexZeroingEnabled, 0x7B, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x7B, }}, {as: AVCVTPS2UDQ, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x79, avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x79, avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x79, }}, {as: AVCVTPS2UQQ, ytab: _yvcvtps2qq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x79, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN8 | evexBcstN4 | evexZeroingEnabled, 0x79, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x79, }}, {as: AVCVTQQ2PD, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | 
evexF3 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xE6, avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xE6, avxEscape | evex256 | evexF3 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xE6, }}, {as: AVCVTQQ2PS, ytab: _yvcvtpd2dq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x5B, }}, {as: AVCVTQQ2PSX, ytab: _yvcvtpd2udqx, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x5B, }}, {as: AVCVTQQ2PSY, ytab: _yvcvtpd2udqy, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x5B, }}, {as: AVCVTSD2SI, ytab: _yvcvtsd2si, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x2D, avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN8 | evexRoundingEnabled, 0x2D, }}, {as: AVCVTSD2SIQ, ytab: _yvcvtsd2si, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF2 | vex0F | vexW1, 0x2D, avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled, 0x2D, }}, {as: AVCVTSD2SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x5A, avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x5A, }}, {as: AVCVTSD2USIL, ytab: _yvcvtsd2usil, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN8 | evexRoundingEnabled, 0x79, }}, {as: AVCVTSD2USIQ, ytab: _yvcvtsd2usil, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled, 0x79, }}, {as: AVCVTSI2SDL, ytab: _yvcvtsi2sdl, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x2A, avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN4, 0x2A, }}, {as: AVCVTSI2SDQ, ytab: _yvcvtsi2sdl, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF2 | vex0F | vexW1, 0x2A, avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled, 0x2A, }}, {as: AVCVTSI2SSL, ytab: _yvcvtsi2sdl, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x2A, avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled, 0x2A, }}, {as: AVCVTSI2SSQ, ytab: _yvcvtsi2sdl, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF3 | vex0F | vexW1, 0x2A, avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN8 | evexRoundingEnabled, 0x2A, }}, {as: AVCVTSS2SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x5A, avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x5A, }}, {as: AVCVTSS2SI, ytab: _yvcvtsd2si, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x2D, avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled, 0x2D, }}, {as: AVCVTSS2SIQ, ytab: _yvcvtsd2si, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF3 | vex0F | vexW1, 0x2D, avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN4 | evexRoundingEnabled, 0x2D, }}, {as: AVCVTSS2USIL, ytab: _yvcvtsd2usil, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled, 0x79, }}, {as: AVCVTSS2USIQ, ytab: _yvcvtsd2usil, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN4 | evexRoundingEnabled, 0x79, }}, {as: AVCVTTPD2DQ, ytab: _yvcvtpd2dq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F | evexW1, 
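	// Truncating (VCVTT*) conversions accept suppress-all-exceptions ({sae})
	// rather than an embedded rounding mode, hence evexSaeEnabled here where
	// the non-truncating entries above use evexRoundingEnabled.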
evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0xE6, }}, {as: AVCVTTPD2DQX, ytab: _yvcvtpd2dqx, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE6, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xE6, }}, {as: AVCVTTPD2DQY, ytab: _yvcvtpd2dqy, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE6, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xE6, }}, {as: AVCVTTPD2QQ, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x7A, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x7A, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x7A, }}, {as: AVCVTTPD2UDQ, ytab: _yvcvtpd2dq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex0F | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x78, }}, {as: AVCVTTPD2UDQX, ytab: _yvcvtpd2udqx, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x78, }}, {as: AVCVTTPD2UDQY, ytab: _yvcvtpd2udqy, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x78, }}, {as: AVCVTTPD2UQQ, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x78, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x78, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x78, }}, {as: AVCVTTPS2DQ, ytab: _yvcvtdq2ps, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x5B, avxEscape | vex256 | vexF3 | vex0F | vexW0, 0x5B, avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x5B, avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5B, avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x5B, }}, {as: AVCVTTPS2QQ, ytab: _yvcvtps2qq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x7A, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN8 | evexBcstN4 | evexZeroingEnabled, 0x7A, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x7A, }}, {as: AVCVTTPS2UDQ, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x78, avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x78, avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x78, }}, {as: AVCVTTPS2UQQ, ytab: _yvcvtps2qq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x78, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN8 | evexBcstN4 | evexZeroingEnabled, 0x78, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x78, }}, {as: AVCVTTSD2SI, ytab: _yvcvtsd2si, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x2C, avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN8 | evexSaeEnabled, 0x2C, }}, {as: AVCVTTSD2SIQ, ytab: 
_yvcvtsd2si, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF2 | vex0F | vexW1, 0x2C, avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexSaeEnabled, 0x2C, }}, {as: AVCVTTSD2USIL, ytab: _yvcvtsd2usil, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN8 | evexSaeEnabled, 0x78, }}, {as: AVCVTTSD2USIQ, ytab: _yvcvtsd2usil, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexSaeEnabled, 0x78, }}, {as: AVCVTTSS2SI, ytab: _yvcvtsd2si, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x2C, avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexSaeEnabled, 0x2C, }}, {as: AVCVTTSS2SIQ, ytab: _yvcvtsd2si, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF3 | vex0F | vexW1, 0x2C, avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN4 | evexSaeEnabled, 0x2C, }}, {as: AVCVTTSS2USIL, ytab: _yvcvtsd2usil, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexSaeEnabled, 0x78, }}, {as: AVCVTTSS2USIQ, ytab: _yvcvtsd2usil, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN4 | evexSaeEnabled, 0x78, }}, {as: AVCVTUDQ2PD, ytab: _yvcvtudq2pd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN8 | evexBcstN4 | evexZeroingEnabled, 0x7A, avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x7A, avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x7A, }}, {as: AVCVTUDQ2PS, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evexF2 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x7A, avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x7A, avxEscape | evex256 | evexF2 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x7A, }}, {as: AVCVTUQQ2PD, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evexF3 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x7A, avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x7A, avxEscape | evex256 | evexF3 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x7A, }}, {as: AVCVTUQQ2PS, ytab: _yvcvtpd2dq, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evexF2 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x7A, }}, {as: AVCVTUQQ2PSX, ytab: _yvcvtpd2udqx, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x7A, }}, {as: AVCVTUQQ2PSY, ytab: _yvcvtpd2udqy, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evexF2 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x7A, }}, {as: AVCVTUSI2SDL, ytab: _yvcvtusi2sdl, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN4, 0x7B, }}, {as: AVCVTUSI2SDQ, ytab: _yvcvtusi2sdl, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled, 0x7B, }}, {as: AVCVTUSI2SSL, ytab: _yvcvtusi2sdl, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled, 0x7B, }}, {as: AVCVTUSI2SSQ, ytab: _yvcvtusi2sdl, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN8 | evexRoundingEnabled, 0x7B, }}, {as: AVDBPSADBW, ytab: _yvalignd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 
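	// VDBPSADBW (AVX-512BW), opcode 0x42 in the 66.0F3A map; it reuses
	// _yvalignd, presumably because it has the same imm8+reg/mem operand
	// shape as VALIGND.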
0x42, avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexZeroingEnabled, 0x42, avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexZeroingEnabled, 0x42, }}, {as: AVDIVPD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x5E, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x5E, avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x5E, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x5E, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x5E, }}, {as: AVDIVPS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x5E, avxEscape | vex256 | vex0F | vexW0, 0x5E, avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x5E, avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5E, avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x5E, }}, {as: AVDIVSD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x5E, avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x5E, }}, {as: AVDIVSS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x5E, avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x5E, }}, {as: AVDPPD, ytab: _yvdppd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x41, }}, {as: AVDPPS, ytab: _yvblendpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x40, avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x40, }}, {as: AVEXP2PD, ytab: _yvexp2pd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0xC8, }}, {as: AVEXP2PS, ytab: _yvexp2pd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0xC8, }}, {as: AVEXPANDPD, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x88, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x88, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x88, }}, {as: AVEXPANDPS, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x88, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x88, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x88, }}, {as: AVEXTRACTF128, ytab: _yvextractf128, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x19, }}, {as: AVEXTRACTF32X4, ytab: _yvextractf32x4, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x19, avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x19, }}, {as: AVEXTRACTF32X8, ytab: _yvextractf32x8, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN32 | evexZeroingEnabled, 0x1B, }}, {as: AVEXTRACTF64X2, ytab: _yvextractf32x4, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x19, avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN16 | 
evexZeroingEnabled, 0x19, }}, {as: AVEXTRACTF64X4, ytab: _yvextractf32x8, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN32 | evexZeroingEnabled, 0x1B, }}, {as: AVEXTRACTI128, ytab: _yvextractf128, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x39, }}, {as: AVEXTRACTI32X4, ytab: _yvextractf32x4, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x39, avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x39, }}, {as: AVEXTRACTI32X8, ytab: _yvextractf32x8, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN32 | evexZeroingEnabled, 0x3B, }}, {as: AVEXTRACTI64X2, ytab: _yvextractf32x4, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x39, avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x39, }}, {as: AVEXTRACTI64X4, ytab: _yvextractf32x8, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN32 | evexZeroingEnabled, 0x3B, }}, {as: AVEXTRACTPS, ytab: _yvextractps, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x17, avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4, 0x17, }}, {as: AVFIXUPIMMPD, ytab: _yvfixupimmpd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x54, avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x54, avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x54, }}, {as: AVFIXUPIMMPS, ytab: _yvfixupimmpd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x54, avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x54, avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x54, }}, {as: AVFIXUPIMMSD, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0x55, }}, {as: AVFIXUPIMMSS, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x55, }}, {as: AVFMADD132PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x98, avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x98, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x98, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x98, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x98, }}, {as: AVFMADD132PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x98, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x98, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x98, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x98, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x98, }}, {as: AVFMADD132SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x99, avxEscape | evex128 | evex66 
| evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x99, }}, {as: AVFMADD132SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x99, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x99, }}, {as: AVFMADD213PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xA8, avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xA8, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xA8, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xA8, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xA8, }}, {as: AVFMADD213PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xA8, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xA8, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xA8, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xA8, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xA8, }}, {as: AVFMADD213SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xA9, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xA9, }}, {as: AVFMADD213SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xA9, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xA9, }}, {as: AVFMADD231PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xB8, avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xB8, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xB8, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xB8, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xB8, }}, {as: AVFMADD231PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xB8, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xB8, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xB8, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xB8, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xB8, }}, {as: AVFMADD231SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xB9, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xB9, }}, {as: AVFMADD231SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xB9, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xB9, }}, {as: AVFMADDSUB132PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x96, avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x96, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x96, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x96, 
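	// The FMA opcodes follow a fixed pattern in the 0F38 map: 0x96-0x9F are
	// the 132 forms, 0xA6-0xAF the 213 forms, and 0xB6-0xBF the 231 forms.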
avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x96, }}, {as: AVFMADDSUB132PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x96, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x96, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x96, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x96, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x96, }}, {as: AVFMADDSUB213PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xA6, avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xA6, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xA6, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xA6, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xA6, }}, {as: AVFMADDSUB213PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xA6, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xA6, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xA6, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xA6, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xA6, }}, {as: AVFMADDSUB231PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xB6, avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xB6, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xB6, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xB6, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xB6, }}, {as: AVFMADDSUB231PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xB6, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xB6, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xB6, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xB6, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xB6, }}, {as: AVFMSUB132PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x9A, avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x9A, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x9A, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x9A, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x9A, }}, {as: AVFMSUB132PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x9A, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x9A, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x9A, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x9A, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x9A, }}, {as: 
AVFMSUB132SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x9B, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x9B, }}, {as: AVFMSUB132SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x9B, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x9B, }}, {as: AVFMSUB213PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xAA, avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xAA, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xAA, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xAA, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xAA, }}, {as: AVFMSUB213PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xAA, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xAA, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xAA, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xAA, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xAA, }}, {as: AVFMSUB213SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xAB, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xAB, }}, {as: AVFMSUB213SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xAB, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xAB, }}, {as: AVFMSUB231PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xBA, avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xBA, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xBA, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xBA, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xBA, }}, {as: AVFMSUB231PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xBA, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xBA, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xBA, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xBA, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xBA, }}, {as: AVFMSUB231SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xBB, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xBB, }}, {as: AVFMSUB231SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xBB, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xBB, }}, {as: AVFMSUBADD132PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x97, avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x97, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | 
		evexRoundingEnabled | evexZeroingEnabled, 0x97,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x97,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x97,
	}},
	{as: AVFMSUBADD132PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x97,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x97,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x97,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x97,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x97,
	}},
	{as: AVFMSUBADD213PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xA7,
		avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xA7,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xA7,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xA7,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xA7,
	}},
	{as: AVFMSUBADD213PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xA7,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xA7,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xA7,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xA7,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xA7,
	}},
	{as: AVFMSUBADD231PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xB7,
		avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xB7,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xB7,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xB7,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xB7,
	}},
	{as: AVFMSUBADD231PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xB7,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xB7,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xB7,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xB7,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xB7,
	}},
	{as: AVFNMADD132PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x9C,
		avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x9C,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x9C,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x9C,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x9C,
	}},
	{as: AVFNMADD132PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x9C,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x9C,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x9C,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x9C,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x9C,
	}},
	{as: AVFNMADD132SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x9D,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x9D,
	}},
	{as: AVFNMADD132SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x9D,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x9D,
	}},
	{as: AVFNMADD213PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xAC,
		avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xAC,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xAC,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xAC,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xAC,
	}},
	{as: AVFNMADD213PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xAC,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xAC,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xAC,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xAC,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xAC,
	}},
	{as: AVFNMADD213SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xAD,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xAD,
	}},
	{as: AVFNMADD213SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xAD,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xAD,
	}},
	{as: AVFNMADD231PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xBC,
		avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xBC,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xBC,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xBC,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xBC,
	}},
	{as: AVFNMADD231PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xBC,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xBC,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xBC,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xBC,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xBC,
	}},
	{as: AVFNMADD231SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xBD,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xBD,
	}},
	{as: AVFNMADD231SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xBD,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xBD,
	}},
	{as: AVFNMSUB132PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x9E,
		avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x9E,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x9E,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x9E,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x9E,
	}},
	{as: AVFNMSUB132PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x9E,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x9E,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x9E,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x9E,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x9E,
	}},
	{as: AVFNMSUB132SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x9F,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x9F,
	}},
	{as: AVFNMSUB132SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x9F,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x9F,
	}},
	{as: AVFNMSUB213PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xAE,
		avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xAE,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xAE,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xAE,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xAE,
	}},
	{as: AVFNMSUB213PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xAE,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xAE,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xAE,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xAE,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xAE,
	}},
	{as: AVFNMSUB213SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xAF,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xAF,
	}},
	{as: AVFNMSUB213SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xAF,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xAF,
	}},
	{as: AVFNMSUB231PD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xBE,
		avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0xBE,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0xBE,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xBE,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xBE,
	}},
	{as: AVFNMSUB231PS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xBE,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xBE,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0xBE,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xBE,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xBE,
	}},
	{as: AVFNMSUB231SD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0xBF,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0xBF,
	}},
	{as: AVFNMSUB231SS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xBF,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0xBF,
	}},
	{as: AVFPCLASSPDX, ytab: _yvfpclasspdx, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8, 0x66,
	}},
	{as: AVFPCLASSPDY, ytab: _yvfpclasspdy, prefix: Pavx, op: opBytes{
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8, 0x66,
	}},
	{as: AVFPCLASSPDZ, ytab: _yvfpclasspdz, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8, 0x66,
	}},
	{as: AVFPCLASSPSX, ytab: _yvfpclasspdx, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4, 0x66,
	}},
	{as: AVFPCLASSPSY, ytab: _yvfpclasspdy, prefix: Pavx, op: opBytes{
		avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4, 0x66,
	}},
	{as: AVFPCLASSPSZ, ytab: _yvfpclasspdz, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4, 0x66,
	}},
	{as: AVFPCLASSSD, ytab: _yvfpclasspdx, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN8, 0x67,
	}},
	{as: AVFPCLASSSS, ytab: _yvfpclasspdx, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4, 0x67,
	}},
	{as: AVGATHERDPD, ytab: _yvgatherdpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x92,
		avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x92,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0x92,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0x92,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0x92,
	}},
	{as: AVGATHERDPS, ytab: _yvgatherdps, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x92,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x92,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0x92,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0x92,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0x92,
	}},
	{as: AVGATHERPF0DPD, ytab: _yvgatherpf0dpd, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC6, 01,
	}},
	{as: AVGATHERPF0DPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC6, 01,
	}},
	{as: AVGATHERPF0QPD, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC7, 01,
	}},
	{as: AVGATHERPF0QPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC7, 01,
	}},
	{as: AVGATHERPF1DPD, ytab: _yvgatherpf0dpd, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC6, 02,
	}},
	{as: AVGATHERPF1DPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC6, 02,
	}},
	{as: AVGATHERPF1QPD, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC7, 02,
	}},
	{as: AVGATHERPF1QPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC7, 02,
	}},
	{as: AVGATHERQPD, ytab: _yvgatherdps, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x93,
		avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x93,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0x93,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0x93,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0x93,
	}},
	{as: AVGATHERQPS, ytab: _yvgatherqps, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x93,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x93,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0x93,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0x93,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0x93,
	}},
	{as: AVGETEXPPD, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x42,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x42,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x42,
	}},
	{as: AVGETEXPPS, ytab: _yvcvtpd2qq, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x42,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x42,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x42,
	}},
	{as: AVGETEXPSD, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0x43,
	}},
	{as: AVGETEXPSS, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x43,
	}},
	{as: AVGETMANTPD, ytab: _yvgetmantpd, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x26,
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x26,
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x26,
	}},
	{as: AVGETMANTPS, ytab: _yvgetmantpd, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x26,
		avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x26,
		avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x26,
	}},
	{as: AVGETMANTSD, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0x27,
	}},
	{as: AVGETMANTSS, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x27,
	}},
	{as: AVGF2P8AFFINEINVQB, ytab: _yvgf2p8affineinvqb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW1, 0xCF,
		avxEscape | vex256 | vex66 | vex0F3A | vexW1, 0xCF,
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xCF,
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xCF,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xCF,
	}},
	{as: AVGF2P8AFFINEQB, ytab: _yvgf2p8affineinvqb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW1, 0xCE,
		avxEscape | vex256 | vex66 | vex0F3A | vexW1, 0xCE,
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xCE,
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xCE,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xCE,
	}},
	{as: AVGF2P8MULB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0xCF,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0xCF,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0xCF,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0xCF,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0xCF,
	}},
	{as: AVHADDPD, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x7C,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x7C,
	}},
	{as: AVHADDPS, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x7C,
		avxEscape | vex256 | vexF2 | vex0F | vexW0, 0x7C,
	}},
	{as: AVHSUBPD, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x7D,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x7D,
	}},
	{as: AVHSUBPS, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x7D,
		avxEscape | vex256 | vexF2 | vex0F | vexW0, 0x7D,
	}},
	{as: AVINSERTF128, ytab: _yvinsertf128, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x18,
	}},
	{as: AVINSERTF32X4, ytab: _yvinsertf32x4, prefix: Pavx, op: opBytes{
		avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x18,
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x18,
	}},
	{as: AVINSERTF32X8, ytab: _yvinsertf32x8, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN32 | evexZeroingEnabled, 0x1A,
	}},
	{as: AVINSERTF64X2, ytab: _yvinsertf32x4, prefix: Pavx, op: opBytes{
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x18,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x18,
	}},
	{as: AVINSERTF64X4, ytab: _yvinsertf32x8, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN32 | evexZeroingEnabled, 0x1A,
	}},
	{as: AVINSERTI128, ytab: _yvinsertf128, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x38,
	}},
	{as: AVINSERTI32X4, ytab: _yvinsertf32x4, prefix: Pavx, op: opBytes{
		avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x38,
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x38,
	}},
	{as: AVINSERTI32X8, ytab: _yvinsertf32x8, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN32 | evexZeroingEnabled, 0x3A,
	}},
	{as: AVINSERTI64X2, ytab: _yvinsertf32x4, prefix: Pavx, op: opBytes{
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x38,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x38,
	}},
	{as: AVINSERTI64X4, ytab: _yvinsertf32x8, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN32 | evexZeroingEnabled, 0x3A,
	}},
	{as: AVINSERTPS, ytab: _yvinsertps, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x21,
		avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4, 0x21,
	}},
	{as: AVLDDQU, ytab: _yvlddqu, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0xF0,
		avxEscape | vex256 | vexF2 | vex0F | vexW0, 0xF0,
	}},
	{as: AVLDMXCSR, ytab: _yvldmxcsr, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0xAE, 02,
	}},
	{as: AVMASKMOVDQU, ytab: _yvmaskmovdqu, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF7,
	}},
	{as: AVMASKMOVPD, ytab: _yvmaskmovpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x2F,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x2F,
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x2D,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x2D,
	}},
	{as: AVMASKMOVPS, ytab: _yvmaskmovpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x2E,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x2E,
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x2C,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x2C,
	}},
	{as: AVMAXPD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x5F,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x5F,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x5F,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x5F,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x5F,
	}},
	{as: AVMAXPS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x5F,
		avxEscape | vex256 | vex0F | vexW0, 0x5F,
		avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x5F,
		avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5F,
		avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x5F,
	}},
	{as: AVMAXSD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x5F,
		avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0x5F,
	}},
	{as: AVMAXSS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x5F,
		avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x5F,
	}},
	{as: AVMINPD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x5D,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x5D,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x5D,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x5D,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x5D,
	}},
	{as: AVMINPS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x5D,
		avxEscape | vex256 | vex0F | vexW0, 0x5D,
		avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x5D,
		avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5D,
		avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x5D,
	}},
	{as: AVMINSD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x5D,
		avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0x5D,
	}},
	{as: AVMINSS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x5D,
		avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x5D,
	}},
	{as: AVMOVAPD, ytab: _yvmovapd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x29,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x29,
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x28,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x28,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x29,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x29,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x29,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x28,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x28,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x28,
	}},
	{as: AVMOVAPS, ytab: _yvmovapd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x29,
		avxEscape | vex256 | vex0F | vexW0, 0x29,
		avxEscape | vex128 | vex0F | vexW0, 0x28,
		avxEscape | vex256 | vex0F | vexW0, 0x28,
		avxEscape | evex128 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x29,
		avxEscape | evex256 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x29,
		avxEscape | evex512 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x29,
		avxEscape | evex128 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x28,
		avxEscape | evex256 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x28,
		avxEscape | evex512 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x28,
	}},
	{as: AVMOVD, ytab: _yvmovd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x7E,
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x6E,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN4, 0x7E,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN4, 0x6E,
	}},
	{as: AVMOVDDUP, ytab: _yvmovddup, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x12,
		avxEscape | vex256 | vexF2 | vex0F | vexW0, 0x12,
		avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexZeroingEnabled, 0x12,
		avxEscape | evex256 | evexF2 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x12,
		avxEscape | evex512 | evexF2 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x12,
	}},
	{as: AVMOVDQA, ytab: _yvmovdqa, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x7F,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x7F,
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x6F,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x6F,
	}},
	{as: AVMOVDQA32, ytab: _yvmovdqa32, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x7F,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x7F,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x7F,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x6F,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x6F,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x6F,
	}},
	{as: AVMOVDQA64, ytab: _yvmovdqa32, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x7F,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x7F,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x7F,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x6F,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x6F,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x6F,
	}},
	{as: AVMOVDQU, ytab: _yvmovdqa, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x7F,
		avxEscape | vex256 | vexF3 | vex0F | vexW0, 0x7F,
		avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x6F,
		avxEscape | vex256 | vexF3 | vex0F | vexW0, 0x6F,
	}},
	{as: AVMOVDQU16, ytab: _yvmovdqa32, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x7F,
		avxEscape | evex256 | evexF2 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x7F,
		avxEscape | evex512 | evexF2 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x7F,
		avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x6F,
		avxEscape | evex256 | evexF2 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x6F,
		avxEscape | evex512 | evexF2 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x6F,
	}},
	{as: AVMOVDQU32, ytab: _yvmovdqa32, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x7F,
		avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x7F,
		avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x7F,
		avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x6F,
		avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x6F,
		avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x6F,
	}},
	{as: AVMOVDQU64, ytab: _yvmovdqa32, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x7F,
		avxEscape | evex256 | evexF3 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x7F,
		avxEscape | evex512 | evexF3 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x7F,
		avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x6F,
		avxEscape | evex256 | evexF3 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x6F,
		avxEscape | evex512 | evexF3 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x6F,
	}},
	{as: AVMOVDQU8, ytab: _yvmovdqa32, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x7F,
		avxEscape | evex256 | evexF2 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x7F,
		avxEscape | evex512 | evexF2 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x7F,
		avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x6F,
		avxEscape | evex256 | evexF2 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x6F,
		avxEscape | evex512 | evexF2 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x6F,
	}},
	{as: AVMOVHLPS, ytab: _yvmovhlps, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x12,
		avxEscape | evex128 | evex0F | evexW0, 0, 0x12,
	}},
	{as: AVMOVHPD, ytab: _yvmovhpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x17,
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x16,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8, 0x17,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8, 0x16,
	}},
	{as: AVMOVHPS, ytab: _yvmovhpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x17,
		avxEscape | vex128 | vex0F | vexW0, 0x16,
		avxEscape | evex128 | evex0F | evexW0, evexN8, 0x17,
		avxEscape | evex128 | evex0F | evexW0, evexN8, 0x16,
	}},
	{as: AVMOVLHPS, ytab: _yvmovhlps, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x16,
		avxEscape | evex128 | evex0F | evexW0, 0, 0x16,
	}},
	{as: AVMOVLPD, ytab: _yvmovhpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x13,
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x12,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8, 0x13,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8, 0x12,
	}},
	{as: AVMOVLPS, ytab: _yvmovhpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x13,
		avxEscape | vex128 | vex0F | vexW0, 0x12,
		avxEscape | evex128 | evex0F | evexW0, evexN8, 0x13,
		avxEscape | evex128 | evex0F | evexW0, evexN8, 0x12,
	}},
	{as: AVMOVMSKPD, ytab: _yvmovmskpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x50,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x50,
	}},
	{as: AVMOVMSKPS, ytab: _yvmovmskpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x50,
		avxEscape | vex256 | vex0F | vexW0, 0x50,
	}},
	{as: AVMOVNTDQ, ytab: _yvmovntdq, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE7,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE7,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0xE7,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0xE7,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0xE7,
	}},
	{as: AVMOVNTDQA, ytab: _yvmovntdqa, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x2A,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x2A,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16, 0x2A,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32, 0x2A,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64, 0x2A,
	}},
	{as: AVMOVNTPD, ytab: _yvmovntdq, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x2B,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x2B,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16, 0x2B,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32, 0x2B,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64, 0x2B,
	}},
	{as: AVMOVNTPS, ytab: _yvmovntdq, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x2B,
		avxEscape | vex256 | vex0F | vexW0, 0x2B,
		avxEscape | evex128 | evex0F | evexW0, evexN16, 0x2B,
		avxEscape | evex256 | evex0F | evexW0, evexN32, 0x2B,
		avxEscape | evex512 | evex0F | evexW0, evexN64, 0x2B,
	}},
	{as: AVMOVQ, ytab: _yvmovq, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW1, 0x7E,
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD6,
		avxEscape | vex128 | vex66 | vex0F | vexW1, 0x6E,
		avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x7E,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8, 0x7E,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8, 0xD6,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8, 0x6E,
		avxEscape | evex128 | evexF3 | evex0F | evexW1, evexN8, 0x7E,
	}},
	{as: AVMOVSD, ytab: _yvmovsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x11,
		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x11,
		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x10,
		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x10,
		avxEscape | evex128 | evexF2 | evex0F | evexW1, evexZeroingEnabled, 0x11,
		avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8, 0x11,
		avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexZeroingEnabled, 0x10,
		avxEscape | evex128 | evexF2 | evex0F | evexW1, evexZeroingEnabled, 0x10,
	}},
	{as: AVMOVSHDUP, ytab: _yvmovddup, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x16,
		avxEscape | vex256 | vexF3 | vex0F | vexW0, 0x16,
		avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x16,
		avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x16,
		avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x16,
	}},
	{as: AVMOVSLDUP, ytab: _yvmovddup, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x12,
		avxEscape | vex256 | vexF3 | vex0F | vexW0, 0x12,
		avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x12,
		avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x12,
		avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x12,
	}},
	{as: AVMOVSS, ytab: _yvmovsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x11,
		avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x11,
		avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x10,
		avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x10,
		avxEscape | evex128 | evexF3 | evex0F | evexW0, evexZeroingEnabled, 0x11,
		avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4, 0x11,
		avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexZeroingEnabled, 0x10,
		avxEscape | evex128 | evexF3 | evex0F | evexW0, evexZeroingEnabled, 0x10,
	}},
	{as: AVMOVUPD, ytab: _yvmovapd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x11,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x11,
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x10,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x10,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x11,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x11,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x11,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0x10,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexZeroingEnabled, 0x10,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexZeroingEnabled, 0x10,
	}},
	{as: AVMOVUPS, ytab: _yvmovapd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x11,
		avxEscape | vex256 | vex0F | vexW0, 0x11,
		avxEscape | vex128 | vex0F | vexW0, 0x10,
		avxEscape | vex256 | vex0F | vexW0, 0x10,
		avxEscape | evex128 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x11,
		avxEscape | evex256 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x11,
		avxEscape | evex512 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x11,
		avxEscape | evex128 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x10,
		avxEscape | evex256 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x10,
		avxEscape | evex512 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x10,
	}},
	{as: AVMPSADBW, ytab: _yvblendpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x42,
		avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x42,
	}},
	{as: AVMULPD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x59,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x59,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x59,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x59,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x59,
	}},
	{as: AVMULPS, ytab: _yvaddpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x59,
		avxEscape | vex256 | vex0F | vexW0, 0x59,
		avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x59,
		avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x59,
		avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x59,
	}},
	{as: AVMULSD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x59,
		avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x59,
	}},
	{as: AVMULSS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x59,
		avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x59,
	}},
	{as: AVORPD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x56,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x56,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x56,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x56,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x56,
	}},
	{as: AVORPS, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex0F | vexW0, 0x56,
		avxEscape | vex256 | vex0F | vexW0, 0x56,
		avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x56,
		avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x56,
		avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x56,
	}},
	{as: AVP4DPWSSD, ytab: _yv4fmaddps, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evexF2 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x52,
	}},
	{as: AVP4DPWSSDS, ytab: _yv4fmaddps, prefix: Pavx, op: opBytes{
		avxEscape | evex512 | evexF2 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x53,
	}},
	{as: AVPABSB, ytab: _yvmovddup, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x1C,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x1C,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x1C,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x1C,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x1C,
	}},
	{as: AVPABSD, ytab: _yvmovddup, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x1E,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x1E,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x1E,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x1E,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x1E,
	}},
	{as: AVPABSQ, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x1F,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x1F,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x1F,
	}},
	{as: AVPABSW, ytab: _yvmovddup, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x1D,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x1D,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x1D,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x1D,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x1D,
	}},
	{as: AVPACKSSDW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x6B,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x6B,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x6B,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x6B,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x6B,
	}},
	{as: AVPACKSSWB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x63,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x63,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x63,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x63,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x63,
	}},
	{as: AVPACKUSDW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x2B,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x2B,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x2B,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x2B,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x2B,
	}},
	{as: AVPACKUSWB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x67,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x67,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x67,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x67,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x67,
	}},
	{as: AVPADDB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xFC,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xFC,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xFC,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xFC,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xFC,
	}},
	{as: AVPADDD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xFE,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xFE,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xFE,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xFE,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xFE,
	}},
	{as: AVPADDQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD4,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD4,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xD4,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xD4,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xD4,
	}},
	{as: AVPADDSB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xEC,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xEC,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xEC,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xEC,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xEC,
	}},
	{as: AVPADDSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xED,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xED,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xED,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xED,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xED,
	}},
	{as: AVPADDUSB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xDC,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xDC,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xDC,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xDC,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xDC,
	}},
	{as: AVPADDUSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xDD,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xDD,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xDD,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xDD,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xDD,
	}},
	{as: AVPADDW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xFD,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xFD,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xFD,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xFD,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xFD,
	}},
	{as: AVPALIGNR, ytab: _yvgf2p8affineinvqb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x0F,
		avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x0F,
		avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexZeroingEnabled, 0x0F,
		avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexZeroingEnabled, 0x0F,
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexZeroingEnabled, 0x0F,
	}},
	{as: AVPAND, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xDB,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xDB,
	}},
	{as: AVPANDD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xDB,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xDB,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xDB,
	}},
	{as: AVPANDN, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xDF,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xDF,
	}},
	{as: AVPANDND, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xDF,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xDF,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xDF,
	}},
	{as: AVPANDNQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xDF,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xDF,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xDF,
	}},
	{as: AVPANDQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xDB,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xDB,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xDB,
	}},
	{as: AVPAVGB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE0,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE0,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE0,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xE0,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xE0,
	}},
	{as: AVPAVGW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE3,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE3,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE3,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xE3,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xE3,
	}},
	{as: AVPBLENDD, ytab: _yvblendpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x02,
		avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x02,
	}},
	{as: AVPBLENDMB, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x66,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x66,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x66,
	}},
	{as: AVPBLENDMD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x64,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x64,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x64,
	}},
	{as: AVPBLENDMQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x64,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x64,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x64,
	}},
	{as: AVPBLENDMW, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x66,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x66,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x66,
	}},
	{as: AVPBLENDVB, ytab: _yvblendvpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x4C,
		avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x4C,
	}},
	{as: AVPBLENDW, ytab: _yvblendpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x0E,
		avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x0E,
	}},
	{as: AVPBROADCASTB, ytab: _yvpbroadcastb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x78,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x78,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7A,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7A,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7A,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x78,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x78,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x78,
	}},
	{as: AVPBROADCASTD, ytab: _yvpbroadcastb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x58,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x58,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7C,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7C,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7C,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x58,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x58,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x58,
	}},
	{as: AVPBROADCASTMB2Q, ytab: _yvpbroadcastmb2q, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evexF3 | evex0F38 | evexW1, 0, 0x2A,
		avxEscape | evex256 | evexF3 | evex0F38 | evexW1, 0, 0x2A,
		avxEscape | evex512 | evexF3 | evex0F38 | evexW1, 0, 0x2A,
	}},
	{as: AVPBROADCASTMW2D, ytab: _yvpbroadcastmb2q, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evexF3 | evex0F38 | evexW0, 0, 0x3A,
		avxEscape | evex256 | evexF3 | evex0F38 | evexW0, 0, 0x3A,
		avxEscape | evex512 | evexF3 | evex0F38 | evexW0, 0, 0x3A,
	}},
	{as: AVPBROADCASTQ, ytab: _yvpbroadcastb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x59,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x59,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexZeroingEnabled, 0x7C,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexZeroingEnabled, 0x7C,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexZeroingEnabled, 0x7C,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x59,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x59,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x59,
	}},
	{as: AVPBROADCASTW, ytab: _yvpbroadcastb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x79,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x79,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7B,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7B,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexZeroingEnabled, 0x7B,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x79,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x79,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x79,
	}},
	{as: AVPCLMULQDQ, ytab: _yvpclmulqdq, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x44,
		avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x44,
		avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16, 0x44,
		avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32, 0x44,
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64, 0x44,
	}},
	{as: AVPCMPB, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16, 0x3F,
		avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32, 0x3F,
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64, 0x3F,
	}},
	{as: AVPCMPD, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4, 0x1F,
		avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4, 0x1F,
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4, 0x1F,
	}},
	{as: AVPCMPEQB, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x74,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x74,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0x74,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0x74,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0x74,
	}},
	{as: AVPCMPEQD, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x76,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x76,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4, 0x76,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4, 0x76,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4, 0x76,
	}},
	{as: AVPCMPEQQ, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x29,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x29,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8, 0x29,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8, 0x29,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8, 0x29,
	}},
	{as: AVPCMPEQW, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x75,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x75,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0x75,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0x75,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0x75,
	}},
	{as: AVPCMPESTRI, ytab: _yvaeskeygenassist, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexWIG, 0x61,
	}},
	{as: AVPCMPESTRM, ytab: _yvaeskeygenassist, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexWIG, 0x60,
	}},
	{as: AVPCMPGTB, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x64,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x64,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0x64,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0x64,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0x64,
	}},
	{as: AVPCMPGTD, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x66,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x66,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4, 0x66,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4, 0x66,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4, 0x66,
	}},
	{as: AVPCMPGTQ, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x37,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x37,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8, 0x37,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8, 0x37,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8, 0x37,
	}},
	{as: AVPCMPGTW, ytab: _yvpcmpeqb, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x65,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x65,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0x65,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0x65,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0x65,
	}},
	{as: AVPCMPISTRI, ytab: _yvaeskeygenassist, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexWIG, 0x63,
	}},
	{as: AVPCMPISTRM, ytab: _yvaeskeygenassist, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x62,
	}},
	{as: AVPCMPQ, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8, 0x1F,
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8, 0x1F,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8, 0x1F,
	}},
	{as: AVPCMPUB, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16, 0x3E,
		avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32, 0x3E,
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64, 0x3E,
	}},
	{as: AVPCMPUD, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4, 0x1E,
		avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4, 0x1E,
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4, 0x1E,
	}},
	{as: AVPCMPUQ, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8, 0x1E,
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8, 0x1E,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8, 0x1E,
	}},
	{as: AVPCMPUW, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16, 0x3E,
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32, 0x3E,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64, 0x3E,
	}},
	{as: AVPCMPW, ytab: _yvpcmpb, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16, 0x3F,
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32, 0x3F,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64, 0x3F,
	}},
	{as: AVPCOMPRESSB, ytab: _yvcompresspd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x63,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x63,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x63,
	}},
	{as: AVPCOMPRESSD, ytab: _yvcompresspd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x8B,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x8B,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x8B,
	}},
	{as: AVPCOMPRESSQ, ytab: _yvcompresspd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x8B,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x8B,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x8B,
	}},
	{as: AVPCOMPRESSW, ytab: _yvcompresspd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN2 | evexZeroingEnabled, 0x63,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN2 | evexZeroingEnabled, 0x63,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN2 | evexZeroingEnabled, 0x63,
	}},
	{as: AVPCONFLICTD, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xC4,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xC4,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xC4,
	}},
	{as: AVPCONFLICTQ, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xC4,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xC4,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xC4,
	}},
	{as: AVPDPBUSD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x50,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x50,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x50,
	}},
	{as: AVPDPBUSDS, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x51,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x51,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x51,
	}},
	{as: AVPDPWSSD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x52,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x52,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x52,
	}},
	{as: AVPDPWSSDS, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x53,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x53,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x53,
	}},
	{as: AVPERM2F128, ytab: _yvperm2f128, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x06,
	}},
	{as: AVPERM2I128, ytab: _yvperm2f128, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x46,
	}},
	{as: AVPERMB, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x8D,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x8D,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x8D,
	}},
	{as: AVPERMD, ytab: _yvpermd, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x36,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x36,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x36,
	}},
	{as: AVPERMI2B, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x75,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x75,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x75,
	}},
	{as: AVPERMI2D, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x76,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x76,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x76,
	}},
	{as: AVPERMI2PD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x77,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x77,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x77,
	}},
	{as: AVPERMI2PS, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x77,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x77,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x77,
	}},
	{as: AVPERMI2Q, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x76,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x76,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x76,
	}},
	{as: AVPERMI2W, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x75,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x75,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x75,
	}},
	{as: AVPERMILPD, ytab: _yvpermilpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x05,
		avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x05,
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x0D,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x0D,
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x05,
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x05,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x05,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x0D,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x0D,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x0D,
	}},
	{as: AVPERMILPS, ytab: _yvpermilpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x04,
		avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x04,
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x0C,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x0C,
		avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x04,
		avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x04,
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x04,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x0C,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x0C,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x0C,
	}},
	{as: AVPERMPD, ytab: _yvpermq, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex66 | vex0F3A | vexW1, 0x01,
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x01,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x01,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x16,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x16,
	}},
	{as: AVPERMPS, ytab: _yvpermd, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x16,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x16,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x16,
	}},
	{as: AVPERMQ, ytab: _yvpermq, prefix: Pavx, op: opBytes{
		avxEscape | vex256 | vex66 | vex0F3A | vexW1, 0x00,
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x00,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x00,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x36,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x36,
	}},
	{as: AVPERMT2B, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x7D,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x7D,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x7D,
	}},
	{as: AVPERMT2D, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x7E,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x7E,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x7E,
	}},
	{as: AVPERMT2PD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x7F,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x7F,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x7F,
	}},
	{as: AVPERMT2PS, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x7F,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x7F,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x7F,
	}},
	{as: AVPERMT2Q, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x7E,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x7E,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x7E,
	}},
	{as: AVPERMT2W, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x7D,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x7D,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x7D,
	}},
	{as: AVPERMW, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x8D,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x8D,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x8D,
	}},
	{as: AVPEXPANDB, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x62,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x62,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN1 | evexZeroingEnabled, 0x62,
	}},
	{as: AVPEXPANDD, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x89,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x89,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x89,
	}},
	{as: AVPEXPANDQ, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x89,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x89,
		avxEscape |
evex512 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x89, }}, {as: AVPEXPANDW, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN2 | evexZeroingEnabled, 0x62, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN2 | evexZeroingEnabled, 0x62, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN2 | evexZeroingEnabled, 0x62, }}, {as: AVPEXTRB, ytab: _yvextractps, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x14, avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN1, 0x14, }}, {as: AVPEXTRD, ytab: _yvextractps, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x16, avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4, 0x16, }}, {as: AVPEXTRQ, ytab: _yvextractps, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW1, 0x16, avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN8, 0x16, }}, {as: AVPEXTRW, ytab: _yvpextrw, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x15, avxEscape | vex128 | vex66 | vex0F | vexW0, 0xC5, avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN2, 0x15, avxEscape | evex128 | evex66 | evex0F | evexW0, 0, 0xC5, }}, {as: AVPGATHERDD, ytab: _yvgatherdps, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x90, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x90, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0x90, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0x90, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0x90, }}, {as: AVPGATHERDQ, ytab: _yvgatherdpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x90, avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x90, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0x90, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0x90, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0x90, }}, {as: AVPGATHERQD, ytab: _yvgatherqps, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x91, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x91, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0x91, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0x91, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0x91, }}, {as: AVPGATHERQQ, ytab: _yvgatherdps, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x91, avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x91, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0x91, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0x91, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0x91, }}, {as: AVPHADDD, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x02, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x02, }}, {as: AVPHADDSW, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x03, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x03, }}, {as: AVPHADDW, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x01, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x01, }}, {as: AVPHMINPOSUW, ytab: _yvaesimc, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x41, }}, {as: AVPHSUBD, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x06, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x06, }}, {as: AVPHSUBSW, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ 
avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x07, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x07, }}, {as: AVPHSUBW, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x05, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x05, }}, {as: AVPINSRB, ytab: _yvpinsrb, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x20, avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN1, 0x20, }}, {as: AVPINSRD, ytab: _yvpinsrb, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x22, avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4, 0x22, }}, {as: AVPINSRQ, ytab: _yvpinsrb, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW1, 0x22, avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN8, 0x22, }}, {as: AVPINSRW, ytab: _yvpinsrb, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xC4, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN2, 0xC4, }}, {as: AVPLZCNTD, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x44, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x44, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x44, }}, {as: AVPLZCNTQ, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x44, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x44, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x44, }}, {as: AVPMADD52HUQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xB5, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xB5, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xB5, }}, {as: AVPMADD52LUQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xB4, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xB4, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xB4, }}, {as: AVPMADDUBSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x04, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x04, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x04, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x04, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x04, }}, {as: AVPMADDWD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF5, avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF5, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF5, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xF5, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xF5, }}, {as: AVPMASKMOVD, ytab: _yvmaskmovpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x8E, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x8E, avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x8C, avxEscape | vex256 | vex66 | vex0F38 | 
vexW0, 0x8C, }}, {as: AVPMASKMOVQ, ytab: _yvmaskmovpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x8E, avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x8E, avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x8C, avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x8C, }}, {as: AVPMAXSB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x3C, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x3C, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x3C, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x3C, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x3C, }}, {as: AVPMAXSD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x3D, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x3D, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x3D, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x3D, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x3D, }}, {as: AVPMAXSQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x3D, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x3D, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x3D, }}, {as: AVPMAXSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xEE, avxEscape | vex256 | vex66 | vex0F | vexW0, 0xEE, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xEE, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xEE, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xEE, }}, {as: AVPMAXUB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xDE, avxEscape | vex256 | vex66 | vex0F | vexW0, 0xDE, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xDE, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xDE, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xDE, }}, {as: AVPMAXUD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x3F, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x3F, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x3F, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x3F, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x3F, }}, {as: AVPMAXUQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x3F, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x3F, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x3F, }}, {as: AVPMAXUW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x3E, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x3E, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x3E, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x3E, avxEscape | 
evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x3E, }}, {as: AVPMINSB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x38, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x38, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x38, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x38, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x38, }}, {as: AVPMINSD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x39, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x39, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x39, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x39, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x39, }}, {as: AVPMINSQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x39, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x39, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x39, }}, {as: AVPMINSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xEA, avxEscape | vex256 | vex66 | vex0F | vexW0, 0xEA, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xEA, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xEA, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xEA, }}, {as: AVPMINUB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xDA, avxEscape | vex256 | vex66 | vex0F | vexW0, 0xDA, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xDA, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xDA, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xDA, }}, {as: AVPMINUD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x3B, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x3B, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x3B, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x3B, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x3B, }}, {as: AVPMINUQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x3B, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x3B, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x3B, }}, {as: AVPMINUW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x3A, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x3A, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x3A, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x3A, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x3A, }}, {as: AVPMOVB2M, ytab: _yvpmovb2m, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, 0, 0x29, avxEscape | evex256 | 
evexF3 | evex0F38 | evexW0, 0, 0x29, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, 0, 0x29, }}, {as: AVPMOVD2M, ytab: _yvpmovb2m, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, 0, 0x39, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, 0, 0x39, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, 0, 0x39, }}, {as: AVPMOVDB, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x31, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x31, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x31, }}, {as: AVPMOVDW, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x33, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x33, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x33, }}, {as: AVPMOVM2B, ytab: _yvpbroadcastmb2q, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, 0, 0x28, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, 0, 0x28, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, 0, 0x28, }}, {as: AVPMOVM2D, ytab: _yvpbroadcastmb2q, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, 0, 0x38, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, 0, 0x38, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, 0, 0x38, }}, {as: AVPMOVM2Q, ytab: _yvpbroadcastmb2q, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW1, 0, 0x38, avxEscape | evex256 | evexF3 | evex0F38 | evexW1, 0, 0x38, avxEscape | evex512 | evexF3 | evex0F38 | evexW1, 0, 0x38, }}, {as: AVPMOVM2W, ytab: _yvpbroadcastmb2q, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW1, 0, 0x28, avxEscape | evex256 | evexF3 | evex0F38 | evexW1, 0, 0x28, avxEscape | evex512 | evexF3 | evex0F38 | evexW1, 0, 0x28, }}, {as: AVPMOVMSKB, ytab: _yvmovmskpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD7, avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD7, }}, {as: AVPMOVQ2M, ytab: _yvpmovb2m, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW1, 0, 0x39, avxEscape | evex256 | evexF3 | evex0F38 | evexW1, 0, 0x39, avxEscape | evex512 | evexF3 | evex0F38 | evexW1, 0, 0x39, }}, {as: AVPMOVQB, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x32, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x32, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x32, }}, {as: AVPMOVQD, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x35, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x35, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x35, }}, {as: AVPMOVQW, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x34, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x34, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x34, }}, {as: AVPMOVSDB, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x21, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN8 | 
evexZeroingEnabled, 0x21, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x21, }}, {as: AVPMOVSDW, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x23, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x23, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x23, }}, {as: AVPMOVSQB, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x22, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x22, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x22, }}, {as: AVPMOVSQD, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x25, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x25, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x25, }}, {as: AVPMOVSQW, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x24, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x24, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x24, }}, {as: AVPMOVSWB, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x20, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x20, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x20, }}, {as: AVPMOVSXBD, ytab: _yvbroadcastss, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x21, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x21, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x21, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x21, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x21, }}, {as: AVPMOVSXBQ, ytab: _yvbroadcastss, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x22, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x22, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x22, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x22, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x22, }}, {as: AVPMOVSXBW, ytab: _yvcvtdq2pd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x20, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x20, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x20, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x20, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x20, }}, {as: AVPMOVSXDQ, ytab: _yvcvtdq2pd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x25, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x25, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x25, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x25, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x25, }}, {as: AVPMOVSXWD, ytab: _yvcvtdq2pd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x23, 
avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x23, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x23, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x23, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x23, }}, {as: AVPMOVSXWQ, ytab: _yvbroadcastss, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x24, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x24, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x24, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x24, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x24, }}, {as: AVPMOVUSDB, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x11, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x11, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x11, }}, {as: AVPMOVUSDW, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x13, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x13, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x13, }}, {as: AVPMOVUSQB, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x12, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x12, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x12, }}, {as: AVPMOVUSQD, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x15, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x15, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x15, }}, {as: AVPMOVUSQW, ytab: _yvpmovdb, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x14, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x14, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x14, }}, {as: AVPMOVUSWB, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x10, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x10, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x10, }}, {as: AVPMOVW2M, ytab: _yvpmovb2m, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW1, 0, 0x29, avxEscape | evex256 | evexF3 | evex0F38 | evexW1, 0, 0x29, avxEscape | evex512 | evexF3 | evex0F38 | evexW1, 0, 0x29, }}, {as: AVPMOVWB, ytab: _yvpmovdw, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x30, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x30, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x30, }}, {as: AVPMOVZXBD, ytab: _yvbroadcastss, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x31, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x31, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x31, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN8 | 
evexZeroingEnabled, 0x31, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x31, }}, {as: AVPMOVZXBQ, ytab: _yvbroadcastss, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x32, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x32, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN2 | evexZeroingEnabled, 0x32, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x32, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x32, }}, {as: AVPMOVZXBW, ytab: _yvcvtdq2pd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x30, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x30, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x30, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x30, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x30, }}, {as: AVPMOVZXDQ, ytab: _yvcvtdq2pd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x35, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x35, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x35, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x35, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x35, }}, {as: AVPMOVZXWD, ytab: _yvcvtdq2pd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x33, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x33, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x33, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x33, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x33, }}, {as: AVPMOVZXWQ, ytab: _yvbroadcastss, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x34, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x34, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x34, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN8 | evexZeroingEnabled, 0x34, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x34, }}, {as: AVPMULDQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x28, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x28, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x28, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x28, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x28, }}, {as: AVPMULHRSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x0B, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x0B, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x0B, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x0B, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x0B, }}, {as: AVPMULHUW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE4, avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE4, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE4, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xE4, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | 
evexZeroingEnabled, 0xE4, }}, {as: AVPMULHW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE5, avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE5, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE5, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xE5, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xE5, }}, {as: AVPMULLD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x40, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x40, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x40, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x40, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x40, }}, {as: AVPMULLQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x40, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x40, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x40, }}, {as: AVPMULLW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD5, avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD5, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD5, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xD5, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xD5, }}, {as: AVPMULTISHIFTQB, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x83, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x83, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x83, }}, {as: AVPMULUDQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF4, avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF4, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xF4, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xF4, avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xF4, }}, {as: AVPOPCNTB, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x54, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x54, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x54, }}, {as: AVPOPCNTD, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x55, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x55, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x55, }}, {as: AVPOPCNTQ, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x55, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x55, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 
		0x55,
	}},
	{as: AVPOPCNTW, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x54,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x54,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x54,
	}},
	{as: AVPOR, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xEB,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xEB,
	}},
	{as: AVPORD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xEB,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xEB,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xEB,
	}},
	{as: AVPORQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xEB,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xEB,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xEB,
	}},
	{as: AVPROLD, ytab: _yvprold, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x72, 01,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x72, 01,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x72, 01,
	}},
	{as: AVPROLQ, ytab: _yvprold, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x72, 01,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x72, 01,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x72, 01,
	}},
	{as: AVPROLVD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x15,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x15,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x15,
	}},
	{as: AVPROLVQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x15,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x15,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x15,
	}},
	{as: AVPRORD, ytab: _yvprold, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x72, 00,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x72, 00,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x72, 00,
	}},
	{as: AVPRORQ, ytab: _yvprold, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x72, 00,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x72, 00,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x72, 00,
	}},
	{as: AVPRORVD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x14,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x14,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x14,
	}},
	{as: AVPRORVQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x14,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x14,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x14,
	}},
	{as: AVPSADBW, ytab: _yvaesdec, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF6,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF6,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0xF6,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0xF6,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0xF6,
	}},
	{as: AVPSCATTERDD, ytab: _yvpscatterdd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0xA0,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0xA0,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xA0,
	}},
	{as: AVPSCATTERDQ, ytab: _yvpscatterdq, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0xA0,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0xA0,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xA0,
	}},
	{as: AVPSCATTERQD, ytab: _yvpscatterqd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0xA1,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0xA1,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xA1,
	}},
	{as: AVPSCATTERQQ, ytab: _yvpscatterdd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0xA1,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0xA1,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xA1,
	}},
	{as: AVPSHLDD, ytab: _yvalignd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x71,
		avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x71,
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x71,
	}},
	{as: AVPSHLDQ, ytab: _yvalignd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x71,
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x71,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x71,
	}},
	{as: AVPSHLDVD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x71,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x71,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x71,
	}},
	{as: AVPSHLDVQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x71,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x71,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x71,
	}},
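	// Editorial note, not generated output: each opBytes row above and below
	// is one VEX/EVEX encoding. The first element is the OR of the
	// escape/prefix/map/W flags; EVEX rows then carry an attribute element
	// (evexN* is the memory-operand size used to scale compressed disp8
	// offsets, evexBcstN* the broadcast element size, evexZeroingEnabled marks
	// support for zeroing-masking, and 0 means no attributes), followed by the
	// opcode byte and, where present, a ModRM /digit opcode extension
	// (e.g. "0x72, 01" in the VPROLD rows above).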
	{as: AVPSHLDVW, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x70,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x70,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x70,
	}},
	{as: AVPSHLDW, ytab: _yvalignd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x70,
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexZeroingEnabled, 0x70,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexZeroingEnabled, 0x70,
	}},
	{as: AVPSHRDD, ytab: _yvalignd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x73,
		avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x73,
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x73,
	}},
	{as: AVPSHRDQ, ytab: _yvalignd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x73,
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x73,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x73,
	}},
	{as: AVPSHRDVD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x73,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x73,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x73,
	}},
	{as: AVPSHRDVQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x73,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x73,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x73,
	}},
	{as: AVPSHRDVW, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x72,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x72,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x72,
	}},
	{as: AVPSHRDW, ytab: _yvalignd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexZeroingEnabled, 0x72,
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexZeroingEnabled, 0x72,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexZeroingEnabled, 0x72,
	}},
	{as: AVPSHUFB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x00,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x00,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexZeroingEnabled, 0x00,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexZeroingEnabled, 0x00,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexZeroingEnabled, 0x00,
	}},
	{as: AVPSHUFBITQMB, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16, 0x8F,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32, 0x8F,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64, 0x8F,
	}},
	{as: AVPSHUFD, ytab: _yvpshufd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x70,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x70,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x70,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x70,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x70,
	}},
	{as: AVPSHUFHW, ytab: _yvpshufd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x70,
		avxEscape | vex256 | vexF3 | vex0F | vexW0, 0x70,
		avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x70,
		avxEscape | evex256 | evexF3 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x70,
		avxEscape | evex512 | evexF3 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x70,
	}},
	{as: AVPSHUFLW, ytab: _yvpshufd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x70,
		avxEscape | vex256 | vexF2 | vex0F | vexW0, 0x70,
		avxEscape | evex128 | evexF2 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x70,
		avxEscape | evex256 | evexF2 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x70,
		avxEscape | evex512 | evexF2 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x70,
	}},
	{as: AVPSIGNB, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x08,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x08,
	}},
	{as: AVPSIGND, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x0A,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x0A,
	}},
	{as: AVPSIGNW, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x09,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x09,
	}},
	{as: AVPSLLD, ytab: _yvpslld, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x72, 06,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x72, 06,
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF2,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF2,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x72, 06,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x72, 06,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x72, 06,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF2,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF2,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF2,
	}},
	{as: AVPSLLDQ, ytab: _yvpslldq, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x73, 07,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x73, 07,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0x73, 07,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0x73, 07,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0x73, 07,
	}},
	{as: AVPSLLQ, ytab: _yvpslld, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x73, 06,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x73, 06,
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF3,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF3,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x73, 06,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x73, 06,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x73, 06,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xF3,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xF3,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xF3,
	}},
	{as: AVPSLLVD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x47,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x47,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x47,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x47,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x47,
	}},
	{as: AVPSLLVQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x47,
		avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x47,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x47,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x47,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x47,
	}},
	{as: AVPSLLVW, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x12,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x12,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x12,
	}},
	{as: AVPSLLW, ytab: _yvpslld, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x71, 06,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x71, 06,
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF1,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF1,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x71, 06,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x71, 06,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x71, 06,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF1,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF1,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF1,
	}},
	{as: AVPSRAD, ytab: _yvpslld, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x72, 04,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x72, 04,
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE2,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE2,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x72, 04,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x72, 04,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x72, 04,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE2,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE2,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE2,
	}},
	{as: AVPSRAQ, ytab: _yvpsraq, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x72, 04,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x72, 04,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x72, 04,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xE2,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xE2,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xE2,
	}},
	{as: AVPSRAVD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x46,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x46,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x46,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x46,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x46,
	}},
	{as: AVPSRAVQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x46,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x46,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x46,
	}},
	{as: AVPSRAVW, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x11,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x11,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x11,
	}},
	{as: AVPSRAW, ytab: _yvpslld, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x71, 04,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x71, 04,
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE1,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE1,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x71, 04,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x71, 04,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x71, 04,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE1,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE1,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE1,
	}},
	{as: AVPSRLD, ytab: _yvpslld, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x72, 02,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x72, 02,
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD2,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD2,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x72, 02,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x72, 02,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x72, 02,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD2,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD2,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD2,
	}},
	{as: AVPSRLDQ, ytab: _yvpslldq, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x73, 03,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x73, 03,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16, 0x73, 03,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32, 0x73, 03,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64, 0x73, 03,
	}},
	{as: AVPSRLQ, ytab: _yvpslld, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x73, 02,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x73, 02,
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD3,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD3,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x73, 02,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x73, 02,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x73, 02,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xD3,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xD3,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN16 | evexZeroingEnabled, 0xD3,
	}},
	{as: AVPSRLVD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x45,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x45,
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x45,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x45,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x45,
	}},
	{as: AVPSRLVQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW1, 0x45,
		avxEscape | vex256 | vex66 | vex0F38 | vexW1, 0x45,
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x45,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x45,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x45,
	}},
	{as: AVPSRLVW, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexZeroingEnabled, 0x10,
		avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexZeroingEnabled, 0x10,
		avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexZeroingEnabled, 0x10,
	}},
	{as: AVPSRLW, ytab: _yvpslld, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0x71, 02,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0x71, 02,
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD1,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD1,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x71, 02,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x71, 02,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x71, 02,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD1,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD1,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD1,
	}},
	{as: AVPSUBB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF8,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF8,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF8,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xF8,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xF8,
	}},
	{as: AVPSUBD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xFA,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xFA,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xFA,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xFA,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xFA,
	}},
	{as: AVPSUBQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xFB,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xFB,
		avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xFB,
		avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xFB,
		avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xFB,
	}},
	{as: AVPSUBSB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE8,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE8,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE8,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xE8,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xE8,
	}},
	{as: AVPSUBSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xE9,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xE9,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xE9,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xE9,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xE9,
	}},
	{as: AVPSUBUSB, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD8,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD8,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD8,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xD8,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xD8,
	}},
	{as: AVPSUBUSW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xD9,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xD9,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xD9,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xD9,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xD9,
	}},
	{as: AVPSUBW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F | vexW0, 0xF9,
		avxEscape | vex256 | vex66 | vex0F | vexW0, 0xF9,
		avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0xF9,
		avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0xF9,
		avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0xF9,
	}},
	{as: AVPTERNLOGD, ytab: _yvalignd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x25,
		avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x25,
		avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x25,
	}},
	{as: AVPTERNLOGQ, ytab: _yvalignd, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x25,
		avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x25,
		avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x25,
	}},
	{as: AVPTEST, ytab: _yvptest, prefix: Pavx, op: opBytes{
		avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x17,
		avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x17,
	}},
	{as: AVPTESTMB, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16, 0x26,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32, 0x26,
		avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64, 0x26,
	}},
	{as: AVPTESTMD, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{
		avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4, 0x27,
		avxEscape | evex256 | evex66 | evex0F38 | evexW0,
evexN32 | evexBcstN4, 0x27, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4, 0x27, }}, {as: AVPTESTMQ, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8, 0x27, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8, 0x27, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8, 0x27, }}, {as: AVPTESTMW, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16, 0x26, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32, 0x26, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64, 0x26, }}, {as: AVPTESTNMB, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN16, 0x26, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN32, 0x26, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN64, 0x26, }}, {as: AVPTESTNMD, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW0, evexN16 | evexBcstN4, 0x27, avxEscape | evex256 | evexF3 | evex0F38 | evexW0, evexN32 | evexBcstN4, 0x27, avxEscape | evex512 | evexF3 | evex0F38 | evexW0, evexN64 | evexBcstN4, 0x27, }}, {as: AVPTESTNMQ, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW1, evexN16 | evexBcstN8, 0x27, avxEscape | evex256 | evexF3 | evex0F38 | evexW1, evexN32 | evexBcstN8, 0x27, avxEscape | evex512 | evexF3 | evex0F38 | evexW1, evexN64 | evexBcstN8, 0x27, }}, {as: AVPTESTNMW, ytab: _yvpshufbitqmb, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evexF3 | evex0F38 | evexW1, evexN16, 0x26, avxEscape | evex256 | evexF3 | evex0F38 | evexW1, evexN32, 0x26, avxEscape | evex512 | evexF3 | evex0F38 | evexW1, evexN64, 0x26, }}, {as: AVPUNPCKHBW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x68, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x68, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x68, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x68, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x68, }}, {as: AVPUNPCKHDQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x6A, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x6A, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x6A, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x6A, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x6A, }}, {as: AVPUNPCKHQDQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x6D, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x6D, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x6D, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x6D, avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x6D, }}, {as: AVPUNPCKHWD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x69, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x69, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x69, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x69, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | 
evexZeroingEnabled, 0x69, }}, {as: AVPUNPCKLBW, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x60, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x60, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x60, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x60, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x60, }}, {as: AVPUNPCKLDQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x62, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x62, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x62, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x62, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x62, }}, {as: AVPUNPCKLQDQ, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x6C, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x6C, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x6C, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x6C, avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x6C, }}, {as: AVPUNPCKLWD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x61, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x61, avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexZeroingEnabled, 0x61, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexZeroingEnabled, 0x61, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexZeroingEnabled, 0x61, }}, {as: AVPXOR, ytab: _yvaddsubpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xEF, avxEscape | vex256 | vex66 | vex0F | vexW0, 0xEF, }}, {as: AVPXORD, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xEF, avxEscape | evex256 | evex66 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xEF, avxEscape | evex512 | evex66 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xEF, }}, {as: AVPXORQ, ytab: _yvblendmpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xEF, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xEF, avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xEF, }}, {as: AVRANGEPD, ytab: _yvfixupimmpd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x50, avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x50, avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x50, }}, {as: AVRANGEPS, ytab: _yvfixupimmpd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x50, avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x50, avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x50, }}, {as: AVRANGESD, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN8 | 
evexSaeEnabled | evexZeroingEnabled, 0x51, }}, {as: AVRANGESS, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x51, }}, {as: AVRCP14PD, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x4C, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x4C, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x4C, }}, {as: AVRCP14PS, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x4C, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x4C, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x4C, }}, {as: AVRCP14SD, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x4D, }}, {as: AVRCP14SS, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x4D, }}, {as: AVRCP28PD, ytab: _yvexp2pd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0xCA, }}, {as: AVRCP28PS, ytab: _yvexp2pd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0xCA, }}, {as: AVRCP28SD, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0xCB, }}, {as: AVRCP28SS, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0xCB, }}, {as: AVRCPPS, ytab: _yvptest, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x53, avxEscape | vex256 | vex0F | vexW0, 0x53, }}, {as: AVRCPSS, ytab: _yvrcpss, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x53, }}, {as: AVREDUCEPD, ytab: _yvgetmantpd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x56, avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x56, avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x56, }}, {as: AVREDUCEPS, ytab: _yvgetmantpd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x56, avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x56, avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x56, }}, {as: AVREDUCESD, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0x57, }}, {as: AVREDUCESS, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x57, }}, {as: AVRNDSCALEPD, ytab: _yvgetmantpd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0x09, avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN16 | 
evexBcstN8 | evexZeroingEnabled, 0x09, avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x09, }}, {as: AVRNDSCALEPS, ytab: _yvgetmantpd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0x08, avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x08, avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x08, }}, {as: AVRNDSCALESD, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F3A | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0x0B, }}, {as: AVRNDSCALESS, ytab: _yvfixupimmsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F3A | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0x0A, }}, {as: AVROUNDPD, ytab: _yvroundpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x09, avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x09, }}, {as: AVROUNDPS, ytab: _yvroundpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x08, avxEscape | vex256 | vex66 | vex0F3A | vexW0, 0x08, }}, {as: AVROUNDSD, ytab: _yvdppd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x0B, }}, {as: AVROUNDSS, ytab: _yvdppd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F3A | vexW0, 0x0A, }}, {as: AVRSQRT14PD, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x4E, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x4E, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x4E, }}, {as: AVRSQRT14PS, ytab: _yvexpandpd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x4E, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x4E, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x4E, }}, {as: AVRSQRT14SD, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexZeroingEnabled, 0x4F, }}, {as: AVRSQRT14SS, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexZeroingEnabled, 0x4F, }}, {as: AVRSQRT28PD, ytab: _yvexp2pd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN64 | evexBcstN8 | evexSaeEnabled | evexZeroingEnabled, 0xCC, }}, {as: AVRSQRT28PS, ytab: _yvexp2pd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexSaeEnabled | evexZeroingEnabled, 0xCC, }}, {as: AVRSQRT28SD, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexSaeEnabled | evexZeroingEnabled, 0xCD, }}, {as: AVRSQRT28SS, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexSaeEnabled | evexZeroingEnabled, 0xCD, }}, {as: AVRSQRTPS, ytab: _yvptest, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x52, avxEscape | vex256 | vex0F | vexW0, 0x52, }}, {as: AVRSQRTSS, ytab: _yvrcpss, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x52, }}, {as: AVSCALEFPD, ytab: _yvscalefpd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW1, 
evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x2C, avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x2C, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x2C, }}, {as: AVSCALEFPS, ytab: _yvscalefpd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x2C, avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x2C, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x2C, }}, {as: AVSCALEFSD, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x2D, }}, {as: AVSCALEFSS, ytab: _yvgetexpsd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x2D, }}, {as: AVSCATTERDPD, ytab: _yvpscatterdq, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0xA2, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0xA2, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xA2, }}, {as: AVSCATTERDPS, ytab: _yvpscatterdd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0xA2, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0xA2, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xA2, }}, {as: AVSCATTERPF0DPD, ytab: _yvgatherpf0dpd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC6, 05, }}, {as: AVSCATTERPF0DPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC6, 05, }}, {as: AVSCATTERPF0QPD, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC7, 05, }}, {as: AVSCATTERPF0QPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC7, 05, }}, {as: AVSCATTERPF1DPD, ytab: _yvgatherpf0dpd, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC6, 06, }}, {as: AVSCATTERPF1DPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC6, 06, }}, {as: AVSCATTERPF1QPD, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xC7, 06, }}, {as: AVSCATTERPF1QPS, ytab: _yvgatherpf0dps, prefix: Pavx, op: opBytes{ avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xC7, 06, }}, {as: AVSCATTERQPD, ytab: _yvpscatterdd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW1, evexN8, 0xA3, avxEscape | evex256 | evex66 | evex0F38 | evexW1, evexN8, 0xA3, avxEscape | evex512 | evex66 | evex0F38 | evexW1, evexN8, 0xA3, }}, {as: AVSCATTERQPS, ytab: _yvpscatterqd, prefix: Pavx, op: opBytes{ avxEscape | evex128 | evex66 | evex0F38 | evexW0, evexN4, 0xA3, avxEscape | evex256 | evex66 | evex0F38 | evexW0, evexN4, 0xA3, avxEscape | evex512 | evex66 | evex0F38 | evexW0, evexN4, 0xA3, }}, {as: AVSHUFF32X4, ytab: _yvshuff32x4, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x23, avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x23, }}, {as: AVSHUFF64X2, ytab: _yvshuff32x4, prefix: Pavx, op: opBytes{ avxEscape | 
evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x23, avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x23, }}, {as: AVSHUFI32X4, ytab: _yvshuff32x4, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evex66 | evex0F3A | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x43, avxEscape | evex512 | evex66 | evex0F3A | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x43, }}, {as: AVSHUFI64X2, ytab: _yvshuff32x4, prefix: Pavx, op: opBytes{ avxEscape | evex256 | evex66 | evex0F3A | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x43, avxEscape | evex512 | evex66 | evex0F3A | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x43, }}, {as: AVSHUFPD, ytab: _yvgf2p8affineinvqb, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0xC6, avxEscape | vex256 | vex66 | vex0F | vexW0, 0xC6, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0xC6, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0xC6, avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0xC6, }}, {as: AVSHUFPS, ytab: _yvgf2p8affineinvqb, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0xC6, avxEscape | vex256 | vex0F | vexW0, 0xC6, avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0xC6, avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0xC6, avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0xC6, }}, {as: AVSQRTPD, ytab: _yvcvtdq2ps, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x51, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x51, avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x51, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x51, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x51, }}, {as: AVSQRTPS, ytab: _yvcvtdq2ps, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x51, avxEscape | vex256 | vex0F | vexW0, 0x51, avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x51, avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x51, avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x51, }}, {as: AVSQRTSD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x51, avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x51, }}, {as: AVSQRTSS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x51, avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x51, }}, {as: AVSTMXCSR, ytab: _yvldmxcsr, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0xAE, 03, }}, {as: AVSUBPD, ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x5C, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x5C, avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexRoundingEnabled | evexZeroingEnabled, 0x5C, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x5C, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x5C, }}, {as: AVSUBPS, 
ytab: _yvaddpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x5C, avxEscape | vex256 | vex0F | vexW0, 0x5C, avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexRoundingEnabled | evexZeroingEnabled, 0x5C, avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x5C, avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x5C, }}, {as: AVSUBSD, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF2 | vex0F | vexW0, 0x5C, avxEscape | evex128 | evexF2 | evex0F | evexW1, evexN8 | evexRoundingEnabled | evexZeroingEnabled, 0x5C, }}, {as: AVSUBSS, ytab: _yvaddsd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vexF3 | vex0F | vexW0, 0x5C, avxEscape | evex128 | evexF3 | evex0F | evexW0, evexN4 | evexRoundingEnabled | evexZeroingEnabled, 0x5C, }}, {as: AVTESTPD, ytab: _yvptest, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x0F, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x0F, }}, {as: AVTESTPS, ytab: _yvptest, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F38 | vexW0, 0x0E, avxEscape | vex256 | vex66 | vex0F38 | vexW0, 0x0E, }}, {as: AVUCOMISD, ytab: _yvcomisd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x2E, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN8 | evexSaeEnabled, 0x2E, }}, {as: AVUCOMISS, ytab: _yvcomisd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x2E, avxEscape | evex128 | evex0F | evexW0, evexN4 | evexSaeEnabled, 0x2E, }}, {as: AVUNPCKHPD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x15, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x15, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x15, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x15, avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x15, }}, {as: AVUNPCKHPS, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x15, avxEscape | vex256 | vex0F | vexW0, 0x15, avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x15, avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x15, avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x15, }}, {as: AVUNPCKLPD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x14, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x14, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x14, avxEscape | evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x14, avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x14, }}, {as: AVUNPCKLPS, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x14, avxEscape | vex256 | vex0F | vexW0, 0x14, avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x14, avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x14, avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x14, }}, {as: AVXORPD, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex66 | vex0F | vexW0, 0x57, avxEscape | vex256 | vex66 | vex0F | vexW0, 0x57, avxEscape | evex128 | evex66 | evex0F | evexW1, evexN16 | evexBcstN8 | evexZeroingEnabled, 0x57, avxEscape | 
evex256 | evex66 | evex0F | evexW1, evexN32 | evexBcstN8 | evexZeroingEnabled, 0x57, avxEscape | evex512 | evex66 | evex0F | evexW1, evexN64 | evexBcstN8 | evexZeroingEnabled, 0x57, }}, {as: AVXORPS, ytab: _yvandnpd, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x57, avxEscape | vex256 | vex0F | vexW0, 0x57, avxEscape | evex128 | evex0F | evexW0, evexN16 | evexBcstN4 | evexZeroingEnabled, 0x57, avxEscape | evex256 | evex0F | evexW0, evexN32 | evexBcstN4 | evexZeroingEnabled, 0x57, avxEscape | evex512 | evex0F | evexW0, evexN64 | evexBcstN4 | evexZeroingEnabled, 0x57, }}, {as: AVZEROALL, ytab: _yvzeroall, prefix: Pavx, op: opBytes{ avxEscape | vex256 | vex0F | vexW0, 0x77, }}, {as: AVZEROUPPER, ytab: _yvzeroall, prefix: Pavx, op: opBytes{ avxEscape | vex128 | vex0F | vexW0, 0x77, }}, }
go/src/cmd/internal/obj/x86/avx_optabs.go/0
{ "file_path": "go/src/cmd/internal/obj/x86/avx_optabs.go", "repo_id": "go", "token_count": 138195 }
159
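The optab rows above pack a lot into each opBytes slice: every form is a prefix/escape bitmask (avxEscape|vex…|… for VEX, avxEscape|evex…|… for EVEX), an extra options byte for EVEX forms (operand size evexN*, broadcast evexBcstN*, zeroing/SAE/rounding), the opcode byte, and sometimes a /digit for the ModRM reg field (e.g. 0x73, 02 for the VPSRLQ-by-immediate form). Below is a toy mirror of the AVPSRLQ row with the bitmasks reduced to the one property the demo needs; the real flag values live in cmd/internal/obj/x86 and are not reproduced here.

package main

import "fmt"

// form is a toy stand-in for one opBytes entry: in the real table the
// width and encoding are packed into a flags word, not separate fields.
type form struct {
	width  int  // 128, 256, or 512
	evex   bool // EVEX-encoded (AVX-512) form vs. VEX
	opcode byte
}

// vpsrlq mirrors the AVPSRLQ row above: VEX forms for 128/256 and EVEX
// forms for 128/256/512, all sharing opcode 0xD3 for the shift-by-xmm
// variant.
var vpsrlq = []form{
	{128, false, 0xD3}, {256, false, 0xD3},
	{128, true, 0xD3}, {256, true, 0xD3}, {512, true, 0xD3},
}

// pick selects the entry for a requested vector width and encoding, the
// way the assembler conceptually chooses among a row's forms.
func pick(forms []form, width int, wantEVEX bool) (form, bool) {
	for _, f := range forms {
		if f.width == width && f.evex == wantEVEX {
			return f, true
		}
	}
	return form{}, false
}

func main() {
	if f, ok := pick(vpsrlq, 512, true); ok {
		fmt.Printf("VPSRLQ zmm form: EVEX opcode %#x\n", f.opcode)
	}
}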
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package objabi import ( "internal/testenv" "os/exec" "strings" "testing" ) var escapeTests = []struct { Path string Escaped string }{ {"foo/bar/v1", "foo/bar/v1"}, {"foo/bar/v.1", "foo/bar/v%2e1"}, {"f.o.o/b.a.r/v1", "f.o.o/b.a.r/v1"}, {"f.o.o/b.a.r/v.1", "f.o.o/b.a.r/v%2e1"}, {"f.o.o/b.a.r/v..1", "f.o.o/b.a.r/v%2e%2e1"}, {"f.o.o/b.a.r/v..1.", "f.o.o/b.a.r/v%2e%2e1%2e"}, {"f.o.o/b.a.r/v%1", "f.o.o/b.a.r/v%251"}, {"runtime", "runtime"}, {"sync/atomic", "sync/atomic"}, {"golang.org/x/tools/godoc", "golang.org/x/tools/godoc"}, {"foo.bar/baz.quux", "foo.bar/baz%2equux"}, {"", ""}, {"%foo%bar", "%25foo%25bar"}, {"\x01\x00\x7F☺", "%01%00%7f%e2%98%ba"}, } func TestPathToPrefix(t *testing.T) { for _, tc := range escapeTests { if got := PathToPrefix(tc.Path); got != tc.Escaped { t.Errorf("expected PathToPrefix(%s) = %s, got %s", tc.Path, tc.Escaped, got) } } } func TestPrefixToPath(t *testing.T) { for _, tc := range escapeTests { got, err := PrefixToPath(tc.Escaped) if err != nil { t.Errorf("expected PrefixToPath(%s) err = nil, got %v", tc.Escaped, err) } if got != tc.Path { t.Errorf("expected PrefixToPath(%s) = %s, got %s", tc.Escaped, tc.Path, got) } } } func TestPrefixToPathError(t *testing.T) { tests := []string{ "foo%", "foo%1", "foo%%12", "foo%1g", } for _, tc := range tests { _, err := PrefixToPath(tc) if err == nil { t.Errorf("expected PrefixToPath(%s) err != nil, got nil", tc) } } } func TestRuntimePackageList(t *testing.T) { // Test that all packages imported by the runtime are marked as runtime // packages. testenv.MustHaveGoBuild(t) goCmd, err := testenv.GoTool() if err != nil { t.Fatal(err) } pkgList, err := exec.Command(goCmd, "list", "-deps", "runtime").Output() if err != nil { if err, ok := err.(*exec.ExitError); ok { t.Log(string(err.Stderr)) } t.Fatal(err) } for _, pkg := range strings.Split(strings.TrimRight(string(pkgList), "\n"), "\n") { if pkg == "unsafe" { continue } if !LookupPkgSpecial(pkg).Runtime { t.Errorf("package %s is imported by runtime, but not marked Runtime", pkg) } } }
go/src/cmd/internal/objabi/path_test.go/0
{ "file_path": "go/src/cmd/internal/objabi/path_test.go", "repo_id": "go", "token_count": 1114 }
160
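escapeTests pins the escaping rule down precisely: '%', control bytes, and bytes >= 0x7F are %xx-escaped (lowercase hex) anywhere in the path, while '.' is escaped only after the last '/'. Here is a sketch consistent with every row of that table; it assumes nothing beyond what the table exercises, and the real implementation is objabi.PathToPrefix.

package main

import (
	"fmt"
	"strings"
)

const hexdigits = "0123456789abcdef"

// pathToPrefixSketch reproduces the escaping visible in escapeTests:
// bytes that could confuse the toolchain are rewritten as %xx, and '.'
// is special only in the final path element.
func pathToPrefixSketch(s string) string {
	slash := strings.LastIndex(s, "/")
	var b []byte
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c <= ' ' || (c == '.' && i > slash) || c == '%' || c >= 0x7F {
			b = append(b, '%', hexdigits[c>>4], hexdigits[c&0xF])
		} else {
			b = append(b, c)
		}
	}
	return string(b)
}

func main() {
	fmt.Println(pathToPrefixSketch("f.o.o/b.a.r/v.1")) // f.o.o/b.a.r/v%2e1
	fmt.Println(pathToPrefixSketch("%foo%bar"))        // %25foo%25bar
}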
// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package pkgpath import ( "internal/testenv" "os" "testing" ) const testEnvName = "GO_PKGPATH_TEST_COMPILER" // This init function supports TestToSymbolFunc. For simplicity, // we use the test binary itself as a sample gccgo driver. // We set an environment variable to specify how it should behave. func init() { switch os.Getenv(testEnvName) { case "": return case "v1": os.Stdout.WriteString(`.string "go.l__ufer.Run"`) os.Exit(0) case "v2": os.Stdout.WriteString(`.string "go.l..u00e4ufer.Run"`) os.Exit(0) case "v3": os.Stdout.WriteString(`.string "go_0l_u00e4ufer.Run"`) os.Exit(0) case "error": os.Stdout.WriteString(`unknown string`) os.Exit(0) } } func TestToSymbolFunc(t *testing.T) { testenv.MustHaveExec(t) const input = "pä世🜃" tests := []struct { env string fail bool mangled string }{ { env: "v1", mangled: "p___", }, { env: "v2", mangled: "p..u00e4..u4e16..U0001f703", }, { env: "v3", mangled: "p_u00e4_u4e16_U0001f703", }, { env: "error", fail: true, }, } cmd := os.Args[0] tmpdir := t.TempDir() defer os.Unsetenv(testEnvName) for _, test := range tests { t.Run(test.env, func(t *testing.T) { os.Setenv(testEnvName, test.env) fn, err := ToSymbolFunc(cmd, tmpdir) if err != nil { if !test.fail { t.Errorf("ToSymbolFunc(%q, %q): unexpected error %v", cmd, tmpdir, err) } } else if test.fail { t.Errorf("ToSymbolFunc(%q, %q) succeeded but expected to fail", cmd, tmpdir) } else if got, want := fn(input), test.mangled; got != want { t.Errorf("ToSymbolFunc(%q, %q)(%q) = %q, want %q", cmd, tmpdir, input, got, want) } }) } } var symbolTests = []struct { input, v1, v2, v3 string }{ { "", "", "", "", }, { "bytes", "bytes", "bytes", "bytes", }, { "net/http", "net_http", "net..z2fhttp", "net_1http", }, { "golang.org/x/net/http", "golang_org_x_net_http", "golang.x2eorg..z2fx..z2fnet..z2fhttp", "golang_0org_1x_1net_1http", }, { "pä世.🜃", "p____", "p..u00e4..u4e16.x2e..U0001f703", "p_u00e4_u4e16_0_U0001f703", }, } func TestV1(t *testing.T) { for _, test := range symbolTests { if got, want := toSymbolV1(test.input), test.v1; got != want { t.Errorf("toSymbolV1(%q) = %q, want %q", test.input, got, want) } } } func TestV2(t *testing.T) { for _, test := range symbolTests { if got, want := toSymbolV2(test.input), test.v2; got != want { t.Errorf("toSymbolV2(%q) = %q, want %q", test.input, got, want) } } } func TestV3(t *testing.T) { for _, test := range symbolTests { if got, want := toSymbolV3(test.input), test.v3; got != want { t.Errorf("toSymbolV3(%q) = %q, want %q", test.input, got, want) } } }
go/src/cmd/internal/pkgpath/pkgpath_test.go/0
{ "file_path": "go/src/cmd/internal/pkgpath/pkgpath_test.go", "repo_id": "go", "token_count": 1434 }
161
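The v3 rows encode a simple scheme: ASCII letters and digits pass through, '.' becomes "_0", '/' becomes "_1", and any other rune becomes "_uXXXX" or "_UXXXXXXXX" depending on whether it fits in 16 bits. A sketch covering only the cases symbolTests exercises; the real toSymbolV3 also handles punctuation not shown here.

package main

import (
	"fmt"
	"strings"
)

// toSymbolV3Sketch matches the v3 column of symbolTests above, e.g.
// "net/http" -> "net_1http" and "pä世.🜃" -> "p_u00e4_u4e16_0_U0001f703".
func toSymbolV3Sketch(ppath string) string {
	var sb strings.Builder
	for _, r := range ppath {
		switch {
		case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z', '0' <= r && r <= '9':
			sb.WriteRune(r)
		case r == '.':
			sb.WriteString("_0")
		case r == '/':
			sb.WriteString("_1")
		case r < 0x10000:
			fmt.Fprintf(&sb, "_u%04x", r)
		default:
			fmt.Fprintf(&sb, "_U%08x", r)
		}
	}
	return sb.String()
}

func main() {
	fmt.Println(toSymbolV3Sketch("golang.org/x/net/http")) // golang_0org_1x_1net_1http
}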
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package test2json import ( "bytes" "encoding/json" "flag" "fmt" "io" "os" "path/filepath" "reflect" "strings" "testing" "unicode/utf8" ) var update = flag.Bool("update", false, "rewrite testdata/*.json files") func TestGolden(t *testing.T) { files, err := filepath.Glob("testdata/*.test") if err != nil { t.Fatal(err) } for _, file := range files { name := strings.TrimSuffix(filepath.Base(file), ".test") t.Run(name, func(t *testing.T) { orig, err := os.ReadFile(file) if err != nil { t.Fatal(err) } // Test one line written to c at a time. // Assume that's the most likely to be handled correctly. var buf bytes.Buffer c := NewConverter(&buf, "", 0) in := append([]byte{}, orig...) for _, line := range bytes.SplitAfter(in, []byte("\n")) { writeAndKill(c, line) } c.Close() if *update { js := strings.TrimSuffix(file, ".test") + ".json" t.Logf("rewriting %s", js) if err := os.WriteFile(js, buf.Bytes(), 0666); err != nil { t.Fatal(err) } return } want, err := os.ReadFile(strings.TrimSuffix(file, ".test") + ".json") if err != nil { t.Fatal(err) } diffJSON(t, buf.Bytes(), want) if t.Failed() { // If the line-at-a-time conversion fails, no point testing boundary conditions. return } // Write entire input in bulk. t.Run("bulk", func(t *testing.T) { buf.Reset() c = NewConverter(&buf, "", 0) in = append([]byte{}, orig...) writeAndKill(c, in) c.Close() diffJSON(t, buf.Bytes(), want) }) // In bulk again with \r\n. t.Run("crlf", func(t *testing.T) { buf.Reset() c = NewConverter(&buf, "", 0) in = bytes.ReplaceAll(orig, []byte("\n"), []byte("\r\n")) writeAndKill(c, in) c.Close() diffJSON(t, bytes.ReplaceAll(buf.Bytes(), []byte(`\r\n`), []byte(`\n`)), want) }) // Write 2 bytes at a time on even boundaries. t.Run("even2", func(t *testing.T) { buf.Reset() c = NewConverter(&buf, "", 0) in = append([]byte{}, orig...) for i := 0; i < len(in); i += 2 { if i+2 <= len(in) { writeAndKill(c, in[i:i+2]) } else { writeAndKill(c, in[i:]) } } c.Close() diffJSON(t, buf.Bytes(), want) }) // Write 2 bytes at a time on odd boundaries. t.Run("odd2", func(t *testing.T) { buf.Reset() c = NewConverter(&buf, "", 0) in = append([]byte{}, orig...) if len(in) > 0 { writeAndKill(c, in[:1]) } for i := 1; i < len(in); i += 2 { if i+2 <= len(in) { writeAndKill(c, in[i:i+2]) } else { writeAndKill(c, in[i:]) } } c.Close() diffJSON(t, buf.Bytes(), want) }) // Test with very small output buffers, to check that // UTF8 sequences are not broken up. for b := 5; b <= 8; b++ { t.Run(fmt.Sprintf("tiny%d", b), func(t *testing.T) { oldIn := inBuffer oldOut := outBuffer defer func() { inBuffer = oldIn outBuffer = oldOut }() inBuffer = 64 outBuffer = b buf.Reset() c = NewConverter(&buf, "", 0) in = append([]byte{}, orig...) writeAndKill(c, in) c.Close() diffJSON(t, buf.Bytes(), want) }) } }) } } // writeAndKill writes b to w and then fills b with Zs. // The filling makes sure that if w is holding onto b for // future use, that future use will have obviously wrong data. func writeAndKill(w io.Writer, b []byte) { w.Write(b) for i := range b { b[i] = 'Z' } } // diffJSON diffs the stream we have against the stream we want // and fails the test with a useful message if they don't match. func diffJSON(t *testing.T, have, want []byte) { t.Helper() type event map[string]any // Parse into events, one per line. 
parseEvents := func(b []byte) ([]event, []string) { t.Helper() var events []event var lines []string for _, line := range bytes.SplitAfter(b, []byte("\n")) { if len(line) > 0 { line = bytes.TrimSpace(line) var e event err := json.Unmarshal(line, &e) if err != nil { t.Errorf("unmarshal %s: %v", b, err) continue } events = append(events, e) lines = append(lines, string(line)) } } return events, lines } haveEvents, haveLines := parseEvents(have) wantEvents, wantLines := parseEvents(want) if t.Failed() { return } // Make sure the events we have match the events we want. // At each step we're matching haveEvents[i] against wantEvents[j]. // i and j can move independently due to choices about exactly // how to break up text in "output" events. i := 0 j := 0 // Fail reports a failure at the current i,j and stops the test. // It shows the events around the current positions, // with the current positions marked. fail := func() { var buf bytes.Buffer show := func(i int, lines []string) { for k := -2; k < 5; k++ { marker := "" if k == 0 { marker = "» " } if 0 <= i+k && i+k < len(lines) { fmt.Fprintf(&buf, "\t%s%s\n", marker, lines[i+k]) } } if i >= len(lines) { // show marker after end of input fmt.Fprintf(&buf, "\t» \n") } } fmt.Fprintf(&buf, "have:\n") show(i, haveLines) fmt.Fprintf(&buf, "want:\n") show(j, wantLines) t.Fatal(buf.String()) } var outputTest string // current "Test" key in "output" events var wantOutput, haveOutput string // collected "Output" of those events // getTest returns the "Test" setting, or "" if it is missing. getTest := func(e event) string { s, _ := e["Test"].(string) return s } // checkOutput collects output from the haveEvents for the current outputTest // and then checks that the collected output matches the wanted output. checkOutput := func() { for i < len(haveEvents) && haveEvents[i]["Action"] == "output" && getTest(haveEvents[i]) == outputTest { haveOutput += haveEvents[i]["Output"].(string) i++ } if haveOutput != wantOutput { t.Errorf("output mismatch for Test=%q:\nhave %q\nwant %q", outputTest, haveOutput, wantOutput) fail() } haveOutput = "" wantOutput = "" } // Walk through wantEvents matching against haveEvents. for j = range wantEvents { e := wantEvents[j] if e["Action"] == "output" && getTest(e) == outputTest { wantOutput += e["Output"].(string) continue } checkOutput() if e["Action"] == "output" { outputTest = getTest(e) wantOutput += e["Output"].(string) continue } if i >= len(haveEvents) { t.Errorf("early end of event stream: missing event") fail() } if !reflect.DeepEqual(haveEvents[i], e) { t.Errorf("events out of sync") fail() } i++ } checkOutput() if i < len(haveEvents) { t.Errorf("extra events in stream") fail() } } func TestTrimUTF8(t *testing.T) { s := "hello α ☺ 😂 world" // α is 2-byte, ☺ is 3-byte, 😂 is 4-byte b := []byte(s) for i := 0; i < len(s); i++ { j := trimUTF8(b[:i]) u := string([]rune(s[:j])) + string([]rune(s[j:])) if u != s { t.Errorf("trimUTF8(%q) = %d (-%d), not at boundary (split: %q %q)", s[:i], j, i-j, s[:j], s[j:]) } if utf8.FullRune(b[j:i]) { t.Errorf("trimUTF8(%q) = %d (-%d), too early (missed: %q)", s[:j], j, i-j, s[j:i]) } } }
go/src/cmd/internal/test2json/test2json_test.go/0
{ "file_path": "go/src/cmd/internal/test2json/test2json_test.go", "repo_id": "go", "token_count": 3227 }
162
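trimUTF8's contract, per TestTrimUTF8, is to return the longest prefix length that does not split a rune: never cut inside a multi-byte sequence, and never cut earlier than necessary. A sketch with that property (not necessarily the real function body):

package main

import (
	"fmt"
	"unicode/utf8"
)

// trimUTF8Sketch returns j such that b[:j] does not end in a partial
// UTF-8 sequence. It walks back at most utf8.UTFMax-1 bytes looking for
// the start of the final rune.
func trimUTF8Sketch(b []byte) int {
	for j := len(b); j > 0 && len(b)-j < utf8.UTFMax; j-- {
		if utf8.RuneStart(b[j-1]) {
			if utf8.FullRune(b[j-1:]) {
				return len(b) // ends on a complete rune: keep everything
			}
			return j - 1 // trailing bytes are only a rune prefix: drop them
		}
	}
	return len(b) // no rune start nearby: not a valid UTF-8 tail anyway
}

func main() {
	b := []byte("héllo") // 'é' is two bytes
	fmt.Println(trimUTF8Sketch(b[:2])) // 1: cuts before the partial 'é'
}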
=== RUN TestAddrStringAllocs === RUN TestAddrStringAllocs/zero === NAME TestAddrStringAllocs === RUN TestAddrStringAllocs/ipv4 === NAME TestAddrStringAllocs === RUN TestAddrStringAllocs/ipv6 === NAME TestAddrStringAllocs === RUN TestAddrStringAllocs/ipv6+zone === NAME TestAddrStringAllocs === RUN TestAddrStringAllocs/ipv4-in-ipv6 === NAME TestAddrStringAllocs === RUN TestAddrStringAllocs/ipv4-in-ipv6+zone === NAME TestAddrStringAllocs --- PASS: TestAddrStringAllocs (0.00s)  --- PASS: TestAddrStringAllocs/zero (0.00s)  --- PASS: TestAddrStringAllocs/ipv4 (0.00s)  --- PASS: TestAddrStringAllocs/ipv6 (0.00s)  --- PASS: TestAddrStringAllocs/ipv6+zone (0.00s)  --- PASS: TestAddrStringAllocs/ipv4-in-ipv6 (0.00s)  --- PASS: TestAddrStringAllocs/ipv4-in-ipv6+zone (0.00s) === NAME === RUN TestPrefixString --- PASS: TestPrefixString (0.00s) === NAME === RUN TestInvalidAddrPortString --- PASS: TestInvalidAddrPortString (0.00s) === NAME === RUN TestAsSlice --- PASS: TestAsSlice (0.00s) === NAME === NAME TestInlining inlining_test.go:102: not in expected set, but also inlinable: "Addr.string4" inlining_test.go:102: not in expected set, but also inlinable: "Prefix.isZero" inlining_test.go:102: not in expected set, but also inlinable: "IPv4Unspecified" inlining_test.go:102: not in expected set, but also inlinable: "joinHostPort" inlining_test.go:102: not in expected set, but also inlinable: "Addr.MarshalBinary" inlining_test.go:102: not in expected set, but also inlinable: "bePutUint64" inlining_test.go:102: not in expected set, but also inlinable: "mask6" inlining_test.go:102: not in expected set, but also inlinable: "AddrPort.isZero" inlining_test.go:102: not in expected set, but also inlinable: "stringsLastIndexByte" inlining_test.go:102: not in expected set, but also inlinable: "Addr.isZero" inlining_test.go:102: not in expected set, but also inlinable: "bePutUint32" inlining_test.go:102: not in expected set, but also inlinable: "leUint16" inlining_test.go:102: not in expected set, but also inlinable: "Addr.string6" inlining_test.go:102: not in expected set, but also inlinable: "beUint64" inlining_test.go:102: not in expected set, but also inlinable: "appendHexPad" inlining_test.go:102: not in expected set, but also inlinable: "lePutUint16" --- PASS: TestInlining (0.10s) === RUN FuzzParse fuzz: elapsed: 0s, gathering baseline coverage: 0/390 completed fuzz: elapsed: 0s, gathering baseline coverage: 390/390 completed, now fuzzing with 16 workers fuzz: elapsed: 3s, execs: 438666 (146173/sec), new interesting: 12 (total: 402) fuzz: elapsed: 4s, execs: 558467 (147850/sec), new interesting: 15 (total: 405) --- PASS: FuzzParse (3.85s) === NAME PASS
go/src/cmd/internal/test2json/testdata/framefuzz.test/0
{ "file_path": "go/src/cmd/internal/test2json/testdata/framefuzz.test", "repo_id": "go", "token_count": 1125 }
163
=== RUN TestVet === PAUSE TestVet === RUN TestVetAsm === PAUSE TestVetAsm === RUN TestVetDirs === PAUSE TestVetDirs === RUN TestTags === PAUSE TestTags === RUN TestVetVerbose === PAUSE TestVetVerbose === CONT TestVet === CONT TestTags === CONT TestVetVerbose === RUN TestTags/testtag === PAUSE TestTags/testtag === CONT TestVetDirs === CONT TestVetAsm === RUN TestVet/0 === PAUSE TestVet/0 === RUN TestVet/1 === PAUSE TestVet/1 === RUN TestVet/2 === PAUSE TestVet/2 === RUN TestVet/3 === PAUSE TestVet/3 === RUN TestVet/4 === RUN TestTags/x_testtag_y === PAUSE TestVet/4 === RUN TestVet/5 === PAUSE TestVet/5 === PAUSE TestTags/x_testtag_y === RUN TestVet/6 === RUN TestTags/x,testtag,y === PAUSE TestTags/x,testtag,y === RUN TestVetDirs/testingpkg === PAUSE TestVet/6 === CONT TestTags/x,testtag,y === PAUSE TestVetDirs/testingpkg === RUN TestVetDirs/divergent === RUN TestVet/7 === PAUSE TestVet/7 === PAUSE TestVetDirs/divergent === CONT TestTags/x_testtag_y === CONT TestTags/testtag === RUN TestVetDirs/buildtag === PAUSE TestVetDirs/buildtag === CONT TestVet/0 === CONT TestVet/4 === RUN TestVetDirs/incomplete === PAUSE TestVetDirs/incomplete === RUN TestVetDirs/cgo === PAUSE TestVetDirs/cgo === CONT TestVet/7 === CONT TestVet/6 --- PASS: TestVetVerbose (0.04s) === CONT TestVet/5 === CONT TestVet/3 === CONT TestVet/2 --- PASS: TestTags (0.00s) --- PASS: TestTags/x_testtag_y (0.04s) vet_test.go:187: -tags=x testtag y --- PASS: TestTags/x,testtag,y (0.04s) vet_test.go:187: -tags=x,testtag,y --- PASS: TestTags/testtag (0.04s) vet_test.go:187: -tags=testtag === CONT TestVet/1 === CONT TestVetDirs/testingpkg === CONT TestVetDirs/buildtag === CONT TestVetDirs/divergent === CONT TestVetDirs/incomplete === CONT TestVetDirs/cgo --- PASS: TestVet (0.39s) --- PASS: TestVet/5 (0.07s) vet_test.go:114: files: ["testdata/copylock_func.go" "testdata/rangeloop.go"] --- PASS: TestVet/3 (0.07s) vet_test.go:114: files: ["testdata/composite.go" "testdata/nilfunc.go"] --- PASS: TestVet/6 (0.07s) vet_test.go:114: files: ["testdata/copylock_range.go" "testdata/shadow.go"] --- PASS: TestVet/2 (0.07s) vet_test.go:114: files: ["testdata/bool.go" "testdata/method.go" "testdata/unused.go"] --- PASS: TestVet/0 (0.13s) vet_test.go:114: files: ["testdata/assign.go" "testdata/httpresponse.go" "testdata/structtag.go"] --- PASS: TestVet/4 (0.16s) vet_test.go:114: files: ["testdata/copylock.go" "testdata/print.go"] --- PASS: TestVet/1 (0.07s) vet_test.go:114: files: ["testdata/atomic.go" "testdata/lostcancel.go" "testdata/unsafeptr.go"] --- PASS: TestVet/7 (0.19s) vet_test.go:114: files: ["testdata/deadcode.go" "testdata/shift.go"] --- PASS: TestVetDirs (0.01s) --- PASS: TestVetDirs/testingpkg (0.06s) --- PASS: TestVetDirs/divergent (0.05s) --- PASS: TestVetDirs/buildtag (0.06s) --- PASS: TestVetDirs/incomplete (0.05s) --- PASS: TestVetDirs/cgo (0.04s) --- PASS: TestVetAsm (0.75s) PASS ok cmd/vet (cached)
go/src/cmd/internal/test2json/testdata/vet.test/0
{ "file_path": "go/src/cmd/internal/test2json/testdata/vet.test", "repo_id": "go", "token_count": 1435 }
164
// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package benchmark import ( "testing" ) func TestMakeBenchString(t *testing.T) { tests := []struct { have, want string }{ {"foo", "BenchmarkFoo"}, {" foo ", "BenchmarkFoo"}, {"foo bar", "BenchmarkFooBar"}, } for i, test := range tests { if v := makeBenchString(test.have); test.want != v { t.Errorf("test[%d] makeBenchString(%q) == %q, want %q", i, test.have, v, test.want) } } } func TestPProfFlag(t *testing.T) { tests := []struct { name string want bool }{ {"", false}, {"foo", true}, } for i, test := range tests { b := New(GC, test.name) if v := b.shouldPProf(); test.want != v { t.Errorf("test[%d] shouldPProf() == %v, want %v", i, v, test.want) } } } func TestPProfNames(t *testing.T) { want := "foo_BenchmarkTest.cpuprof" if v := makePProfFilename("foo", "test", "cpuprof"); v != want { t.Errorf("makePProfFilename() == %q, want %q", v, want) } } // Ensure that public APIs work with a nil Metrics object. func TestNilBenchmarkObject(t *testing.T) { var b *Metrics b.Start("TEST") b.Report(nil) }
go/src/cmd/link/internal/benchmark/bench_test.go/0
{ "file_path": "go/src/cmd/link/internal/benchmark/bench_test.go", "repo_id": "go", "token_count": 490 }
165
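makeBenchString's behavior is fully pinned down by the table: trim, split on spaces, capitalize each word, and prefix "Benchmark". A sketch matching all three cases:

package main

import (
	"strings"
	"unicode"
)

// makeBenchStringSketch mirrors what TestMakeBenchString checks:
// " foo " -> "BenchmarkFoo", "foo bar" -> "BenchmarkFooBar".
func makeBenchStringSketch(name string) string {
	var sb strings.Builder
	sb.WriteString("Benchmark")
	for _, f := range strings.Fields(name) { // Fields also trims for us
		r := []rune(f)
		r[0] = unicode.ToUpper(r[0])
		sb.WriteString(string(r))
	}
	return sb.String()
}

func main() {
	println(makeBenchStringSketch("foo bar")) // BenchmarkFooBar
}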
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build wasm || windows package ld const syscallExecSupported = false func (ctxt *Link) execArchive(argv []string) { panic("should never arrive here") }
go/src/cmd/link/internal/ld/execarchive_noexec.go/0
{ "file_path": "go/src/cmd/link/internal/ld/execarchive_noexec.go", "repo_id": "go", "token_count": 95 }
166
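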
// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ld import ( "fmt" "internal/testenv" "os" "regexp" "strconv" "testing" ) // See also $GOROOT/test/nosplit.go for multi-platform edge case tests. func TestStackCheckOutput(t *testing.T) { testenv.MustHaveGoBuild(t) t.Parallel() cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", os.DevNull, "./testdata/stackcheck") // The rules for computing frame sizes on all of the // architectures are complicated, so just do this on amd64. cmd.Env = append(os.Environ(), "GOARCH=amd64", "GOOS=linux") outB, err := cmd.CombinedOutput() if err == nil { t.Fatalf("expected link to fail") } out := string(outB) t.Logf("linker output:\n%s", out) // Get expected limit. limitRe := regexp.MustCompile(`nosplit stack over (\d+) byte limit`) m := limitRe.FindStringSubmatch(out) if m == nil { t.Fatalf("no overflow errors in output") } limit, _ := strconv.Atoi(m[1]) wantMap := map[string]string{ "main.startSelf": fmt.Sprintf( `main.startSelf<0> grows 1008 bytes %d bytes over limit `, 1008-limit), "main.startChain": fmt.Sprintf( `main.startChain<0> grows 32 bytes, calls main.chain0<0> grows 48 bytes, calls main.chainEnd<0> grows 1008 bytes %d bytes over limit grows 32 bytes, calls main.chain2<0> grows 80 bytes, calls main.chainEnd<0> grows 1008 bytes %d bytes over limit `, 32+48+1008-limit, 32+80+1008-limit), "main.startRec": `main.startRec<0> grows 8 bytes, calls main.startRec0<0> grows 8 bytes, calls main.startRec<0> infinite cycle `, } // Parse stanzas stanza := regexp.MustCompile(`^(.*): nosplit stack over \d+ byte limit\n(.*\n(?: .*\n)*)`) // Strip comments from cmd/go out = regexp.MustCompile(`(?m)^#.*\n`).ReplaceAllString(out, "") for len(out) > 0 { m := stanza.FindStringSubmatch(out) if m == nil { t.Fatalf("unexpected output:\n%s", out) } out = out[len(m[0]):] fn := m[1] got := m[2] want, ok := wantMap[fn] if !ok { t.Errorf("unexpected function: %s", fn) } else if want != got { t.Errorf("want:\n%sgot:\n%s", want, got) } } }
go/src/cmd/link/internal/ld/stackcheck_test.go/0
{ "file_path": "go/src/cmd/link/internal/ld/stackcheck_test.go", "repo_id": "go", "token_count": 967 }
167
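The two regexps do the heavy lifting in that test: limitRe pulls the byte limit out of the first diagnostic, and stanza splits the output into per-function blocks (a header line plus indented detail lines). A standalone demo on a synthetic message of the shape the test expects:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Synthetic linker output shaped like the diagnostics
	// stackcheck_test.go parses; the numbers are made up.
	out := "main.f: nosplit stack over 792 byte limit\n" +
		"main.f<0>\n    grows 1000 bytes\n    208 bytes over limit\n"

	limitRe := regexp.MustCompile(`nosplit stack over (\d+) byte limit`)
	fmt.Println(limitRe.FindStringSubmatch(out)[1]) // "792"

	stanza := regexp.MustCompile(`^(.*): nosplit stack over \d+ byte limit\n(.*\n(?: .*\n)*)`)
	m := stanza.FindStringSubmatch(out)
	fmt.Printf("fn=%q\nbody=%q\n", m[1], m[2])
}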
// This file is needed to make "go build" work for a package with external functions.
go/src/cmd/link/internal/ld/testdata/issue10978/main.s/0
{ "file_path": "go/src/cmd/link/internal/ld/testdata/issue10978/main.s", "repo_id": "go", "token_count": 19 }
168
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ld import ( "cmd/internal/objabi" "cmd/link/internal/loader" "cmd/link/internal/sym" "sort" ) type byTypeStr []typelinkSortKey type typelinkSortKey struct { TypeStr string Type loader.Sym } func (s byTypeStr) Less(i, j int) bool { return s[i].TypeStr < s[j].TypeStr } func (s byTypeStr) Len() int { return len(s) } func (s byTypeStr) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // typelink generates the typelink table which is used by reflect.typelinks(). // Types that should be added to the typelinks table are marked with the // MakeTypelink attribute by the compiler. func (ctxt *Link) typelink() { ldr := ctxt.loader typelinks := byTypeStr{} var itabs []loader.Sym for s := loader.Sym(1); s < loader.Sym(ldr.NSym()); s++ { if !ldr.AttrReachable(s) { continue } if ldr.IsTypelink(s) { typelinks = append(typelinks, typelinkSortKey{decodetypeStr(ldr, ctxt.Arch, s), s}) } else if ldr.IsItab(s) { itabs = append(itabs, s) } } sort.Sort(typelinks) tl := ldr.CreateSymForUpdate("runtime.typelink", 0) tl.SetType(sym.STYPELINK) ldr.SetAttrLocal(tl.Sym(), true) tl.SetSize(int64(4 * len(typelinks))) tl.Grow(tl.Size()) relocs := tl.AddRelocs(len(typelinks)) for i, s := range typelinks { r := relocs.At(i) r.SetSym(s.Type) r.SetOff(int32(i * 4)) r.SetSiz(4) r.SetType(objabi.R_ADDROFF) } ptrsize := ctxt.Arch.PtrSize il := ldr.CreateSymForUpdate("runtime.itablink", 0) il.SetType(sym.SITABLINK) ldr.SetAttrLocal(il.Sym(), true) il.SetSize(int64(ptrsize * len(itabs))) il.Grow(il.Size()) relocs = il.AddRelocs(len(itabs)) for i, s := range itabs { r := relocs.At(i) r.SetSym(s) r.SetOff(int32(i * ptrsize)) r.SetSiz(uint8(ptrsize)) r.SetType(objabi.R_ADDR) } }
go/src/cmd/link/internal/ld/typelink.go/0
{ "file_path": "go/src/cmd/link/internal/ld/typelink.go", "repo_id": "go", "token_count": 850 }
169
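The core of typelink() is an ordering step: (type string, symbol) pairs are sorted by the decoded type string before the 4-byte R_ADDROFF relocation entries are emitted. A standalone sketch of that sort with invented data, mirroring the byTypeStr wrapper:

package main

import (
	"fmt"
	"sort"
)

// sortKey mirrors typelinkSortKey: sort by the decoded type string,
// carrying the symbol along. The data below is made up for illustration.
type sortKey struct {
	TypeStr string
	Sym     int
}

type byTypeStr []sortKey

func (s byTypeStr) Less(i, j int) bool { return s[i].TypeStr < s[j].TypeStr }
func (s byTypeStr) Len() int           { return len(s) }
func (s byTypeStr) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func main() {
	links := byTypeStr{{"main.T", 5}, {"[]main.T", 3}, {"*main.T", 7}}
	sort.Sort(links)
	fmt.Println(links) // entries now ordered by type string
}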
// Inferno utils/8l/asm.c // https://bitbucket.org/inferno-os/inferno-os/src/master/utils/8l/asm.c // // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package sym // Terrible but standard terminology. // A segment describes a block of file to load into memory. // A section further describes the pieces of that block for // use in debuggers and such. type Segment struct { Rwx uint8 // permission as usual unix bits (5 = r-x etc) Vaddr uint64 // virtual address Length uint64 // length in memory Fileoff uint64 // file offset Filelen uint64 // length on disk Sections []*Section } type Section struct { Rwx uint8 Extnum int16 Align int32 Name string Vaddr uint64 Length uint64 Seg *Segment Elfsect interface{} // an *ld.ElfShdr Reloff uint64 Rellen uint64 // Relcount is the number of *host* relocations applied to this section // (when external linking). // Incremented atomically on multiple goroutines. // Note: this may differ from number of Go relocations, as one Go relocation // may turn into multiple host relocations. Relcount uint32 Sym LoaderSym // symbol for the section, if any Index uint16 // each section has a unique index, used internally Compressed bool }
go/src/cmd/link/internal/sym/segment.go/0
{ "file_path": "go/src/cmd/link/internal/sym/segment.go", "repo_id": "go", "token_count": 820 }
170
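As a quick illustration of how these two structs relate, a sketch with made-up addresses: a Section occupies a slice of its parent Segment's virtual address range, and Rwx uses Unix permission bits (5 = r-x):

package main

import "fmt"

// Trimmed-down copies of the Segment/Section fields used in this sketch.
type Segment struct {
	Rwx     uint8
	Vaddr   uint64
	Length  uint64
	Fileoff uint64
	Filelen uint64
}

type Section struct {
	Name   string
	Vaddr  uint64
	Length uint64
}

func main() {
	text := Segment{Rwx: 5, Vaddr: 0x401000, Length: 0x2000, Fileoff: 0x1000, Filelen: 0x2000}
	sect := Section{Name: ".text", Vaddr: 0x401000, Length: 0x1800}
	fmt.Printf("segment %#x-%#x (rwx=%o), section %s ends at %#x\n",
		text.Vaddr, text.Vaddr+text.Length, text.Rwx, sect.Name, sect.Vaddr+sect.Length)
}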
// Copyright 2024 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Existing pull linknames in the wild are allowed _for now_, // for legacy reasons. Test a function and a method. // NOTE: this may not be allowed in the future. Don't do this! package main import ( _ "reflect" "unsafe" ) //go:linkname noescape runtime.noescape func noescape(unsafe.Pointer) unsafe.Pointer //go:linkname rtype_String reflect.(*rtype).String func rtype_String(unsafe.Pointer) string func main() { println(rtype_String(noescape(nil))) }
go/src/cmd/link/testdata/linkname/badlinkname.go/0
{ "file_path": "go/src/cmd/link/testdata/linkname/badlinkname.go", "repo_id": "go", "token_count": 195 }
171
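Under the current rules, enforced by the linker's -checklinkname check, new pull-style references like the ones above are rejected unless the owning package opts in. A sketch of that opt-in "push" side, with hypothetical names and under my reading of the current rules, which, as the comment above warns, may change:

// provider.go — the package that owns the symbol publishes it.
package provider

import _ "unsafe" // required in any file that uses //go:linkname

// The one-argument form marks answer as linkname-able, allowing other
// packages to pull it with their own //go:linkname directive.
//go:linkname answer
func answer() int { return 42 }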
package main import "fmt" import "C" func main() { Println("hello, world") if flag { //line fmthello.go:999999 Println("bad line") for { } } } //go:noinline func Println(s string) { fmt.Println(s) } var flag bool
go/src/cmd/objdump/testdata/fmthellocgo.go/0
{ "file_path": "go/src/cmd/objdump/testdata/fmthellocgo.go", "repo_id": "go", "token_count": 103 }
172
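The `//line fmthello.go:999999` directive above is the interesting part of this file: a line directive must start in column 1 and overrides the file and line the compiler records for the code that follows, which is what tools like objdump then report. A self-contained sketch:

package main

import "fmt"

func main() {
//line hello.go:999
	fmt.Println("x") // compile errors or tracebacks for this line report hello.go:999
}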
// Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "flag" "internal/testenv" "io/fs" "os" "path/filepath" "testing" "golang.org/x/build/relnote" ) var flagCheck = flag.Bool("check", false, "run API release note checks") // Check that each file in api/next has corresponding release note files in doc/next. func TestCheckAPIFragments(t *testing.T) { if !*flagCheck { t.Skip("-check not specified") } root := testenv.GOROOT(t) rootFS := os.DirFS(root) files, err := fs.Glob(rootFS, "api/next/*.txt") if err != nil { t.Fatal(err) } t.Logf("checking release notes for %d files in api/next", len(files)) docFS := os.DirFS(filepath.Join(root, "doc", "next")) // Check that each api/next file has a corresponding release note fragment. for _, apiFile := range files { if err := relnote.CheckAPIFile(rootFS, apiFile, docFS, "doc/next"); err != nil { t.Errorf("%s: %v", apiFile, err) } } }
go/src/cmd/relnote/relnote_test.go/0
{ "file_path": "go/src/cmd/relnote/relnote_test.go", "repo_id": "go", "token_count": 390 }
173
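The test's directory scan is ordinary io/fs globbing over a rooted filesystem. A minimal sketch of the same pattern, run against the current directory instead of GOROOT:

package main

import (
	"fmt"
	"io/fs"
	"os"
)

func main() {
	fsys := os.DirFS(".") // stand-in for testenv.GOROOT(t) in the test above
	files, err := fs.Glob(fsys, "api/next/*.txt")
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d API fragment files\n", len(files))
}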
// Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "bytes" "cmp" "fmt" "html/template" "internal/trace" "internal/trace/traceviewer" "log" "net/http" "slices" "strings" "time" ) // UserTasksHandlerFunc returns a HandlerFunc that reports all tasks found in the trace. func UserTasksHandlerFunc(t *parsedTrace) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { tasks := t.summary.Tasks // Summarize groups of tasks with the same name. summary := make(map[string]taskStats) for _, task := range tasks { stats, ok := summary[task.Name] if !ok { stats.Type = task.Name } stats.add(task) summary[task.Name] = stats } // Sort tasks by type. userTasks := make([]taskStats, 0, len(summary)) for _, stats := range summary { userTasks = append(userTasks, stats) } slices.SortFunc(userTasks, func(a, b taskStats) int { return cmp.Compare(a.Type, b.Type) }) // Emit table. err := templUserTaskTypes.Execute(w, userTasks) if err != nil { http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) return } } } type taskStats struct { Type string Count int // Complete + incomplete tasks Histogram traceviewer.TimeHistogram // Complete tasks only } func (s *taskStats) UserTaskURL(complete bool) func(min, max time.Duration) string { return func(min, max time.Duration) string { return fmt.Sprintf("/usertask?type=%s&complete=%v&latmin=%v&latmax=%v", template.URLQueryEscaper(s.Type), template.URLQueryEscaper(complete), template.URLQueryEscaper(min), template.URLQueryEscaper(max)) } } func (s *taskStats) add(task *trace.UserTaskSummary) { s.Count++ if task.Complete() { s.Histogram.Add(task.End.Time().Sub(task.Start.Time())) } } var templUserTaskTypes = template.Must(template.New("").Parse(` <!DOCTYPE html> <title>Tasks</title> <style>` + traceviewer.CommonStyle + ` .histoTime { width: 20%; white-space:nowrap; } th { background-color: #050505; color: #fff; } table { border-collapse: collapse; } td, th { padding-left: 8px; padding-right: 8px; padding-top: 4px; padding-bottom: 4px; } </style> <body> Search log text: <form action="/usertask"><input name="logtext" type="text"><input type="submit"></form><br> <table border="1" sortable="1"> <tr> <th>Task type</th> <th>Count</th> <th>Duration distribution (complete tasks)</th> </tr> {{range $}} <tr> <td>{{.Type}}</td> <td><a href="/usertask?type={{.Type}}">{{.Count}}</a></td> <td>{{.Histogram.ToHTML (.UserTaskURL true)}}</td> </tr> {{end}} </table> </body> </html> `)) // UserTaskHandlerFunc returns a HandlerFunc that presents the details of the selected tasks. func UserTaskHandlerFunc(t *parsedTrace) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { filter, err := newTaskFilter(r) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } type event struct { WhenString string Elapsed time.Duration Goroutine trace.GoID What string // TODO: include stack trace of creation time } type task struct { WhenString string ID trace.TaskID Duration time.Duration Complete bool Events []event Start, End time.Duration // Time since the beginning of the trace GCTime time.Duration } var tasks []task for _, summary := range t.summary.Tasks { if !filter.match(t, summary) { continue } // Collect all the events for the task. 
var rawEvents []*trace.Event if summary.Start != nil { rawEvents = append(rawEvents, summary.Start) } if summary.End != nil { rawEvents = append(rawEvents, summary.End) } rawEvents = append(rawEvents, summary.Logs...) for _, r := range summary.Regions { if r.Start != nil { rawEvents = append(rawEvents, r.Start) } if r.End != nil { rawEvents = append(rawEvents, r.End) } } // Sort them. slices.SortStableFunc(rawEvents, func(a, b *trace.Event) int { return cmp.Compare(a.Time(), b.Time()) }) // Summarize them. var events []event last := t.startTime() for _, ev := range rawEvents { what := describeEvent(ev) if what == "" { continue } sinceStart := ev.Time().Sub(t.startTime()) events = append(events, event{ WhenString: fmt.Sprintf("%2.9f", sinceStart.Seconds()), Elapsed: ev.Time().Sub(last), What: what, Goroutine: primaryGoroutine(ev), }) last = ev.Time() } taskSpan := taskInterval(t, summary) taskStart := taskSpan.start.Sub(t.startTime()) // Produce the task summary. tasks = append(tasks, task{ WhenString: fmt.Sprintf("%2.9fs", taskStart.Seconds()), Duration: taskSpan.duration(), ID: summary.ID, Complete: summary.Complete(), Events: events, Start: taskStart, End: taskStart + taskSpan.duration(), }) } // Sort the tasks by duration. slices.SortFunc(tasks, func(a, b task) int { return cmp.Compare(a.Duration, b.Duration) }) // Emit table. err = templUserTaskType.Execute(w, struct { Name string Tasks []task }{ Name: filter.name, Tasks: tasks, }) if err != nil { log.Printf("failed to execute template: %v", err) http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) return } } } var templUserTaskType = template.Must(template.New("userTask").Funcs(template.FuncMap{ "elapsed": elapsed, "asMillisecond": asMillisecond, "trimSpace": strings.TrimSpace, }).Parse(` <!DOCTYPE html> <title>Tasks: {{.Name}}</title> <style>` + traceviewer.CommonStyle + ` body { font-family: sans-serif; } table#req-status td.family { padding-right: 2em; } table#req-status td.active { padding-right: 1em; } table#req-status td.empty { color: #aaa; } table#reqs { margin-top: 1em; border-collapse: collapse; } table#reqs tr.first { font-weight: bold; } table#reqs td { font-family: monospace; } table#reqs td.when { text-align: right; white-space: nowrap; } table#reqs td.elapsed { padding: 0 0.5em; text-align: right; white-space: pre; width: 10em; } address { font-size: smaller; margin-top: 5em; } </style> <body> <h2>User Task: {{.Name}}</h2> Search log text: <form onsubmit="window.location.search+='&logtext='+window.logtextinput.value; return false"> <input name="logtext" id="logtextinput" type="text"><input type="submit"> </form><br> <table id="reqs"> <tr> <th>When</th> <th>Elapsed</th> <th>Goroutine</th> <th>Events</th> </tr> {{range $el := $.Tasks}} <tr class="first"> <td class="when">{{$el.WhenString}}</td> <td class="elapsed">{{$el.Duration}}</td> <td></td> <td> <a href="/trace?focustask={{$el.ID}}#{{asMillisecond $el.Start}}:{{asMillisecond $el.End}}">Task {{$el.ID}}</a> <a href="/trace?taskid={{$el.ID}}#{{asMillisecond $el.Start}}:{{asMillisecond $el.End}}">(goroutine view)</a> ({{if .Complete}}complete{{else}}incomplete{{end}}) </td> </tr> {{range $el.Events}} <tr> <td class="when">{{.WhenString}}</td> <td class="elapsed">{{elapsed .Elapsed}}</td> <td class="goid">{{.Goroutine}}</td> <td>{{.What}}</td> </tr> {{end}} {{end}} </body> </html> `)) // taskFilter represents a task filter specified by a user of cmd/trace. 
type taskFilter struct { name string cond []func(*parsedTrace, *trace.UserTaskSummary) bool } // match returns true if a task, described by its ID and summary, matches // the filter. func (f *taskFilter) match(t *parsedTrace, task *trace.UserTaskSummary) bool { if t == nil { return false } for _, c := range f.cond { if !c(t, task) { return false } } return true } // newTaskFilter creates a new task filter from URL query variables. func newTaskFilter(r *http.Request) (*taskFilter, error) { if err := r.ParseForm(); err != nil { return nil, err } var name []string var conditions []func(*parsedTrace, *trace.UserTaskSummary) bool param := r.Form if typ, ok := param["type"]; ok && len(typ) > 0 { name = append(name, fmt.Sprintf("%q", typ[0])) conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool { return task.Name == typ[0] }) } if complete := r.FormValue("complete"); complete == "1" { name = append(name, "complete") conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool { return task.Complete() }) } else if complete == "0" { name = append(name, "incomplete") conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool { return !task.Complete() }) } if lat, err := time.ParseDuration(r.FormValue("latmin")); err == nil { name = append(name, fmt.Sprintf("latency >= %s", lat)) conditions = append(conditions, func(t *parsedTrace, task *trace.UserTaskSummary) bool { return task.Complete() && taskInterval(t, task).duration() >= lat }) } if lat, err := time.ParseDuration(r.FormValue("latmax")); err == nil { name = append(name, fmt.Sprintf("latency <= %s", lat)) conditions = append(conditions, func(t *parsedTrace, task *trace.UserTaskSummary) bool { return task.Complete() && taskInterval(t, task).duration() <= lat }) } if text := r.FormValue("logtext"); text != "" { name = append(name, fmt.Sprintf("log contains %q", text)) conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool { return taskMatches(task, text) }) } return &taskFilter{name: strings.Join(name, ","), cond: conditions}, nil } func taskInterval(t *parsedTrace, s *trace.UserTaskSummary) interval { var i interval if s.Start != nil { i.start = s.Start.Time() } else { i.start = t.startTime() } if s.End != nil { i.end = s.End.Time() } else { i.end = t.endTime() } return i } func taskMatches(t *trace.UserTaskSummary, text string) bool { matches := func(s string) bool { return strings.Contains(s, text) } if matches(t.Name) { return true } for _, r := range t.Regions { if matches(r.Name) { return true } } for _, ev := range t.Logs { log := ev.Log() if matches(log.Category) { return true } if matches(log.Message) { return true } } return false } func describeEvent(ev *trace.Event) string { switch ev.Kind() { case trace.EventStateTransition: st := ev.StateTransition() if st.Resource.Kind != trace.ResourceGoroutine { return "" } old, new := st.Goroutine() return fmt.Sprintf("%s -> %s", old, new) case trace.EventRegionBegin: return fmt.Sprintf("region %q begin", ev.Region().Type) case trace.EventRegionEnd: return fmt.Sprintf("region %q end", ev.Region().Type) case trace.EventTaskBegin: t := ev.Task() return fmt.Sprintf("task %q (ID %d, parent %d) begin", t.Type, t.ID, t.Parent) case trace.EventTaskEnd: return "task end" case trace.EventLog: log := ev.Log() if log.Category != "" { return fmt.Sprintf("log (category: %s): %q", log.Category, log.Message) } return fmt.Sprintf("log %q", log.Message) } return "" } func primaryGoroutine(ev
*trace.Event) trace.GoID { if ev.Kind() != trace.EventStateTransition { return ev.Goroutine() } st := ev.StateTransition() if st.Resource.Kind != trace.ResourceGoroutine { return trace.NoGoroutine } return st.Resource.Goroutine() } func elapsed(d time.Duration) string { b := fmt.Appendf(nil, "%.9f", d.Seconds()) // For subsecond durations, blank all zeros before decimal point, // and all zeros between the decimal point and the first non-zero digit. if d < time.Second { dot := bytes.IndexByte(b, '.') for i := 0; i < dot; i++ { b[i] = ' ' } for i := dot + 1; i < len(b); i++ { if b[i] == '0' { b[i] = ' ' } else { break } } } return string(b) } func asMillisecond(d time.Duration) float64 { return float64(d.Nanoseconds()) / float64(time.Millisecond) }
go/src/cmd/trace/tasks.go/0
{ "file_path": "go/src/cmd/trace/tasks.go", "repo_id": "go", "token_count": 4872 }
174
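The pages above summarize user tasks recorded with the runtime/trace annotation API. A minimal sketch of a program that produces such annotations (a task, a region, and a log entry; names are invented):

package main

import (
	"context"
	"os"
	"runtime/trace"
)

func main() {
	f, _ := os.Create("trace.out") // error handling elided in this sketch
	trace.Start(f)
	defer trace.Stop()

	// A user task with one region and one log message inside it.
	ctx, task := trace.NewTask(context.Background(), "makeCappuccino")
	defer task.End()
	trace.WithRegion(ctx, "steamMilk", func() {
		trace.Log(ctx, "milkVolume", "100ml")
	})
}

Running the program and then "go tool trace trace.out" serves pages like the ones implemented above.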
// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package driver import ( "errors" "fmt" "os" "github.com/google/pprof/internal/binutils" "github.com/google/pprof/internal/plugin" ) type source struct { Sources []string ExecName string BuildID string Base []string DiffBase bool Normalize bool Seconds int Timeout int Symbolize string HTTPHostport string HTTPDisableBrowser bool Comment string } // parseFlags parses the command lines through the specified flags package // and returns the source of the profile and optionally the command // for the kind of report to generate (nil for interactive use). func parseFlags(o *plugin.Options) (*source, []string, error) { flag := o.Flagset // Comparisons. flagDiffBase := flag.StringList("diff_base", "", "Source of base profile for comparison") flagBase := flag.StringList("base", "", "Source of base profile for profile subtraction") // Source options. flagSymbolize := flag.String("symbolize", "", "Options for profile symbolization") flagBuildID := flag.String("buildid", "", "Override build id for first mapping") flagTimeout := flag.Int("timeout", -1, "Timeout in seconds for fetching a profile") flagAddComment := flag.String("add_comment", "", "Annotation string to record in the profile") // CPU profile options flagSeconds := flag.Int("seconds", -1, "Length of time for dynamic profiles") // Heap profile options flagInUseSpace := flag.Bool("inuse_space", false, "Display in-use memory size") flagInUseObjects := flag.Bool("inuse_objects", false, "Display in-use object counts") flagAllocSpace := flag.Bool("alloc_space", false, "Display allocated memory size") flagAllocObjects := flag.Bool("alloc_objects", false, "Display allocated object counts") // Contention profile options flagTotalDelay := flag.Bool("total_delay", false, "Display total delay at each region") flagContentions := flag.Bool("contentions", false, "Display number of delays at each region") flagMeanDelay := flag.Bool("mean_delay", false, "Display mean delay at each region") flagTools := flag.String("tools", os.Getenv("PPROF_TOOLS"), "Path for object tool pathnames") flagHTTP := flag.String("http", "", "Present interactive web UI at the specified http host:port") flagNoBrowser := flag.Bool("no_browser", false, "Skip opening a browser for the interactive web UI") // Flags that set configuration properties. 
cfg := currentConfig() configFlagSetter := installConfigFlags(flag, &cfg) flagCommands := make(map[string]*bool) flagParamCommands := make(map[string]*string) for name, cmd := range pprofCommands { if cmd.hasParam { flagParamCommands[name] = flag.String(name, "", "Generate a report in "+name+" format, matching regexp") } else { flagCommands[name] = flag.Bool(name, false, "Generate a report in "+name+" format") } } args := flag.Parse(func() { o.UI.Print(usageMsgHdr + usage(true) + usageMsgSrc + flag.ExtraUsage() + usageMsgVars) }) if len(args) == 0 { return nil, nil, errors.New("no profile source specified") } var execName string // Recognize first argument as an executable or buildid override. if len(args) > 1 { arg0 := args[0] if file, err := o.Obj.Open(arg0, 0, ^uint64(0), 0, ""); err == nil { file.Close() execName = arg0 args = args[1:] } } // Apply any specified flags to cfg. if err := configFlagSetter(); err != nil { return nil, nil, err } cmd, err := outputFormat(flagCommands, flagParamCommands) if err != nil { return nil, nil, err } if cmd != nil && *flagHTTP != "" { return nil, nil, errors.New("-http is not compatible with an output format on the command line") } if *flagNoBrowser && *flagHTTP == "" { return nil, nil, errors.New("-no_browser only makes sense with -http") } si := cfg.SampleIndex si = sampleIndex(flagTotalDelay, si, "delay", "-total_delay", o.UI) si = sampleIndex(flagMeanDelay, si, "delay", "-mean_delay", o.UI) si = sampleIndex(flagContentions, si, "contentions", "-contentions", o.UI) si = sampleIndex(flagInUseSpace, si, "inuse_space", "-inuse_space", o.UI) si = sampleIndex(flagInUseObjects, si, "inuse_objects", "-inuse_objects", o.UI) si = sampleIndex(flagAllocSpace, si, "alloc_space", "-alloc_space", o.UI) si = sampleIndex(flagAllocObjects, si, "alloc_objects", "-alloc_objects", o.UI) cfg.SampleIndex = si if *flagMeanDelay { cfg.Mean = true } source := &source{ Sources: args, ExecName: execName, BuildID: *flagBuildID, Seconds: *flagSeconds, Timeout: *flagTimeout, Symbolize: *flagSymbolize, HTTPHostport: *flagHTTP, HTTPDisableBrowser: *flagNoBrowser, Comment: *flagAddComment, } if err := source.addBaseProfiles(*flagBase, *flagDiffBase); err != nil { return nil, nil, err } normalize := cfg.Normalize if normalize && len(source.Base) == 0 { return nil, nil, errors.New("must have base profile to normalize by") } source.Normalize = normalize if bu, ok := o.Obj.(*binutils.Binutils); ok { bu.SetTools(*flagTools) } setCurrentConfig(cfg) return source, cmd, nil } // addBaseProfiles adds the list of base profiles or diff base profiles to // the source. This function will return an error if both base and diff base // profiles are specified. func (source *source) addBaseProfiles(flagBase, flagDiffBase []*string) error { base, diffBase := dropEmpty(flagBase), dropEmpty(flagDiffBase) if len(base) > 0 && len(diffBase) > 0 { return errors.New("-base and -diff_base flags cannot both be specified") } source.Base = base if len(diffBase) > 0 { source.Base, source.DiffBase = diffBase, true } return nil } // dropEmpty list takes a slice of string pointers, and outputs a slice of // non-empty strings associated with the flag. func dropEmpty(list []*string) []string { var l []string for _, s := range list { if *s != "" { l = append(l, *s) } } return l } // installConfigFlags creates command line flags for configuration // fields and returns a function which can be called after flags have // been parsed to copy any flags specified on the command line to // *cfg. 
func installConfigFlags(flag plugin.FlagSet, cfg *config) func() error { // List of functions for setting the different parts of a config. var setters []func() var err error // Holds any errors encountered while running setters. for _, field := range configFields { n := field.name help := configHelp[n] var setter func() switch ptr := cfg.fieldPtr(field).(type) { case *bool: f := flag.Bool(n, *ptr, help) setter = func() { *ptr = *f } case *int: f := flag.Int(n, *ptr, help) setter = func() { *ptr = *f } case *float64: f := flag.Float64(n, *ptr, help) setter = func() { *ptr = *f } case *string: if len(field.choices) == 0 { f := flag.String(n, *ptr, help) setter = func() { *ptr = *f } } else { // Make a separate flag per possible choice. // Set all flags to initially false so we can // identify conflicts. bools := make(map[string]*bool) for _, choice := range field.choices { bools[choice] = flag.Bool(choice, false, configHelp[choice]) } setter = func() { var set []string for k, v := range bools { if *v { set = append(set, k) } } switch len(set) { case 0: // Leave as default value. case 1: *ptr = set[0] default: err = fmt.Errorf("conflicting options set: %v", set) } } } } setters = append(setters, setter) } return func() error { // Apply the setter for every flag. for _, setter := range setters { setter() if err != nil { return err } } return nil } } func sampleIndex(flag *bool, si string, sampleType, option string, ui plugin.UI) string { if *flag { if si == "" { return sampleType } ui.PrintErr("Multiple value selections, ignoring ", option) } return si } func outputFormat(bcmd map[string]*bool, acmd map[string]*string) (cmd []string, err error) { for n, b := range bcmd { if *b { if cmd != nil { return nil, errors.New("must set at most one output format") } cmd = []string{n} } } for n, s := range acmd { if *s != "" { if cmd != nil { return nil, errors.New("must set at most one output format") } cmd = []string{n, *s} } } return cmd, nil } var usageMsgHdr = `usage: Produce output in the specified format. pprof <format> [options] [binary] <source> ... Omit the format to get an interactive shell whose commands can be used to generate various views of a profile pprof [options] [binary] <source> ... Omit the format and provide the "-http" flag to get an interactive web interface at the specified host:port that can be used to navigate through various views of a profile. pprof -http [host]:[port] [options] [binary] <source> ... 
Details: ` var usageMsgSrc = "\n\n" + " Source options:\n" + " -seconds Duration for time-based profile collection\n" + " -timeout Timeout in seconds for profile collection\n" + " -buildid Override build id for main binary\n" + " -add_comment Free-form annotation to add to the profile\n" + " Displayed on some reports or with pprof -comments\n" + " -diff_base source Source of base profile for comparison\n" + " -base source Source of base profile for profile subtraction\n" + " profile.pb.gz Profile in compressed protobuf format\n" + " legacy_profile Profile in legacy pprof format\n" + " http://host/profile URL for profile handler to retrieve\n" + " -symbolize= Controls source of symbol information\n" + " none Do not attempt symbolization\n" + " local Examine only local binaries\n" + " fastlocal Only get function names from local binaries\n" + " remote Do not examine local binaries\n" + " force Force re-symbolization\n" + " Binary Local path or build id of binary for symbolization\n" var usageMsgVars = "\n\n" + " Misc options:\n" + " -http Provide web interface at host:port.\n" + " Host is optional and 'localhost' by default.\n" + " Port is optional and a randomly available port by default.\n" + " -no_browser Skip opening a browser for the interactive web UI.\n" + " -tools Search path for object tools\n" + "\n" + " Legacy convenience options:\n" + " -inuse_space Same as -sample_index=inuse_space\n" + " -inuse_objects Same as -sample_index=inuse_objects\n" + " -alloc_space Same as -sample_index=alloc_space\n" + " -alloc_objects Same as -sample_index=alloc_objects\n" + " -total_delay Same as -sample_index=delay\n" + " -contentions Same as -sample_index=contentions\n" + " -mean_delay Same as -mean -sample_index=delay\n" + "\n" + " Environment Variables:\n" + " PPROF_TMPDIR Location for saved profiles (default $HOME/pprof)\n" + " PPROF_TOOLS Search path for object-level tools\n" + " PPROF_BINARY_PATH Search path for local binary files\n" + " default: $HOME/pprof/binaries\n" + " searches $buildid/$name, $buildid/*, $path/$buildid,\n" + " ${buildid:0:2}/${buildid:2}.debug, $name, $path,\n" + " ${name}.debug, $dir/.debug/${name}.debug,\n" + " usr/lib/debug/$dir/${name}.debug\n" + " * On Windows, %USERPROFILE% is used instead of $HOME"
go/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go/0
{ "file_path": "go/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go", "repo_id": "go", "token_count": 4736 }
175
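The legacy convenience flags all funnel into a single sample index via sampleIndex(), which keeps the first selection and warns about the rest. A minimal standalone sketch of that precedence logic using the standard flag package (the pprof FlagSet plumbing is elided):

package main

import (
	"flag"
	"fmt"
)

// pick keeps the first selected sample index and warns about later
// conflicting selections, mirroring sampleIndex() above.
func pick(selected *bool, si, sampleType, option string) string {
	if *selected {
		if si == "" {
			return sampleType
		}
		fmt.Println("Multiple value selections, ignoring", option)
	}
	return si
}

func main() {
	inuseSpace := flag.Bool("inuse_space", false, "Display in-use memory size")
	allocSpace := flag.Bool("alloc_space", false, "Display allocated memory size")
	flag.Parse()

	si := ""
	si = pick(inuseSpace, si, "inuse_space", "-inuse_space")
	si = pick(allocSpace, si, "alloc_space", "-alloc_space")
	fmt.Println("sample_index =", si)
}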
// stackViewer displays a flame-graph like view (extended to show callers). // stacks - report.StackSet // nodes - List of names for each source in report.StackSet function stackViewer(stacks, nodes) { 'use strict'; // Constants used in rendering. const ROW = 20; const PADDING = 2; const MIN_WIDTH = 4; const MIN_TEXT_WIDTH = 16; const TEXT_MARGIN = 2; const FONT_SIZE = 12; const MIN_FONT_SIZE = 8; // Fields let pivots = []; // Indices of currently selected data.Sources entries. let matches = new Set(); // Indices of sources that match search let elems = new Map(); // Mapping from source index to display elements let displayList = []; // List of boxes to display. let actionMenuOn = false; // Is action menu visible? let actionTarget = null; // Box on which action menu is operating. let diff = false; // Are we displaying a diff? for (const stack of stacks.Stacks) { if (stack.Value < 0) { diff = true; break; } } // Setup to allow measuring text width. const textSizer = document.createElement('canvas'); textSizer.id = 'textsizer'; const textContext = textSizer.getContext('2d'); // Get DOM elements. const chart = find('stack-chart'); const search = find('search'); const actions = find('action-menu'); const actionTitle = find('action-title'); const detailBox = find('current-details'); window.addEventListener('resize', render); window.addEventListener('popstate', render); search.addEventListener('keydown', handleSearchKey); // Withdraw action menu when clicking outside, or when item selected. document.addEventListener('mousedown', (e) => { if (!actions.contains(e.target)) { hideActionMenu(); } }); actions.addEventListener('click', hideActionMenu); // Initialize menus and other general UI elements. viewer(new URL(window.location.href), nodes, { hiliter: (n, on) => { return hilite(n, on); }, current: () => { let r = new Map(); if (pivots.length == 1 && pivots[0] == 0) { // Not pivoting } else { for (let p of pivots) { r.set(p, true); } } return r; }}); render(); // Helper functions follow: // hilite changes the highlighting of elements corresponding to specified src. function hilite(src, on) { if (on) { matches.add(src); } else { matches.delete(src); } toggleClass(src, 'hilite', on); return true; } // Display action menu (triggered by right-click on a frame) function showActionMenu(e, box) { if (box.src == 0) return; // No action menu for root e.preventDefault(); // Disable browser context menu const src = stacks.Sources[box.src]; actionTitle.innerText = src.Display[src.Display.length-1]; const menu = actions; menu.style.display = 'block'; // Compute position so menu stays visible and near the mouse. const x = Math.min(e.clientX - 10, document.body.clientWidth - menu.clientWidth); const y = Math.min(e.clientY - 10, document.body.clientHeight - menu.clientHeight); menu.style.left = x + 'px'; menu.style.top = y + 'px'; // Set menu links to operate on clicked box. setHrefParam('action-source', 'f', box.src); setHrefParam('action-source-tab', 'f', box.src); setHrefParam('action-focus', 'f', box.src); setHrefParam('action-ignore', 'i', box.src); setHrefParam('action-hide', 'h', box.src); setHrefParam('action-showfrom', 'sf', box.src); toggleClass(box.src, 'hilite2', true); actionTarget = box; actionMenuOn = true; } function hideActionMenu() { actions.style.display = 'none'; actionMenuOn = false; if (actionTarget != null) { toggleClass(actionTarget.src, 'hilite2', false); } } // setHrefParam updates the specified parameter in the href of an <a> // element to make it operate on the specified src. 
function setHrefParam(id, param, src) { const elem = document.getElementById(id); if (!elem) return; let url = new URL(elem.href); url.hash = ''; // Copy params from this page's URL. const params = url.searchParams; for (const p of new URLSearchParams(window.location.search)) { params.set(p[0], p[1]); } // Update params to include src. let v = pprofQuoteMeta(stacks.Sources[src].FullName); if (param != 'f' && param != 'sf') { // old f,sf values are overwritten // Add new source to current parameter value. const old = params.get(param); if (old && old != '') { v += '|' + old; } } params.set(param, v); elem.href = url.toString(); } // Capture Enter key in the search box to make it pivot instead of focus. function handleSearchKey(e) { if (e.key != 'Enter') return; e.stopImmediatePropagation(); // Disable normal enter key handling const val = search.value; try { new RegExp(search.value); } catch (error) { return; // TODO: Display error state in search box } switchPivots(val); } function switchPivots(regexp) { // Switch URL without hitting the server. const url = new URL(document.URL); if (regexp === '' || regexp === '^$') { url.searchParams.delete('p'); // Not pivoting } else { url.searchParams.set('p', regexp); } history.pushState('', '', url.toString()); // Makes back-button work matches = new Set(); search.value = ''; render(); } function handleEnter(box, div) { if (actionMenuOn) return; const src = stacks.Sources[box.src]; div.title = details(box) + ' │ ' + src.FullName + (src.Inlined ? "\n(inlined)" : ""); detailBox.innerText = summary(box.sumpos, box.sumneg); // Highlight all boxes that have the same source as box. toggleClass(box.src, 'hilite2', true); } function handleLeave(box) { if (actionMenuOn) return; detailBox.innerText = ''; toggleClass(box.src, 'hilite2', false); } // Return list of sources that match the regexp given by the 'p' URL parameter. function urlPivots() { const pivots = []; const params = (new URL(document.URL)).searchParams; const val = params.get('p'); if (val !== null && val != '') { try { const re = new RegExp(val); for (let i = 0; i < stacks.Sources.length; i++) { const src = stacks.Sources[i]; if (re.test(src.UniqueName) || re.test(src.FileName)) { pivots.push(i); } } } catch (error) {} } if (pivots.length == 0) { pivots.push(0); } return pivots; } // render re-generates the stack display. function render() { pivots = urlPivots(); // Get places where pivots occur. let places = []; for (let pivot of pivots) { const src = stacks.Sources[pivot]; for (let p of src.Places) { places.push(p); } } const width = chart.clientWidth; elems.clear(); actionTarget = null; const [pos, neg] = totalValue(places); const total = pos + neg; const xscale = (width-2*PADDING) / total; // Converts from profile value to X pixels const x = PADDING; const y = 0; displayList.length = 0; renderStacks(0, xscale, x, y, places, +1); // Callees renderStacks(0, xscale, x, y-ROW, places, -1); // Callers (ROW left for separator) display(xscale, pos, neg, displayList); } // renderStacks creates boxes with top-left at x,y with children drawn as // nested stacks (below or above based on the sign of direction). // Returns the largest y coordinate filled. function renderStacks(depth, xscale, x, y, places, direction) { // Example: suppose we are drawing the following stacks: // a->b->c // a->b->d // a->e->f // After rendering a, we will call renderStacks, with places pointing to // the preceding stacks. // // We first group all places with the same leading entry. 
In this example // we get [b->c, b->d] and [e->f]. We render the two groups side-by-side. const groups = partitionPlaces(places); for (const g of groups) { renderGroup(depth, xscale, x, y, g, direction); x += groupWidth(xscale, g); } } // Some of the types used below: // // // Group represents a displayed (sub)tree. // interface Group { // name: string; // Full name of source // src: number; // Index in stacks.Sources // self: number; // Contribution as leaf (may be < 0 for diffs) // sumpos: number; // Sum of |self| of positive nodes in tree (>= 0) // sumneg: number; // Sum of |self| of negative nodes in tree (>= 0) // places: Place[]; // Stack slots that contributed to this group // } // // // Box is a rendered item. // interface Box { // x: number; // X coordinate of top-left // y: number; // Y coordinate of top-left // width: number; // Width of box to display // src: number; // Index in stacks.Sources // sumpos: number; // From corresponding Group // sumneg: number; // From corresponding Group // self: number; // From corresponding Group // }; function groupWidth(xscale, g) { return xscale * (g.sumpos + g.sumneg); } function renderGroup(depth, xscale, x, y, g, direction) { // Skip if not wide enough. const width = groupWidth(xscale, g); if (width < MIN_WIDTH) return; // Draw the box for g.src (except for selected element in upwards direction // since that duplicates the box we added in downwards direction). if (depth != 0 || direction > 0) { const box = { x: x, y: y, width: width, src: g.src, sumpos: g.sumpos, sumneg: g.sumneg, self: g.self, }; displayList.push(box); if (direction > 0) { // Leave gap on left hand side to indicate self contribution. x += xscale*Math.abs(g.self); } } y += direction * ROW; // Find child or parent stacks. const next = []; for (const place of g.places) { const stack = stacks.Stacks[place.Stack]; const nextSlot = place.Pos + direction; if (nextSlot >= 0 && nextSlot < stack.Sources.length) { next.push({Stack: place.Stack, Pos: nextSlot}); } } renderStacks(depth+1, xscale, x, y, next, direction); } // partitionPlaces partitions a set of places into groups where each group // contains places with the same source. If a stack occurs multiple times // in places, only the outer-most occurrence is kept. function partitionPlaces(places) { // Find outer-most slot per stack (used later to elide duplicate stacks). const stackMap = new Map(); // Map from stack index to outer-most slot# for (const place of places) { const prevSlot = stackMap.get(place.Stack); if (prevSlot && prevSlot <= place.Pos) { // We already have a higher slot in this stack. } else { stackMap.set(place.Stack, place.Pos); } } // Now partition the stacks. const groups = []; // Array of Group {name, src, sum, self, places} const groupMap = new Map(); // Map from Source to Group for (const place of places) { if (stackMap.get(place.Stack) != place.Pos) { continue; } const stack = stacks.Stacks[place.Stack]; const src = stack.Sources[place.Pos]; let group = groupMap.get(src); if (!group) { const name = stacks.Sources[src].FullName; group = {name: name, src: src, sumpos: 0, sumneg: 0, self: 0, places: []}; groupMap.set(src, group); groups.push(group); } if (stack.Value < 0) { group.sumneg += -stack.Value; } else { group.sumpos += stack.Value; } group.self += (place.Pos == stack.Sources.length-1) ? stack.Value : 0; group.places.push(place); } // Order by decreasing cost (makes it easier to spot heavy functions). 
// Though alphabetical ordering is a potential alternative that will make // profile comparisons easier. groups.sort(function(a, b) { return (b.sumpos + b.sumneg) - (a.sumpos + a.sumneg); }); return groups; } function display(xscale, posTotal, negTotal, list) { // Sort boxes so that text selection follows a predictable order. list.sort(function(a, b) { if (a.y != b.y) return a.y - b.y; return a.x - b.x; }); // Adjust Y coordinates so that zero is at top. let adjust = (list.length > 0) ? list[0].y : 0; adjust -= ROW + 2*PADDING; // Room for details const divs = []; for (const box of list) { box.y -= adjust; divs.push(drawBox(xscale, box)); } divs.push(drawSep(-adjust, posTotal, negTotal)); const h = (list.length > 0 ? list[list.length-1].y : 0) + 4*ROW; chart.style.height = h+'px'; chart.replaceChildren(...divs); } function drawBox(xscale, box) { const srcIndex = box.src; const src = stacks.Sources[srcIndex]; function makeRect(cl, x, y, w, h) { const r = document.createElement('div'); r.style.left = x+'px'; r.style.top = y+'px'; r.style.width = w+'px'; r.style.height = h+'px'; r.classList.add(cl); return r; } // Background const w = box.width - 1; // Leave 1px gap const r = makeRect('boxbg', box.x, box.y, w, ROW); if (!diff) r.style.background = makeColor(src.Color); addElem(srcIndex, r); if (!src.Inlined) { r.classList.add('not-inlined'); } // Positive/negative indicator for diff mode. if (diff) { const delta = box.sumpos - box.sumneg; const partWidth = xscale * Math.abs(delta); if (partWidth >= MIN_WIDTH) { r.appendChild(makeRect((delta < 0 ? 'negative' : 'positive'), 0, 0, partWidth, ROW-1)); } } // Label if (box.width >= MIN_TEXT_WIDTH) { const t = document.createElement('div'); t.classList.add('boxtext'); fitText(t, box.width-2*TEXT_MARGIN, src.Display); r.appendChild(t); } onClick(r, () => { switchPivots(pprofQuoteMeta(src.UniqueName)); }); r.addEventListener('mouseenter', () => { handleEnter(box, r); }); r.addEventListener('mouseleave', () => { handleLeave(box); }); r.addEventListener('contextmenu', (e) => { showActionMenu(e, box); }); return r; } // Handle clicks, but only if the mouse did not move during the click. function onClick(target, handler) { // Disable click if mouse moves more than threshold pixels since mousedown. const threshold = 3; let [x, y] = [-1, -1]; target.addEventListener('mousedown', (e) => { [x, y] = [e.clientX, e.clientY]; }); target.addEventListener('click', (e) => { if (Math.abs(e.clientX - x) <= threshold && Math.abs(e.clientY - y) <= threshold) { handler(); } }); } function drawSep(y, posTotal, negTotal) { const m = document.createElement('div'); m.innerText = summary(posTotal, negTotal); m.style.top = (y-ROW) + 'px'; m.style.left = PADDING + 'px'; m.style.width = (chart.clientWidth - PADDING*2) + 'px'; m.classList.add('separator'); return m; } // addElem registers an element that belongs to the specified src. function addElem(src, elem) { let list = elems.get(src); if (!list) { list = []; elems.set(src, list); } list.push(elem); elem.classList.toggle('hilite', matches.has(src)); } // Adds or removes cl from classList of all elements for the specified source. function toggleClass(src, cl, value) { const list = elems.get(src); if (list) { for (const elem of list) { elem.classList.toggle(cl, value); } } } // fitText sets text and font-size clipped to the specified width w. function fitText(t, avail, textList) { // Find first entry in textList that fits. 
let width = avail; textContext.font = FONT_SIZE + 'pt Arial'; for (let i = 0; i < textList.length; i++) { let text = textList[i]; width = textContext.measureText(text).width; if (width <= avail) { t.innerText = text; return; } } // Try to fit by dropping font size. let text = textList[textList.length-1]; const fs = Math.max(MIN_FONT_SIZE, FONT_SIZE * (avail / width)); t.style.fontSize = fs + 'pt'; t.innerText = text; } // totalValue returns the positive and negative sums of the Values of stacks // listed in places. function totalValue(places) { const seen = new Set(); let pos = 0; let neg = 0; for (const place of places) { if (seen.has(place.Stack)) continue; // Do not double-count stacks seen.add(place.Stack); const stack = stacks.Stacks[place.Stack]; if (stack.Value < 0) { neg += -stack.Value; } else { pos += stack.Value; } } return [pos, neg]; } function summary(pos, neg) { // Examples: // 6s (10%) // 12s (20%) 🠆 18s (30%) return diff ? diffText(neg, pos) : percentText(pos); } function details(box) { // Examples: // 6s (10%) // 6s (10%) │ self 3s (5%) // 6s (10%) │ 12s (20%) 🠆 18s (30%) let result = percentText(box.sumpos - box.sumneg); if (box.self != 0) { result += " │ self " + unitText(box.self); } if (diff && box.sumpos > 0 && box.sumneg > 0) { result += " │ " + diffText(box.sumneg, box.sumpos); } return result; } // diffText returns text that displays from and to alongside their percentages. // E.g., 9s (45%) 🠆 10s (50%) function diffText(from, to) { return percentText(from) + " 🠆 " + percentText(to); } // percentText returns text that displays v in appropriate units alongside its // percentage. function percentText(v) { function percent(v, total) { return Number(((100.0 * v) / total).toFixed(1)) + '%'; } return unitText(v) + " (" + percent(v, stacks.Total) + ")"; } // unitText returns a formatted string to display for value. function unitText(value) { return pprofUnitText(value*stacks.Scale, stacks.Unit); } function find(name) { const elem = document.getElementById(name); if (!elem) { throw 'element not found: ' + name } return elem; } function makeColor(index) { // Rotate hue around a circle. Multiply by phi to spread things // out better. Use 50% saturation to make subdued colors, and // 80% lightness to have good contrast with black foreground text. const PHI = 1.618033988; const hue = (index+1) * PHI * 2 * Math.PI; // +1 to avoid 0 const hsl = `hsl(${hue}rad 50% 80%)`; return hsl; } } // pprofUnitText returns a formatted string to display for value in the specified unit. function pprofUnitText(value, unit) { const sign = (value < 0) ? "-" : ""; let v = Math.abs(value); // Rescale to appropriate display unit. let list = null; for (const def of pprofUnitDefs) { if (def.DefaultUnit.CanonicalName == unit) { list = def.Units; v *= def.DefaultUnit.Factor; break; } } if (list) { // Stop just before entry that is too large. for (let i = 0; i < list.length; i++) { if (i == list.length-1 || list[i+1].Factor > v) { v /= list[i].Factor; unit = list[i].CanonicalName; break; } } } return sign + Number(v.toFixed(2)) + unit; }
go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js/0
{ "file_path": "go/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js", "repo_id": "go", "token_count": 7542 }
176
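One detail worth calling out from makeColor above: advancing the hue by a multiple of the golden ratio per box index gives adjacent boxes well-separated colors without a fixed palette. The same idea expressed in Go (the language of the rest of this corpus), with the hue reduced mod 2π for display:

package main

import (
	"fmt"
	"math"
)

// hue mirrors makeColor in stacks.js: successive indices advance the hue by
// a golden-ratio multiple, spreading colors evenly around the color circle.
func hue(index int) float64 {
	const phi = 1.618033988
	return math.Mod(float64(index+1)*phi*2*math.Pi, 2*math.Pi) // +1 to avoid 0
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Printf("box %d -> hue %.2f rad (50%% saturation, 80%% lightness)\n", i, hue(i))
	}
}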
package report import "regexp" // pkgRE extracts the package name. It looks for the first "." or "::" that occurs // after the last "/". (Searching after the last / allows us to correctly handle // names that look like "some.url.com/foo.bar".) var pkgRE = regexp.MustCompile(`^((.*/)?[\w\d_]+)(\.|::)([^/]*)$`) // packageName returns the package name of the named symbol, or "" if not found. func packageName(name string) string { m := pkgRE.FindStringSubmatch(name) if m == nil { return "" } return m[1] }
go/src/cmd/vendor/github.com/google/pprof/internal/report/package.go/0
{ "file_path": "go/src/cmd/vendor/github.com/google/pprof/internal/report/package.go", "repo_id": "go", "token_count": 179 }
177
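To make the regexp's behavior concrete, a runnable sketch applying packageName to a few symbol names, with expected outputs (derived from the rule: first "." or "::" after the last "/") in comments:

package main

import (
	"fmt"
	"regexp"
)

var pkgRE = regexp.MustCompile(`^((.*/)?[\w\d_]+)(\.|::)([^/]*)$`)

func packageName(name string) string {
	m := pkgRE.FindStringSubmatch(name)
	if m == nil {
		return ""
	}
	return m[1]
}

func main() {
	fmt.Println(packageName("encoding/json.Marshal")) // encoding/json
	fmt.Println(packageName("some.url.com/foo.bar"))  // some.url.com/foo
	fmt.Println(packageName("main"))                  // "" (no separator found)
}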
// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package profile provides a representation of profile.proto and // methods to encode/decode profiles in this format. package profile import ( "bytes" "compress/gzip" "fmt" "io" "math" "path/filepath" "regexp" "sort" "strings" "sync" "time" ) // Profile is an in-memory representation of profile.proto. type Profile struct { SampleType []*ValueType DefaultSampleType string Sample []*Sample Mapping []*Mapping Location []*Location Function []*Function Comments []string DropFrames string KeepFrames string TimeNanos int64 DurationNanos int64 PeriodType *ValueType Period int64 // The following fields are modified during encoding and copying, // so are protected by a Mutex. encodeMu sync.Mutex commentX []int64 dropFramesX int64 keepFramesX int64 stringTable []string defaultSampleTypeX int64 } // ValueType corresponds to Profile.ValueType type ValueType struct { Type string // cpu, wall, inuse_space, etc Unit string // seconds, nanoseconds, bytes, etc typeX int64 unitX int64 } // Sample corresponds to Profile.Sample type Sample struct { Location []*Location Value []int64 // Label is a per-label-key map to values for string labels. // // In general, having multiple values for the given label key is strongly // discouraged - see docs for the sample label field in profile.proto. The // main reason this unlikely state is tracked here is to make the // decoding->encoding roundtrip not lossy. But we expect that the value // slices present in this map are always of length 1. Label map[string][]string // NumLabel is a per-label-key map to values for numeric labels. See a note // above on handling multiple values for a label. NumLabel map[string][]int64 // NumUnit is a per-label-key map to the unit names of corresponding numeric // label values. The unit info may be missing even if the label is in // NumLabel, see the docs in profile.proto for details. When the value is // slice is present and not nil, its length must be equal to the length of // the corresponding value slice in NumLabel. NumUnit map[string][]string locationIDX []uint64 labelX []label } // label corresponds to Profile.Label type label struct { keyX int64 // Exactly one of the two following values must be set strX int64 numX int64 // Integer value for this label // can be set if numX has value unitX int64 } // Mapping corresponds to Profile.Mapping type Mapping struct { ID uint64 Start uint64 Limit uint64 Offset uint64 File string BuildID string HasFunctions bool HasFilenames bool HasLineNumbers bool HasInlineFrames bool fileX int64 buildIDX int64 // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File. // For linux kernel mappings generated by some tools, correct symbolization depends // on knowing which of the two possible relocation symbols was used for `Start`. // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext"). // // Note, this public field is not persisted in the proto. 
For the purposes of // copying / merging / hashing profiles, it is considered subsumed by `File`. KernelRelocationSymbol string } // Location corresponds to Profile.Location type Location struct { ID uint64 Mapping *Mapping Address uint64 Line []Line IsFolded bool mappingIDX uint64 } // Line corresponds to Profile.Line type Line struct { Function *Function Line int64 Column int64 functionIDX uint64 } // Function corresponds to Profile.Function type Function struct { ID uint64 Name string SystemName string Filename string StartLine int64 nameX int64 systemNameX int64 filenameX int64 } // Parse parses a profile and checks for its validity. The input // may be a gzip-compressed encoded protobuf or one of many legacy // profile formats which may be unsupported in the future. func Parse(r io.Reader) (*Profile, error) { data, err := io.ReadAll(r) if err != nil { return nil, err } return ParseData(data) } // ParseData parses a profile from a buffer and checks for its // validity. func ParseData(data []byte) (*Profile, error) { var p *Profile var err error if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { gz, err := gzip.NewReader(bytes.NewBuffer(data)) if err == nil { data, err = io.ReadAll(gz) } if err != nil { return nil, fmt.Errorf("decompressing profile: %v", err) } } if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile { p, err = parseLegacy(data) } if err != nil { return nil, fmt.Errorf("parsing profile: %v", err) } if err := p.CheckValid(); err != nil { return nil, fmt.Errorf("malformed profile: %v", err) } return p, nil } var errUnrecognized = fmt.Errorf("unrecognized profile format") var errMalformed = fmt.Errorf("malformed profile format") var errNoData = fmt.Errorf("empty input file") var errConcatProfile = fmt.Errorf("concatenated profiles detected") func parseLegacy(data []byte) (*Profile, error) { parsers := []func([]byte) (*Profile, error){ parseCPU, parseHeap, parseGoCount, // goroutine, threadcreate parseThread, parseContention, parseJavaProfile, } for _, parser := range parsers { p, err := parser(data) if err == nil { p.addLegacyFrameInfo() return p, nil } if err != errUnrecognized { return nil, err } } return nil, errUnrecognized } // ParseUncompressed parses an uncompressed protobuf into a profile. func ParseUncompressed(data []byte) (*Profile, error) { if len(data) == 0 { return nil, errNoData } p := &Profile{} if err := unmarshal(data, p); err != nil { return nil, err } if err := p.postDecode(); err != nil { return nil, err } return p, nil } var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`) // massageMappings applies heuristic-based changes to the profile // mappings to account for quirks of some environments. func (p *Profile) massageMappings() { // Merge adjacent regions with matching names, checking that the offsets match if len(p.Mapping) > 1 { mappings := []*Mapping{p.Mapping[0]} for _, m := range p.Mapping[1:] { lm := mappings[len(mappings)-1] if adjacent(lm, m) { lm.Limit = m.Limit if m.File != "" { lm.File = m.File } if m.BuildID != "" { lm.BuildID = m.BuildID } p.updateLocationMapping(m, lm) continue } mappings = append(mappings, m) } p.Mapping = mappings } // Use heuristics to identify main binary and move it to the top of the list of mappings for i, m := range p.Mapping { file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1)) if len(file) == 0 { continue } if len(libRx.FindStringSubmatch(file)) > 0 { continue } if file[0] == '[' { continue } // Swap what we guess is main to position 0. 
p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0] break } // Keep the mapping IDs neatly sorted for i, m := range p.Mapping { m.ID = uint64(i + 1) } } // adjacent returns whether two mapping entries represent the same // mapping that has been split into two. Check that their addresses are adjacent, // and if the offsets match, if they are available. func adjacent(m1, m2 *Mapping) bool { if m1.File != "" && m2.File != "" { if m1.File != m2.File { return false } } if m1.BuildID != "" && m2.BuildID != "" { if m1.BuildID != m2.BuildID { return false } } if m1.Limit != m2.Start { return false } if m1.Offset != 0 && m2.Offset != 0 { offset := m1.Offset + (m1.Limit - m1.Start) if offset != m2.Offset { return false } } return true } func (p *Profile) updateLocationMapping(from, to *Mapping) { for _, l := range p.Location { if l.Mapping == from { l.Mapping = to } } } func serialize(p *Profile) []byte { p.encodeMu.Lock() p.preEncode() b := marshal(p) p.encodeMu.Unlock() return b } // Write writes the profile as a gzip-compressed marshaled protobuf. func (p *Profile) Write(w io.Writer) error { zw := gzip.NewWriter(w) defer zw.Close() _, err := zw.Write(serialize(p)) return err } // WriteUncompressed writes the profile as a marshaled protobuf. func (p *Profile) WriteUncompressed(w io.Writer) error { _, err := w.Write(serialize(p)) return err } // CheckValid tests whether the profile is valid. Checks include, but are // not limited to: // - len(Profile.Sample[n].value) == len(Profile.value_unit) // - Sample.id has a corresponding Profile.Location func (p *Profile) CheckValid() error { // Check that sample values are consistent sampleLen := len(p.SampleType) if sampleLen == 0 && len(p.Sample) != 0 { return fmt.Errorf("missing sample type information") } for _, s := range p.Sample { if s == nil { return fmt.Errorf("profile has nil sample") } if len(s.Value) != sampleLen { return fmt.Errorf("mismatch: sample has %d values vs. 
%d types", len(s.Value), len(p.SampleType)) } for _, l := range s.Location { if l == nil { return fmt.Errorf("sample has nil location") } } } // Check that all mappings/locations/functions are in the tables // Check that there are no duplicate ids mappings := make(map[uint64]*Mapping, len(p.Mapping)) for _, m := range p.Mapping { if m == nil { return fmt.Errorf("profile has nil mapping") } if m.ID == 0 { return fmt.Errorf("found mapping with reserved ID=0") } if mappings[m.ID] != nil { return fmt.Errorf("multiple mappings with same id: %d", m.ID) } mappings[m.ID] = m } functions := make(map[uint64]*Function, len(p.Function)) for _, f := range p.Function { if f == nil { return fmt.Errorf("profile has nil function") } if f.ID == 0 { return fmt.Errorf("found function with reserved ID=0") } if functions[f.ID] != nil { return fmt.Errorf("multiple functions with same id: %d", f.ID) } functions[f.ID] = f } locations := make(map[uint64]*Location, len(p.Location)) for _, l := range p.Location { if l == nil { return fmt.Errorf("profile has nil location") } if l.ID == 0 { return fmt.Errorf("found location with reserved id=0") } if locations[l.ID] != nil { return fmt.Errorf("multiple locations with same id: %d", l.ID) } locations[l.ID] = l if m := l.Mapping; m != nil { if m.ID == 0 || mappings[m.ID] != m { return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID) } } for _, ln := range l.Line { f := ln.Function if f == nil { return fmt.Errorf("location id: %d has a line with nil function", l.ID) } if f.ID == 0 || functions[f.ID] != f { return fmt.Errorf("inconsistent function %p: %d", f, f.ID) } } } return nil } // Aggregate merges the locations in the profile into equivalence // classes preserving the request attributes. It also updates the // samples to point to the merged locations. func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error { for _, m := range p.Mapping { m.HasInlineFrames = m.HasInlineFrames && inlineFrame m.HasFunctions = m.HasFunctions && function m.HasFilenames = m.HasFilenames && filename m.HasLineNumbers = m.HasLineNumbers && linenumber } // Aggregate functions if !function || !filename { for _, f := range p.Function { if !function { f.Name = "" f.SystemName = "" } if !filename { f.Filename = "" } } } // Aggregate locations if !inlineFrame || !address || !linenumber || !columnnumber { for _, l := range p.Location { if !inlineFrame && len(l.Line) > 1 { l.Line = l.Line[len(l.Line)-1:] } if !linenumber { for i := range l.Line { l.Line[i].Line = 0 l.Line[i].Column = 0 } } if !columnnumber { for i := range l.Line { l.Line[i].Column = 0 } } if !address { l.Address = 0 } } } return p.CheckValid() } // NumLabelUnits returns a map of numeric label keys to the units // associated with those keys and a map of those keys to any units // that were encountered but not used. // Unit for a given key is the first encountered unit for that key. If multiple // units are encountered for values paired with a particular key, then the first // unit encountered is used and all other units are returned in sorted order // in map of ignored units. // If no units are encountered for a particular key, the unit is then inferred // based on the key. func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) { numLabelUnits := map[string]string{} ignoredUnits := map[string]map[string]bool{} encounteredKeys := map[string]bool{} // Determine units based on numeric tags for each sample. 
for _, s := range p.Sample { for k := range s.NumLabel { encounteredKeys[k] = true for _, unit := range s.NumUnit[k] { if unit == "" { continue } if wantUnit, ok := numLabelUnits[k]; !ok { numLabelUnits[k] = unit } else if wantUnit != unit { if v, ok := ignoredUnits[k]; ok { v[unit] = true } else { ignoredUnits[k] = map[string]bool{unit: true} } } } } } // Infer units for keys without any units associated with // numeric tag values. for key := range encounteredKeys { unit := numLabelUnits[key] if unit == "" { switch key { case "alignment", "request": numLabelUnits[key] = "bytes" default: numLabelUnits[key] = key } } } // Copy ignored units into more readable format unitsIgnored := make(map[string][]string, len(ignoredUnits)) for key, values := range ignoredUnits { units := make([]string, len(values)) i := 0 for unit := range values { units[i] = unit i++ } sort.Strings(units) unitsIgnored[key] = units } return numLabelUnits, unitsIgnored } // String dumps a text representation of a profile. Intended mainly // for debugging purposes. func (p *Profile) String() string { ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location)) for _, c := range p.Comments { ss = append(ss, "Comment: "+c) } if pt := p.PeriodType; pt != nil { ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) } ss = append(ss, fmt.Sprintf("Period: %d", p.Period)) if p.TimeNanos != 0 { ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos))) } if p.DurationNanos != 0 { ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos))) } ss = append(ss, "Samples:") var sh1 string for _, s := range p.SampleType { dflt := "" if s.Type == p.DefaultSampleType { dflt = "[dflt]" } sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt) } ss = append(ss, strings.TrimSpace(sh1)) for _, s := range p.Sample { ss = append(ss, s.string()) } ss = append(ss, "Locations") for _, l := range p.Location { ss = append(ss, l.string()) } ss = append(ss, "Mappings") for _, m := range p.Mapping { ss = append(ss, m.string()) } return strings.Join(ss, "\n") + "\n" } // string dumps a text representation of a mapping. Intended mainly // for debugging purposes. func (m *Mapping) string() string { bits := "" if m.HasFunctions { bits = bits + "[FN]" } if m.HasFilenames { bits = bits + "[FL]" } if m.HasLineNumbers { bits = bits + "[LN]" } if m.HasInlineFrames { bits = bits + "[IN]" } return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", m.ID, m.Start, m.Limit, m.Offset, m.File, m.BuildID, bits) } // string dumps a text representation of a location. Intended mainly // for debugging purposes. func (l *Location) string() string { ss := []string{} locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address) if m := l.Mapping; m != nil { locStr = locStr + fmt.Sprintf("M=%d ", m.ID) } if l.IsFolded { locStr = locStr + "[F] " } if len(l.Line) == 0 { ss = append(ss, locStr) } for li := range l.Line { lnStr := "??" if fn := l.Line[li].Function; fn != nil { lnStr = fmt.Sprintf("%s %s:%d:%d s=%d", fn.Name, fn.Filename, l.Line[li].Line, l.Line[li].Column, fn.StartLine) if fn.Name != fn.SystemName { lnStr = lnStr + "(" + fn.SystemName + ")" } } ss = append(ss, locStr+lnStr) // Do not print location details past the first line locStr = " " } return strings.Join(ss, "\n") } // string dumps a text representation of a sample. Intended mainly // for debugging purposes. 
func (s *Sample) string() string { ss := []string{} var sv string for _, v := range s.Value { sv = fmt.Sprintf("%s %10d", sv, v) } sv = sv + ": " for _, l := range s.Location { sv = sv + fmt.Sprintf("%d ", l.ID) } ss = append(ss, sv) const labelHeader = " " if len(s.Label) > 0 { ss = append(ss, labelHeader+labelsToString(s.Label)) } if len(s.NumLabel) > 0 { ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit)) } return strings.Join(ss, "\n") } // labelsToString returns a string representation of a // map representing labels. func labelsToString(labels map[string][]string) string { ls := []string{} for k, v := range labels { ls = append(ls, fmt.Sprintf("%s:%v", k, v)) } sort.Strings(ls) return strings.Join(ls, " ") } // numLabelsToString returns a string representation of a map // representing numeric labels. func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string { ls := []string{} for k, v := range numLabels { units := numUnits[k] var labelString string if len(units) == len(v) { values := make([]string, len(v)) for i, vv := range v { values[i] = fmt.Sprintf("%d %s", vv, units[i]) } labelString = fmt.Sprintf("%s:%v", k, values) } else { labelString = fmt.Sprintf("%s:%v", k, v) } ls = append(ls, labelString) } sort.Strings(ls) return strings.Join(ls, " ") } // SetLabel sets the specified key to the specified value for all samples in the // profile. func (p *Profile) SetLabel(key string, value []string) { for _, sample := range p.Sample { if sample.Label == nil { sample.Label = map[string][]string{key: value} } else { sample.Label[key] = value } } } // RemoveLabel removes all labels associated with the specified key for all // samples in the profile. func (p *Profile) RemoveLabel(key string) { for _, sample := range p.Sample { delete(sample.Label, key) } } // HasLabel returns true if a sample has a label with indicated key and value. func (s *Sample) HasLabel(key, value string) bool { for _, v := range s.Label[key] { if v == value { return true } } return false } // SetNumLabel sets the specified key to the specified value for all samples in the // profile. "unit" is a slice that describes the units that each corresponding member // of "values" is measured in (e.g. bytes or seconds). If there is no relevant // unit for a given value, that member of "unit" should be the empty string. // "unit" must either have the same length as "value", or be nil. func (p *Profile) SetNumLabel(key string, value []int64, unit []string) { for _, sample := range p.Sample { if sample.NumLabel == nil { sample.NumLabel = map[string][]int64{key: value} } else { sample.NumLabel[key] = value } if sample.NumUnit == nil { sample.NumUnit = map[string][]string{key: unit} } else { sample.NumUnit[key] = unit } } } // RemoveNumLabel removes all numerical labels associated with the specified key for all // samples in the profile. func (p *Profile) RemoveNumLabel(key string) { for _, sample := range p.Sample { delete(sample.NumLabel, key) delete(sample.NumUnit, key) } } // DiffBaseSample returns true if a sample belongs to the diff base and false // otherwise. func (s *Sample) DiffBaseSample() bool { return s.HasLabel("pprof::base", "true") } // Scale multiplies all sample values in a profile by a constant and keeps // only samples that have at least one non-zero value. 
func (p *Profile) Scale(ratio float64) { if ratio == 1 { return } ratios := make([]float64, len(p.SampleType)) for i := range p.SampleType { ratios[i] = ratio } p.ScaleN(ratios) } // ScaleN multiplies each sample values in a sample by a different amount // and keeps only samples that have at least one non-zero value. func (p *Profile) ScaleN(ratios []float64) error { if len(p.SampleType) != len(ratios) { return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType)) } allOnes := true for _, r := range ratios { if r != 1 { allOnes = false break } } if allOnes { return nil } fillIdx := 0 for _, s := range p.Sample { keepSample := false for i, v := range s.Value { if ratios[i] != 1 { val := int64(math.Round(float64(v) * ratios[i])) s.Value[i] = val keepSample = keepSample || val != 0 } } if keepSample { p.Sample[fillIdx] = s fillIdx++ } } p.Sample = p.Sample[:fillIdx] return nil } // HasFunctions determines if all locations in this profile have // symbolized function information. func (p *Profile) HasFunctions() bool { for _, l := range p.Location { if l.Mapping != nil && !l.Mapping.HasFunctions { return false } } return true } // HasFileLines determines if all locations in this profile have // symbolized file and line number information. func (p *Profile) HasFileLines() bool { for _, l := range p.Location { if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) { return false } } return true } // Unsymbolizable returns true if a mapping points to a binary for which // locations can't be symbolized in principle, at least now. Examples are // "[vdso]", [vsyscall]" and some others, see the code. func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" } // Copy makes a fully independent copy of a profile. func (p *Profile) Copy() *Profile { pp := &Profile{} if err := unmarshal(serialize(p), pp); err != nil { panic(err) } if err := pp.postDecode(); err != nil { panic(err) } return pp }
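// Illustrative usage sketch, not part of the original file: what Scale does
// to a profile's samples, assuming the package is imported by its upstream
// path github.com/google/pprof/profile. Each value is scaled with rounding,
// and samples whose values all become zero are dropped.
package main

import (
	"fmt"

	"github.com/google/pprof/profile"
)

func main() {
	p := &profile.Profile{
		SampleType: []*profile.ValueType{{Type: "samples", Unit: "count"}},
		Sample: []*profile.Sample{
			{Value: []int64{10}}, // becomes 5
			{Value: []int64{1}},  // math.Round(0.5) == 1, so it survives
			{Value: []int64{0}},  // all-zero after scaling: dropped
		},
	}
	p.Scale(0.5)
	for _, s := range p.Sample {
		fmt.Println(s.Value) // prints [5], then [1]
	}
}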
go/src/cmd/vendor/github.com/google/pprof/profile/profile.go
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package armasm import ( "encoding/binary" "fmt" ) // An instFormat describes the format of an instruction encoding. // An instruction with 32-bit value x matches the format if x&mask == value // and the condition matches. // The condition matches if x>>28 == 0xF && value>>28==0xF // or if x>>28 != 0xF and value>>28 == 0. // If x matches the format, then the rest of the fields describe how to interpret x. // The opBits describe bits that should be extracted from x and added to the opcode. // For example opBits = 0x1234 means that the value // // (2 bits at offset 1) followed by (4 bits at offset 3) // // should be added to op. // Finally the args describe how to decode the instruction arguments. // args is stored as a fixed-size array; if there are fewer than len(args) arguments, // args[i] == 0 marks the end of the argument list. type instFormat struct { mask uint32 value uint32 priority int8 op Op opBits uint64 args instArgs } type instArgs [4]instArg var ( errMode = fmt.Errorf("unsupported execution mode") errShort = fmt.Errorf("truncated instruction") errUnknown = fmt.Errorf("unknown instruction") ) var decoderCover []bool // Decode decodes the leading bytes in src as a single instruction. func Decode(src []byte, mode Mode) (inst Inst, err error) { if mode != ModeARM { return Inst{}, errMode } if len(src) < 4 { return Inst{}, errShort } if decoderCover == nil { decoderCover = make([]bool, len(instFormats)) } x := binary.LittleEndian.Uint32(src) // The instFormat table contains both conditional and unconditional instructions. // Considering only the top 4 bits, the conditional instructions use mask=0, value=0, // while the unconditional instructions use mask=f, value=f. // Prepare a version of x with the condition cleared to 0 in conditional instructions // and then assume mask=f during matching. const condMask = 0xf0000000 xNoCond := x if x&condMask != condMask { xNoCond &^= condMask } var priority int8 Search: for i := range instFormats { f := &instFormats[i] if xNoCond&(f.mask|condMask) != f.value || f.priority <= priority { continue } delta := uint32(0) deltaShift := uint(0) for opBits := f.opBits; opBits != 0; opBits >>= 16 { n := uint(opBits & 0xFF) off := uint((opBits >> 8) & 0xFF) delta |= (x >> off) & (1<<n - 1) << deltaShift deltaShift += n } op := f.op + Op(delta) // Special case: BKPT encodes with condition but cannot have one. if op&^15 == BKPT_EQ && op != BKPT { continue Search } var args Args for j, aop := range f.args { if aop == 0 { break } arg := decodeArg(aop, x) if arg == nil { // cannot decode argument continue Search } args[j] = arg } decoderCover[i] = true inst = Inst{ Op: op, Args: args, Enc: x, Len: 4, } priority = f.priority continue Search } if inst.Op != 0 { return inst, nil } return Inst{}, errUnknown } // An instArg describes the encoding of a single argument. // In the names used for arguments, _p_ means +, _m_ means -, // _pm_ means ± (usually keyed by the U bit). // The _W suffix indicates a general addressing mode based on the P and W bits. // The _offset and _postindex suffixes force the given addressing mode. // The rest should be somewhat self-explanatory, at least given // the decodeArg function. 
type instArg uint8 const ( _ instArg = iota arg_APSR arg_FPSCR arg_Dn_half arg_R1_0 arg_R1_12 arg_R2_0 arg_R2_12 arg_R_0 arg_R_12 arg_R_12_nzcv arg_R_16 arg_R_16_WB arg_R_8 arg_R_rotate arg_R_shift_R arg_R_shift_imm arg_SP arg_Sd arg_Sd_Dd arg_Dd_Sd arg_Sm arg_Sm_Dm arg_Sn arg_Sn_Dn arg_const arg_endian arg_fbits arg_fp_0 arg_imm24 arg_imm5 arg_imm5_32 arg_imm5_nz arg_imm_12at8_4at0 arg_imm_4at16_12at0 arg_imm_vfp arg_label24 arg_label24H arg_label_m_12 arg_label_p_12 arg_label_pm_12 arg_label_pm_4_4 arg_lsb_width arg_mem_R arg_mem_R_pm_R_W arg_mem_R_pm_R_postindex arg_mem_R_pm_R_shift_imm_W arg_mem_R_pm_R_shift_imm_offset arg_mem_R_pm_R_shift_imm_postindex arg_mem_R_pm_imm12_W arg_mem_R_pm_imm12_offset arg_mem_R_pm_imm12_postindex arg_mem_R_pm_imm8_W arg_mem_R_pm_imm8_postindex arg_mem_R_pm_imm8at0_offset arg_option arg_registers arg_registers1 arg_registers2 arg_satimm4 arg_satimm5 arg_satimm4m1 arg_satimm5m1 arg_widthm1 ) // decodeArg decodes the arg described by aop from the instruction bits x. // It returns nil if x cannot be decoded according to aop. func decodeArg(aop instArg, x uint32) Arg { switch aop { default: return nil case arg_APSR: return APSR case arg_FPSCR: return FPSCR case arg_R_0: return Reg(x & (1<<4 - 1)) case arg_R_8: return Reg((x >> 8) & (1<<4 - 1)) case arg_R_12: return Reg((x >> 12) & (1<<4 - 1)) case arg_R_16: return Reg((x >> 16) & (1<<4 - 1)) case arg_R_12_nzcv: r := Reg((x >> 12) & (1<<4 - 1)) if r == R15 { return APSR_nzcv } return r case arg_R_16_WB: mode := AddrLDM if (x>>21)&1 != 0 { mode = AddrLDM_WB } return Mem{Base: Reg((x >> 16) & (1<<4 - 1)), Mode: mode} case arg_R_rotate: Rm := Reg(x & (1<<4 - 1)) typ, count := decodeShift(x) // ROR #0 here means ROR #0, but decodeShift rewrites to RRX #1. if typ == RotateRightExt { return Rm } return RegShift{Rm, typ, count} case arg_R_shift_R: Rm := Reg(x & (1<<4 - 1)) Rs := Reg((x >> 8) & (1<<4 - 1)) typ := Shift((x >> 5) & (1<<2 - 1)) return RegShiftReg{Rm, typ, Rs} case arg_R_shift_imm: Rm := Reg(x & (1<<4 - 1)) typ, count := decodeShift(x) if typ == ShiftLeft && count == 0 { return Reg(Rm) } return RegShift{Rm, typ, count} case arg_R1_0: return Reg((x & (1<<4 - 1))) case arg_R1_12: return Reg(((x >> 12) & (1<<4 - 1))) case arg_R2_0: return Reg((x & (1<<4 - 1)) | 1) case arg_R2_12: return Reg(((x >> 12) & (1<<4 - 1)) | 1) case arg_SP: return SP case arg_Sd_Dd: v := (x >> 12) & (1<<4 - 1) vx := (x >> 22) & 1 sz := (x >> 8) & 1 if sz != 0 { return D0 + Reg(vx<<4+v) } else { return S0 + Reg(v<<1+vx) } case arg_Dd_Sd: return decodeArg(arg_Sd_Dd, x^(1<<8)) case arg_Sd: v := (x >> 12) & (1<<4 - 1) vx := (x >> 22) & 1 return S0 + Reg(v<<1+vx) case arg_Sm_Dm: v := (x >> 0) & (1<<4 - 1) vx := (x >> 5) & 1 sz := (x >> 8) & 1 if sz != 0 { return D0 + Reg(vx<<4+v) } else { return S0 + Reg(v<<1+vx) } case arg_Sm: v := (x >> 0) & (1<<4 - 1) vx := (x >> 5) & 1 return S0 + Reg(v<<1+vx) case arg_Dn_half: v := (x >> 16) & (1<<4 - 1) vx := (x >> 7) & 1 return RegX{D0 + Reg(vx<<4+v), int((x >> 21) & 1)} case arg_Sn_Dn: v := (x >> 16) & (1<<4 - 1) vx := (x >> 7) & 1 sz := (x >> 8) & 1 if sz != 0 { return D0 + Reg(vx<<4+v) } else { return S0 + Reg(v<<1+vx) } case arg_Sn: v := (x >> 16) & (1<<4 - 1) vx := (x >> 7) & 1 return S0 + Reg(v<<1+vx) case arg_const: v := x & (1<<8 - 1) rot := (x >> 8) & (1<<4 - 1) * 2 if rot > 0 && v&3 == 0 { // could rotate less return ImmAlt{uint8(v), uint8(rot)} } if rot >= 24 && ((v<<(32-rot))&0xFF)>>(32-rot) == v { // could wrap around to rot==0. 
return ImmAlt{uint8(v), uint8(rot)} } return Imm(v>>rot | v<<(32-rot)) case arg_endian: return Endian((x >> 9) & 1) case arg_fbits: return Imm((16 << ((x >> 7) & 1)) - ((x&(1<<4-1))<<1 | (x>>5)&1)) case arg_fp_0: return Imm(0) case arg_imm24: return Imm(x & (1<<24 - 1)) case arg_imm5: return Imm((x >> 7) & (1<<5 - 1)) case arg_imm5_32: x = (x >> 7) & (1<<5 - 1) if x == 0 { x = 32 } return Imm(x) case arg_imm5_nz: x = (x >> 7) & (1<<5 - 1) if x == 0 { return nil } return Imm(x) case arg_imm_4at16_12at0: return Imm((x>>16)&(1<<4-1)<<12 | x&(1<<12-1)) case arg_imm_12at8_4at0: return Imm((x>>8)&(1<<12-1)<<4 | x&(1<<4-1)) case arg_imm_vfp: x = (x>>16)&(1<<4-1)<<4 | x&(1<<4-1) return Imm(x) case arg_label24: imm := (x & (1<<24 - 1)) << 2 return PCRel(int32(imm<<6) >> 6) case arg_label24H: h := (x >> 24) & 1 imm := (x&(1<<24-1))<<2 | h<<1 return PCRel(int32(imm<<6) >> 6) case arg_label_m_12: d := int32(x & (1<<12 - 1)) return Mem{Base: PC, Mode: AddrOffset, Offset: int16(-d)} case arg_label_p_12: d := int32(x & (1<<12 - 1)) return Mem{Base: PC, Mode: AddrOffset, Offset: int16(d)} case arg_label_pm_12: d := int32(x & (1<<12 - 1)) u := (x >> 23) & 1 if u == 0 { d = -d } return Mem{Base: PC, Mode: AddrOffset, Offset: int16(d)} case arg_label_pm_4_4: d := int32((x>>8)&(1<<4-1)<<4 | x&(1<<4-1)) u := (x >> 23) & 1 if u == 0 { d = -d } return PCRel(d) case arg_lsb_width: lsb := (x >> 7) & (1<<5 - 1) msb := (x >> 16) & (1<<5 - 1) if msb < lsb || msb >= 32 { return nil } return Imm(msb + 1 - lsb) case arg_mem_R: Rn := Reg((x >> 16) & (1<<4 - 1)) return Mem{Base: Rn, Mode: AddrOffset} case arg_mem_R_pm_R_postindex: // Treat [<Rn>],+/-<Rm> like [<Rn>,+/-<Rm>{,<shift>}]{!} // by forcing shift bits to <<0 and P=0, W=0 (postindex=true). return decodeArg(arg_mem_R_pm_R_shift_imm_W, x&^((1<<7-1)<<5|1<<24|1<<21)) case arg_mem_R_pm_R_W: // Treat [<Rn>,+/-<Rm>]{!} like [<Rn>,+/-<Rm>{,<shift>}]{!} // by forcing shift bits to <<0. return decodeArg(arg_mem_R_pm_R_shift_imm_W, x&^((1<<7-1)<<5)) case arg_mem_R_pm_R_shift_imm_offset: // Treat [<Rn>],+/-<Rm>{,<shift>} like [<Rn>,+/-<Rm>{,<shift>}]{!} // by forcing P=1, W=0 (index=false, wback=false). return decodeArg(arg_mem_R_pm_R_shift_imm_W, x&^(1<<21)|1<<24) case arg_mem_R_pm_R_shift_imm_postindex: // Treat [<Rn>],+/-<Rm>{,<shift>} like [<Rn>,+/-<Rm>{,<shift>}]{!} // by forcing P=0, W=0 (postindex=true). return decodeArg(arg_mem_R_pm_R_shift_imm_W, x&^(1<<24|1<<21)) case arg_mem_R_pm_R_shift_imm_W: Rn := Reg((x >> 16) & (1<<4 - 1)) Rm := Reg(x & (1<<4 - 1)) typ, count := decodeShift(x) u := (x >> 23) & 1 w := (x >> 21) & 1 p := (x >> 24) & 1 if p == 0 && w == 1 { return nil } sign := int8(+1) if u == 0 { sign = -1 } mode := AddrMode(uint8(p<<1) | uint8(w^1)) return Mem{Base: Rn, Mode: mode, Sign: sign, Index: Rm, Shift: typ, Count: count} case arg_mem_R_pm_imm12_offset: // Treat [<Rn>,#+/-<imm12>] like [<Rn>{,#+/-<imm12>}]{!} // by forcing P=1, W=0 (index=false, wback=false). return decodeArg(arg_mem_R_pm_imm12_W, x&^(1<<21)|1<<24) case arg_mem_R_pm_imm12_postindex: // Treat [<Rn>],#+/-<imm12> like [<Rn>{,#+/-<imm12>}]{!} // by forcing P=0, W=0 (postindex=true). 
return decodeArg(arg_mem_R_pm_imm12_W, x&^(1<<24|1<<21)) case arg_mem_R_pm_imm12_W: Rn := Reg((x >> 16) & (1<<4 - 1)) u := (x >> 23) & 1 w := (x >> 21) & 1 p := (x >> 24) & 1 if p == 0 && w == 1 { return nil } sign := int8(+1) if u == 0 { sign = -1 } imm := int16(x & (1<<12 - 1)) mode := AddrMode(uint8(p<<1) | uint8(w^1)) return Mem{Base: Rn, Mode: mode, Offset: int16(sign) * imm} case arg_mem_R_pm_imm8_postindex: // Treat [<Rn>],#+/-<imm8> like [<Rn>{,#+/-<imm8>}]{!} // by forcing P=0, W=0 (postindex=true). return decodeArg(arg_mem_R_pm_imm8_W, x&^(1<<24|1<<21)) case arg_mem_R_pm_imm8_W: Rn := Reg((x >> 16) & (1<<4 - 1)) u := (x >> 23) & 1 w := (x >> 21) & 1 p := (x >> 24) & 1 if p == 0 && w == 1 { return nil } sign := int8(+1) if u == 0 { sign = -1 } imm := int16((x>>8)&(1<<4-1)<<4 | x&(1<<4-1)) mode := AddrMode(uint8(p<<1) | uint8(w^1)) return Mem{Base: Rn, Mode: mode, Offset: int16(sign) * imm} case arg_mem_R_pm_imm8at0_offset: Rn := Reg((x >> 16) & (1<<4 - 1)) u := (x >> 23) & 1 sign := int8(+1) if u == 0 { sign = -1 } imm := int16(x&(1<<8-1)) << 2 return Mem{Base: Rn, Mode: AddrOffset, Offset: int16(sign) * imm} case arg_option: return Imm(x & (1<<4 - 1)) case arg_registers: return RegList(x & (1<<16 - 1)) case arg_registers2: x &= 1<<16 - 1 n := 0 for i := 0; i < 16; i++ { if x>>uint(i)&1 != 0 { n++ } } if n < 2 { return nil } return RegList(x) case arg_registers1: Rt := (x >> 12) & (1<<4 - 1) return RegList(1 << Rt) case arg_satimm4: return Imm((x >> 16) & (1<<4 - 1)) case arg_satimm5: return Imm((x >> 16) & (1<<5 - 1)) case arg_satimm4m1: return Imm((x>>16)&(1<<4-1) + 1) case arg_satimm5m1: return Imm((x>>16)&(1<<5-1) + 1) case arg_widthm1: return Imm((x>>16)&(1<<5-1) + 1) } } // decodeShift decodes the shift-by-immediate encoded in x. func decodeShift(x uint32) (Shift, uint8) { count := (x >> 7) & (1<<5 - 1) typ := Shift((x >> 5) & (1<<2 - 1)) switch typ { case ShiftRight, ShiftRightSigned: if count == 0 { count = 32 } case RotateRight: if count == 0 { typ = RotateRightExt count = 1 } } return typ, uint8(count) }
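// Illustrative usage sketch, not part of the original file: decoding one
// 32-bit ARM instruction with Decode, assuming the upstream import path
// golang.org/x/arch/arm/armasm. 0xE0821003 encodes ADD R1, R2, R3 with the
// AL (always) condition; the byte slice below is its little-endian layout.
package main

import (
	"fmt"

	"golang.org/x/arch/arm/armasm"
)

func main() {
	src := []byte{0x03, 0x10, 0x82, 0xE0} // little-endian 0xE0821003
	inst, err := armasm.Decode(src, armasm.ModeARM)
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(inst) // e.g. "ADD R1, R2, R3"
}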
go/src/cmd/vendor/golang.org/x/arch/arm/armasm/decode.go
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ppc64asm import ( "fmt" "strings" ) // A BitField is a bit-field in a 32-bit word. // Bits are counted from 0 from the MSB to 31 as the LSB. type BitField struct { Offs uint8 // the offset of the left-most bit. Bits uint8 // length in bits. // The index of the instruction word holding this field. // It is always 0 for ISA < 3.1 instructions, and is in // decoding order. (0 == prefix, 1 == suffix on ISA 3.1) Word uint8 } func (b BitField) String() string { if b.Bits > 1 { return fmt.Sprintf("[%d:%d]", b.Offs, int(b.Offs+b.Bits)-1) } else if b.Bits == 1 { return fmt.Sprintf("[%d]", b.Offs) } else { return fmt.Sprintf("[%d, len=0]", b.Offs) } } // Parse extracts the bitfield b from i, and returns it as an unsigned integer. // Parse will panic if b is invalid. func (b BitField) Parse(i [2]uint32) uint32 { if b.Bits > 32 || b.Bits == 0 || b.Offs > 31 || b.Offs+b.Bits > 32 { panic(fmt.Sprintf("invalid bitfield %v", b)) } return (i[b.Word] >> (32 - b.Offs - b.Bits)) & ((1 << b.Bits) - 1) } // ParseSigned extracts the bitfield b from i, and returns it as a signed integer. // ParseSigned will panic if b is invalid. func (b BitField) ParseSigned(i [2]uint32) int32 { u := int32(b.Parse(i)) return u << (32 - b.Bits) >> (32 - b.Bits) } // BitFields is a series of BitFields representing a single number. type BitFields []BitField func (bs BitFields) String() string { ss := make([]string, len(bs)) for i, bf := range bs { ss[i] = bf.String() } return fmt.Sprintf("<%s>", strings.Join(ss, "|")) } func (bs *BitFields) Append(b BitField) { *bs = append(*bs, b) } // parse extracts the bitfields from i, concatenates them and returns the result // as an unsigned integer and the total length of all the bitfields. // parse will panic if any bitfield in bs is invalid, but it doesn't check if // the sequence of bitfields is reasonable. func (bs BitFields) parse(i [2]uint32) (u uint64, Bits uint8) { for _, b := range bs { u = (u << b.Bits) | uint64(b.Parse(i)) Bits += b.Bits } return u, Bits } // Parse extracts the bitfields from i, concatenates them and returns the result // as an unsigned integer. Parse will panic if any bitfield in bs is invalid. func (bs BitFields) Parse(i [2]uint32) uint64 { u, _ := bs.parse(i) return u } // ParseSigned extracts the bitfields from i, concatenates them and returns the result // as a signed integer. ParseSigned will panic if any bitfield in bs is invalid. func (bs BitFields) ParseSigned(i [2]uint32) int64 { u, l := bs.parse(i) return int64(u) << (64 - l) >> (64 - l) } // NumBits returns the total number of bits in the aggregate BitFields. func (bs BitFields) NumBits() int { num := 0 for _, b := range bs { num += int(b.Bits) } return num }
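// Illustrative usage sketch, not part of the original file: extracting a
// field with BitField.Parse, assuming the upstream import path
// golang.org/x/arch/ppc64/ppc64asm. Bits are numbered 0 (MSB) through 31
// (LSB), so Offs=6, Bits=5 selects bits 6..10 of the first word.
package main

import (
	"fmt"

	"golang.org/x/arch/ppc64/ppc64asm"
)

func main() {
	// 0x7C221A14 is the instruction word for "add r1,r2,r3";
	// bits 6..10 hold the RT (target register) field.
	insn := [2]uint32{0x7C221A14, 0}
	rt := ppc64asm.BitField{Offs: 6, Bits: 5, Word: 0}
	fmt.Println(rt.Parse(insn)) // prints 1 (register r1)
}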
go/src/cmd/vendor/golang.org/x/arch/ppc64/ppc64asm/field.go
// Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package relnote supports working with release notes. // // Its main feature is the ability to merge Markdown fragments into a single // document. (See [Merge].) // // This package has minimal imports, so that it can be vendored into the // main go repo. package relnote import ( "bufio" "bytes" "errors" "fmt" "io" "io/fs" "path" "regexp" "slices" "strconv" "strings" md "rsc.io/markdown" ) // NewParser returns a properly configured Markdown parser. func NewParser() *md.Parser { var p md.Parser p.HeadingIDs = true return &p } // CheckFragment reports problems in a release-note fragment. func CheckFragment(data string) error { doc := NewParser().Parse(data) // Check that the content of the document contains either a TODO or at least one sentence. txt := "" if len(doc.Blocks) > 0 { txt = text(doc) } if !strings.Contains(txt, "TODO") && !strings.ContainsAny(txt, ".?!") { return errors.New("File must contain a complete sentence or a TODO.") } return nil } // text returns all the text in a block, without any formatting. func text(b md.Block) string { switch b := b.(type) { case *md.Document: return blocksText(b.Blocks) case *md.Heading: return text(b.Text) case *md.Text: return inlineText(b.Inline) case *md.CodeBlock: return strings.Join(b.Text, "\n") case *md.HTMLBlock: return strings.Join(b.Text, "\n") case *md.List: return blocksText(b.Items) case *md.Item: return blocksText(b.Blocks) case *md.Empty: return "" case *md.Paragraph: return text(b.Text) case *md.Quote: return blocksText(b.Blocks) case *md.ThematicBreak: return "---" default: panic(fmt.Sprintf("unknown block type %T", b)) } } // blocksText returns all the text in a slice of block nodes. func blocksText(bs []md.Block) string { var d strings.Builder for _, b := range bs { io.WriteString(&d, text(b)) fmt.Fprintln(&d) } return d.String() } // inlineText returns all the text in a slice of inline nodes. func inlineText(ins []md.Inline) string { var buf bytes.Buffer for _, in := range ins { in.PrintText(&buf) } return buf.String() } // Merge combines the markdown documents (files ending in ".md") in the tree rooted // at fsys into a single document. // The blocks of the documents are concatenated in lexicographic order by filename. // Headings with no content are removed. // The link keys must be unique, and are combined into a single map. // // Files in the "minor changes" directory (the unique directory matching the glob // "*stdlib/*minor") are named after the package to which they refer, and will have // the package heading inserted automatically and links to other standard library // symbols expanded automatically. For example, if a file *stdlib/minor/bytes/f.md // contains the text // // [Reader] implements [io.Reader]. // // then that will become // // [Reader](/pkg/bytes#Reader) implements [io.Reader](/pkg/io#Reader). func Merge(fsys fs.FS) (*md.Document, error) { filenames, err := sortedMarkdownFilenames(fsys) if err != nil { return nil, err } doc := &md.Document{Links: map[string]*md.Link{}} var prevPkg string // previous stdlib package, if any for _, filename := range filenames { newdoc, err := parseMarkdownFile(fsys, filename) if err != nil { return nil, err } if len(newdoc.Blocks) == 0 { continue } pkg := stdlibPackage(filename) // Autolink Go symbols. 
addSymbolLinks(newdoc, pkg) if len(doc.Blocks) > 0 { // If this is the first file of a new stdlib package under the "Minor changes // to the library" section, insert a heading for the package. if pkg != "" && pkg != prevPkg { h := stdlibPackageHeading(pkg, lastBlock(doc).Pos().EndLine) doc.Blocks = append(doc.Blocks, h) } prevPkg = pkg // Put a blank line between the current and new blocks, so that the end // of a file acts as a blank line. lastLine := lastBlock(doc).Pos().EndLine delta := lastLine + 2 - newdoc.Blocks[0].Pos().StartLine for _, b := range newdoc.Blocks { addLines(b, delta) } } // Append non-empty blocks to the result document. for _, b := range newdoc.Blocks { if _, ok := b.(*md.Empty); !ok { doc.Blocks = append(doc.Blocks, b) } } // Merge link references. for key, link := range newdoc.Links { if doc.Links[key] != nil { return nil, fmt.Errorf("duplicate link reference %q; second in %s", key, filename) } doc.Links[key] = link } } // Remove headings with empty contents. doc.Blocks = removeEmptySections(doc.Blocks) if len(doc.Blocks) > 0 && len(doc.Links) > 0 { // Add a blank line to separate the links. lastPos := lastBlock(doc).Pos() lastPos.StartLine += 2 lastPos.EndLine += 2 doc.Blocks = append(doc.Blocks, &md.Empty{Position: lastPos}) } return doc, nil } // stdlibPackage returns the standard library package for the given filename. // If the filename does not represent a package, it returns the empty string. // A filename represents package P if it is in a directory matching the glob // "*stdlib/*minor/P". func stdlibPackage(filename string) string { dir, rest, _ := strings.Cut(filename, "/") if !strings.HasSuffix(dir, "stdlib") { return "" } dir, rest, _ = strings.Cut(rest, "/") if !strings.HasSuffix(dir, "minor") { return "" } pkg := path.Dir(rest) if pkg == "." { return "" } return pkg } func stdlibPackageHeading(pkg string, lastLine int) *md.Heading { line := lastLine + 2 pos := md.Position{StartLine: line, EndLine: line} return &md.Heading{ Position: pos, Level: 4, Text: &md.Text{ Position: pos, Inline: []md.Inline{ &md.Link{ Inner: []md.Inline{&md.Code{Text: pkg}}, URL: "/pkg/" + pkg + "/", }, }, }, } } // removeEmptySections removes headings with no content. A heading has no content // if there are no blocks between it and the next heading at the same level, or the // end of the document. func removeEmptySections(bs []md.Block) []md.Block { res := bs[:0] delta := 0 // number of lines by which to adjust positions // Remove preceding headings at same or higher level; they are empty. rem := func(level int) { for len(res) > 0 { last := res[len(res)-1] if lh, ok := last.(*md.Heading); ok && lh.Level >= level { res = res[:len(res)-1] // Adjust subsequent block positions by the size of this block // plus 1 for the blank line between headings. delta += lh.EndLine - lh.StartLine + 2 } else { break } } } for _, b := range bs { if h, ok := b.(*md.Heading); ok { rem(h.Level) } addLines(b, -delta) res = append(res, b) } // Remove empty headings at the end of the document. rem(1) return res } func sortedMarkdownFilenames(fsys fs.FS) ([]string, error) { var filenames []string err := fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error { if err != nil { return err } if !d.IsDir() && strings.HasSuffix(path, ".md") { filenames = append(filenames, path) } return nil }) if err != nil { return nil, err } // '.' comes before '/', which comes before alphanumeric characters. // So just sorting the list will put a filename like "net.md" before // the directory "net". 
That is what we want. slices.Sort(filenames) return filenames, nil } // lastBlock returns the last block in the document. // It panics if the document has no blocks. func lastBlock(doc *md.Document) md.Block { return doc.Blocks[len(doc.Blocks)-1] } // addLines adds n lines to the position of b. // n can be negative. func addLines(b md.Block, n int) { pos := position(b) pos.StartLine += n pos.EndLine += n } func position(b md.Block) *md.Position { switch b := b.(type) { case *md.Heading: return &b.Position case *md.Text: return &b.Position case *md.CodeBlock: return &b.Position case *md.HTMLBlock: return &b.Position case *md.List: return &b.Position case *md.Item: return &b.Position case *md.Empty: return &b.Position case *md.Paragraph: return &b.Position case *md.Quote: return &b.Position case *md.ThematicBreak: return &b.Position default: panic(fmt.Sprintf("unknown block type %T", b)) } } func parseMarkdownFile(fsys fs.FS, path string) (*md.Document, error) { f, err := fsys.Open(path) if err != nil { return nil, err } defer f.Close() data, err := io.ReadAll(f) if err != nil { return nil, err } in := string(data) doc := NewParser().Parse(in) return doc, nil } // An APIFeature is a symbol mentioned in an API file, // like the ones in the main go repo in the api directory. type APIFeature struct { Package string // package that the feature is in Build string // build that the symbol is relevant for (e.g. GOOS, GOARCH) Feature string // everything about the feature other than the package Issue int // the issue that introduced the feature, or 0 if none } // This regexp has four capturing groups: package, build, feature and issue. var apiFileLineRegexp = regexp.MustCompile(`^pkg ([^ \t]+)[ \t]*(\([^)]+\))?, ([^#]*)(#\d+)?$`) // parseAPIFile parses a file in the api format and returns a list of the file's features. // A feature is represented by a single line that looks like // // pkg PKG (BUILD) FEATURE #ISSUE // // where the BUILD and ISSUE may be absent. func parseAPIFile(fsys fs.FS, filename string) ([]APIFeature, error) { f, err := fsys.Open(filename) if err != nil { return nil, err } defer f.Close() var features []APIFeature scan := bufio.NewScanner(f) for scan.Scan() { line := strings.TrimSpace(scan.Text()) if line == "" || line[0] == '#' { continue } matches := apiFileLineRegexp.FindStringSubmatch(line) if len(matches) == 0 { return nil, fmt.Errorf("%s: malformed line %q", filename, line) } if len(matches) != 5 { return nil, fmt.Errorf("wrong number of matches for line %q", line) } f := APIFeature{ Package: matches[1], Build: matches[2], Feature: strings.TrimSpace(matches[3]), } if issue := matches[4]; issue != "" { var err error f.Issue, err = strconv.Atoi(issue[1:]) // skip leading '#' if err != nil { return nil, err } } features = append(features, f) } if scan.Err() != nil { return nil, scan.Err() } return features, nil } // GroupAPIFeaturesByFile returns a map of the given features keyed by // the doc filename that they are associated with. // A feature with package P and issue N should be documented in the file // "P/N.md". func GroupAPIFeaturesByFile(fs []APIFeature) (map[string][]APIFeature, error) { m := map[string][]APIFeature{} for _, f := range fs { if f.Issue == 0 { return nil, fmt.Errorf("%+v: zero issue", f) } filename := fmt.Sprintf("%s/%d.md", f.Package, f.Issue) m[filename] = append(m[filename], f) } return m, nil } // CheckAPIFile reads the api file at filename in apiFS, and checks the corresponding // release-note files under docFS. 
It checks that the files exist and that they have // some minimal content (see [CheckFragment]). // The docRoot argument is the path from the repo or project root to the root of docFS. // It is used only for error messages. func CheckAPIFile(apiFS fs.FS, filename string, docFS fs.FS, docRoot string) error { features, err := parseAPIFile(apiFS, filename) if err != nil { return err } byFile, err := GroupAPIFeaturesByFile(features) if err != nil { return err } var filenames []string for fn := range byFile { filenames = append(filenames, fn) } slices.Sort(filenames) mcDir, err := minorChangesDir(docFS) if err != nil { return err } var errs []error for _, fn := range filenames { // Use path.Join for consistency with io/fs pathnames. fn = path.Join(mcDir, fn) // TODO(jba): check that the file mentions each feature? if err := checkFragmentFile(docFS, fn); err != nil { errs = append(errs, fmt.Errorf("%s: %v\nSee doc/README.md for more information.", path.Join(docRoot, fn), err)) } } return errors.Join(errs...) } // minorChangesDir returns the unique directory in docFS that corresponds to the // "Minor changes to the standard library" section of the release notes. func minorChangesDir(docFS fs.FS) (string, error) { dirs, err := fs.Glob(docFS, "*stdlib/*minor") if err != nil { return "", err } var bad string if len(dirs) == 0 { bad = "No" } else if len(dirs) > 1 { bad = "More than one" } if bad != "" { return "", fmt.Errorf("%s directory matches *stdlib/*minor.\nThis shouldn't happen; please file a bug at https://go.dev/issues/new.", bad) } return dirs[0], nil } func checkFragmentFile(fsys fs.FS, filename string) error { f, err := fsys.Open(filename) if err != nil { if errors.Is(err, fs.ErrNotExist) { err = errors.New("File does not exist. Every API change must have a corresponding release note file.") } return err } defer f.Close() data, err := io.ReadAll(f) return CheckFragment(string(data)) }
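// Illustrative sketch, not part of the original file: how one line of an api
// file decomposes under apiFileLineRegexp. The regexp is copied from above;
// the sample line and everything else here are standalone and hypothetical.
package main

import (
	"fmt"
	"regexp"
)

var apiFileLineRegexp = regexp.MustCompile(`^pkg ([^ \t]+)[ \t]*(\([^)]+\))?, ([^#]*)(#\d+)?$`)

func main() {
	line := "pkg syscall (windows-386), type Handle uintptr #123"
	m := apiFileLineRegexp.FindStringSubmatch(line)
	// m[1] = "syscall" (package), m[2] = "(windows-386)" (build),
	// m[3] = "type Handle uintptr " (feature; the caller trims the space),
	// m[4] = "#123" (issue).
	for i, g := range m[1:] {
		fmt.Printf("group %d: %q\n", i+1, g)
	}
}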
go/src/cmd/vendor/golang.org/x/build/relnote/relnote.go
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package sumdb import ( "context" "fmt" "sync" "golang.org/x/mod/module" "golang.org/x/mod/sumdb/note" "golang.org/x/mod/sumdb/tlog" ) // NewTestServer constructs a new [TestServer] // that will sign its tree with the given signer key // (see [golang.org/x/mod/sumdb/note]) // and fetch new records as needed by calling gosum. func NewTestServer(signer string, gosum func(path, vers string) ([]byte, error)) *TestServer { return &TestServer{signer: signer, gosum: gosum} } // A TestServer is an in-memory implementation of [ServerOps] for testing. type TestServer struct { signer string gosum func(path, vers string) ([]byte, error) mu sync.Mutex hashes testHashes records [][]byte lookup map[string]int64 } // testHashes implements tlog.HashReader, reading from a slice. type testHashes []tlog.Hash func (h testHashes) ReadHashes(indexes []int64) ([]tlog.Hash, error) { var list []tlog.Hash for _, id := range indexes { list = append(list, h[id]) } return list, nil } func (s *TestServer) Signed(ctx context.Context) ([]byte, error) { s.mu.Lock() defer s.mu.Unlock() size := int64(len(s.records)) h, err := tlog.TreeHash(size, s.hashes) if err != nil { return nil, err } text := tlog.FormatTree(tlog.Tree{N: size, Hash: h}) signer, err := note.NewSigner(s.signer) if err != nil { return nil, err } return note.Sign(&note.Note{Text: string(text)}, signer) } func (s *TestServer) ReadRecords(ctx context.Context, id, n int64) ([][]byte, error) { s.mu.Lock() defer s.mu.Unlock() var list [][]byte for i := int64(0); i < n; i++ { if id+i >= int64(len(s.records)) { return nil, fmt.Errorf("missing records") } list = append(list, s.records[id+i]) } return list, nil } func (s *TestServer) Lookup(ctx context.Context, m module.Version) (int64, error) { key := m.String() s.mu.Lock() id, ok := s.lookup[key] s.mu.Unlock() if ok { return id, nil } // Look up module and compute go.sum lines. data, err := s.gosum(m.Path, m.Version) if err != nil { return 0, err } s.mu.Lock() defer s.mu.Unlock() // We ran the fetch without the lock. // If another fetch happened and committed, use it instead. id, ok = s.lookup[key] if ok { return id, nil } // Add record. id = int64(len(s.records)) s.records = append(s.records, data) if s.lookup == nil { s.lookup = make(map[string]int64) } s.lookup[key] = id hashes, err := tlog.StoredHashesForRecordHash(id, tlog.RecordHash(data), s.hashes) if err != nil { panic(err) } s.hashes = append(s.hashes, hashes...) return id, nil } func (s *TestServer) ReadTileData(ctx context.Context, t tlog.Tile) ([]byte, error) { s.mu.Lock() defer s.mu.Unlock() return tlog.ReadTileData(t, s.hashes) }
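// Illustrative usage sketch, not part of the original file: wiring a
// TestServer to a freshly generated signer key and looking up one module.
// note.GenerateKey and module.Version come from golang.org/x/mod; the go.sum
// content returned by the gosum callback here is fake test data.
package main

import (
	"context"
	"crypto/rand"
	"fmt"

	"golang.org/x/mod/module"
	"golang.org/x/mod/sumdb"
	"golang.org/x/mod/sumdb/note"
)

func main() {
	skey, _, err := note.GenerateKey(rand.Reader, "example.test")
	if err != nil {
		panic(err)
	}
	srv := sumdb.NewTestServer(skey, func(path, vers string) ([]byte, error) {
		// Fake go.sum lines; a real server would fetch and hash the module.
		return []byte(fmt.Sprintf("%s %s h1:FAKE=\n", path, vers)), nil
	})
	id, err := srv.Lookup(context.Background(), module.Version{Path: "example.test/m", Version: "v1.0.0"})
	fmt.Println(id, err) // 0 <nil>: the first record gets id 0
}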
go/src/cmd/vendor/golang.org/x/mod/sumdb/test.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build gc #include "textflag.h" // // System calls for arm, Linux // // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. TEXT ·Syscall(SB),NOSPLIT,$0-28 B syscall·Syscall(SB) TEXT ·Syscall6(SB),NOSPLIT,$0-40 B syscall·Syscall6(SB) TEXT ·SyscallNoError(SB),NOSPLIT,$0-24 BL runtime·entersyscall(SB) MOVW trap+0(FP), R7 MOVW a1+4(FP), R0 MOVW a2+8(FP), R1 MOVW a3+12(FP), R2 MOVW $0, R3 MOVW $0, R4 MOVW $0, R5 SWI $0 MOVW R0, r1+16(FP) MOVW $0, R0 MOVW R0, r2+20(FP) BL runtime·exitsyscall(SB) RET TEXT ·RawSyscall(SB),NOSPLIT,$0-28 B syscall·RawSyscall(SB) TEXT ·RawSyscall6(SB),NOSPLIT,$0-40 B syscall·RawSyscall6(SB) TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24 MOVW trap+0(FP), R7 // syscall entry MOVW a1+4(FP), R0 MOVW a2+8(FP), R1 MOVW a3+12(FP), R2 SWI $0 MOVW R0, r1+16(FP) MOVW $0, R0 MOVW R0, r2+20(FP) RET TEXT ·seek(SB),NOSPLIT,$0-28 B syscall·seek(SB)
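// Illustrative usage sketch, not part of the original file: the Go-side
// declaration of the TEXT ·SyscallNoError stub above lives in the unix
// package (Linux-only), and callers use it for syscalls that cannot fail,
// skipping the errno path entirely. getpid(2) is one such syscall.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr)
	pid, _ := unix.SyscallNoError(unix.SYS_GETPID, 0, 0, 0)
	fmt.Println("pid:", pid)
}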
go/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_arm.s
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build aix && ppc // Functions to access/create device major and minor numbers matching the // encoding used by AIX. package unix // Major returns the major component of an AIX device number. func Major(dev uint64) uint32 { return uint32((dev >> 16) & 0xffff) } // Minor returns the minor component of an AIX device number. func Minor(dev uint64) uint32 { return uint32(dev & 0xffff) } // Mkdev returns an AIX device number generated from the given major and minor // components. func Mkdev(major, minor uint32) uint64 { return uint64(((major) << 16) | (minor)) }
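// Illustrative sketch, not part of the original file: the AIX 16/16 split
// used above, rewritten standalone so it runs on any platform. A device
// number round-trips through mkdev/major/minor because the major and minor
// components occupy disjoint halves of the low 32 bits.
package main

import "fmt"

func major(dev uint64) uint32 { return uint32((dev >> 16) & 0xffff) }
func minor(dev uint64) uint32 { return uint32(dev & 0xffff) }
func mkdev(mjr, mnr uint32) uint64 { return uint64(mjr)<<16 | uint64(mnr) }

func main() {
	dev := mkdev(10, 7)
	fmt.Printf("dev=%#x major=%d minor=%d\n", dev, major(dev), minor(dev))
	// dev=0xa0007 major=10 minor=7
}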
go/src/cmd/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos package unix // Set adds fd to the set fds. func (fds *FdSet) Set(fd int) { fds.Bits[fd/NFDBITS] |= (1 << (uintptr(fd) % NFDBITS)) } // Clear removes fd from the set fds. func (fds *FdSet) Clear(fd int) { fds.Bits[fd/NFDBITS] &^= (1 << (uintptr(fd) % NFDBITS)) } // IsSet returns whether fd is in the set fds. func (fds *FdSet) IsSet(fd int) bool { return fds.Bits[fd/NFDBITS]&(1<<(uintptr(fd)%NFDBITS)) != 0 } // Zero clears the set fds. func (fds *FdSet) Zero() { for i := range fds.Bits { fds.Bits[i] = 0 } }
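// Illustrative usage sketch, not part of the original file: preparing an
// FdSet for Select on Linux. Select modifies the set in place, so IsSet
// afterwards reports whether the descriptor became readable within the
// timeout.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd := 0 // stdin
	var rfds unix.FdSet
	rfds.Zero()
	rfds.Set(fd)
	tv := unix.Timeval{Sec: 1} // wait at most one second
	n, err := unix.Select(fd+1, &rfds, nil, nil, &tv)
	fmt.Println(n, err, rfds.IsSet(fd))
}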
go/src/cmd/vendor/golang.org/x/sys/unix/fdset.go
// Copyright 2009,2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Darwin system calls. // This file is compiled as ordinary Go code, // but it is also input to mksyscall, // which parses the //sys lines and generates system call stubs. // Note that sometimes we use a lowercase //sys name and wrap // it in our own nicer implementation, either here or in // syscall_bsd.go or syscall_unix.go. package unix import ( "fmt" "syscall" "unsafe" ) //sys closedir(dir uintptr) (err error) //sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) func fdopendir(fd int) (dir uintptr, err error) { r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0) dir = uintptr(r0) if e1 != 0 { err = errnoErr(e1) } return } var libc_fdopendir_trampoline_addr uintptr //go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // Simulate Getdirentries using fdopendir/readdir_r/closedir. // We store the number of entries to skip in the seek // offset of fd. See issue #31368. // It's not the full required semantics, but should handle the case // of calling Getdirentries or ReadDirent repeatedly. // It won't handle assigning the results of lseek to *basep, or handle // the directory being edited underfoot. skip, err := Seek(fd, 0, 1 /* SEEK_CUR */) if err != nil { return 0, err } // We need to duplicate the incoming file descriptor // because the caller expects to retain control of it, but // fdopendir expects to take control of its argument. // Just Dup'ing the file descriptor is not enough, as the // result shares underlying state. Use Openat to make a really // new file descriptor referring to the same directory. fd2, err := Openat(fd, ".", O_RDONLY, 0) if err != nil { return 0, err } d, err := fdopendir(fd2) if err != nil { Close(fd2) return 0, err } defer closedir(d) var cnt int64 for { var entry Dirent var entryp *Dirent e := readdir_r(d, &entry, &entryp) if e != 0 { return n, errnoErr(e) } if entryp == nil { break } if skip > 0 { skip-- cnt++ continue } reclen := int(entry.Reclen) if reclen > len(buf) { // Not enough room. Return for now. // The counter will let us know where we should start up again. // Note: this strategy for suspending in the middle and // restarting is O(n^2) in the length of the directory. Oh well. break } // Copy entry into return buffer. s := unsafe.Slice((*byte)(unsafe.Pointer(&entry)), reclen) copy(buf, s) buf = buf[reclen:] n += reclen cnt++ } // Set the seek offset of the input fd to record // how many files we've already returned. _, err = Seek(fd, cnt, 0 /* SEEK_SET */) if err != nil { return n, err } return n, nil } // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Len uint8 Family uint8 Index uint16 Type uint8 Nlen uint8 Alen uint8 Slen uint8 Data [12]int8 raw RawSockaddrDatalink } // SockaddrCtl implements the Sockaddr interface for AF_SYSTEM type sockets. type SockaddrCtl struct { ID uint32 Unit uint32 raw RawSockaddrCtl } func (sa *SockaddrCtl) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Sc_len = SizeofSockaddrCtl sa.raw.Sc_family = AF_SYSTEM sa.raw.Ss_sysaddr = AF_SYS_CONTROL sa.raw.Sc_id = sa.ID sa.raw.Sc_unit = sa.Unit return unsafe.Pointer(&sa.raw), SizeofSockaddrCtl, nil } // SockaddrVM implements the Sockaddr interface for AF_VSOCK type sockets. 
// SockaddrVM provides access to Darwin VM sockets: a mechanism that enables // bidirectional communication between a hypervisor and its guest virtual // machines. type SockaddrVM struct { // CID and Port specify a context ID and port address for a VM socket. // Guests have a unique CID, and hosts may have a well-known CID of: // - VMADDR_CID_HYPERVISOR: refers to the hypervisor process. // - VMADDR_CID_LOCAL: refers to local communication (loopback). // - VMADDR_CID_HOST: refers to other processes on the host. CID uint32 Port uint32 raw RawSockaddrVM } func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Len = SizeofSockaddrVM sa.raw.Family = AF_VSOCK sa.raw.Port = sa.Port sa.raw.Cid = sa.CID return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil } func anyToSockaddrGOOS(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_SYSTEM: pp := (*RawSockaddrCtl)(unsafe.Pointer(rsa)) if pp.Ss_sysaddr == AF_SYS_CONTROL { sa := new(SockaddrCtl) sa.ID = pp.Sc_id sa.Unit = pp.Sc_unit return sa, nil } case AF_VSOCK: pp := (*RawSockaddrVM)(unsafe.Pointer(rsa)) sa := &SockaddrVM{ CID: pp.Cid, Port: pp.Port, } return sa, nil } return nil, EAFNOSUPPORT } // Some external packages rely on SYS___SYSCTL being defined to implement their // own sysctl wrappers. Provide it here, even though direct syscalls are no // longer supported on darwin. const SYS___SYSCTL = SYS_SYSCTL // Translate "kern.hostname" to []_C_int{0,1,2,3}. func nametomib(name string) (mib []_C_int, err error) { const siz = unsafe.Sizeof(mib[0]) // NOTE(rsc): It seems strange to set the buffer to have // size CTL_MAXNAME+2 but use only CTL_MAXNAME // as the size. I don't know why the +2 is here, but the // kernel uses +2 for its own implementation of this function. // I am scared that if we don't include the +2 here, the kernel // will silently write 2 words farther than we specify // and we'll get memory corruption. var buf [CTL_MAXNAME + 2]_C_int n := uintptr(CTL_MAXNAME) * siz p := (*byte)(unsafe.Pointer(&buf[0])) bytes, err := ByteSliceFromString(name) if err != nil { return nil, err } // Magic sysctl: "setting" 0.3 to a string name // lets you read back the array of integers form. 
if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil { return nil, err } return buf[0 : n/siz], nil } func direntIno(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) } func direntReclen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) } func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } func PtraceDenyAttach() (err error) { return ptrace(PT_DENY_ATTACH, 0, 0, 0) } //sysnb pipe(p *[2]int32) (err error) func Pipe(p []int) (err error) { if len(p) != 2 { return EINVAL } var x [2]int32 err = pipe(&x) if err == nil { p[0] = int(x[0]) p[1] = int(x[1]) } return } func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { var _p0 unsafe.Pointer var bufsize uintptr if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } return getfsstat(_p0, bufsize, flags) } func xattrPointer(dest []byte) *byte { // It's only when dest is set to NULL that the OS X implementations of // getxattr() and listxattr() return the current sizes of the named attributes. // An empty byte array is not sufficient. To maintain the same behaviour as the // linux implementation, we wrap around the system calls and pass in NULL when // dest is empty. var destp *byte if len(dest) > 0 { destp = &dest[0] } return destp } //sys getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) func Getxattr(path string, attr string, dest []byte) (sz int, err error) { return getxattr(path, attr, xattrPointer(dest), len(dest), 0, 0) } func Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { return getxattr(link, attr, xattrPointer(dest), len(dest), 0, XATTR_NOFOLLOW) } //sys fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { return fgetxattr(fd, attr, xattrPointer(dest), len(dest), 0, 0) } //sys setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) func Setxattr(path string, attr string, data []byte, flags int) (err error) { // The parameters for the OS X implementation vary slightly compared to the // linux system call, specifically the position parameter: // // linux: // int setxattr( // const char *path, // const char *name, // const void *value, // size_t size, // int flags // ); // // darwin: // int setxattr( // const char *path, // const char *name, // void *value, // size_t size, // u_int32_t position, // int options // ); // // position specifies the offset within the extended attribute. In the // current implementation, only the resource fork extended attribute makes // use of this argument. For all others, position is reserved. We simply // default to setting it to zero. 
return setxattr(path, attr, xattrPointer(data), len(data), 0, flags) } func Lsetxattr(link string, attr string, data []byte, flags int) (err error) { return setxattr(link, attr, xattrPointer(data), len(data), 0, flags|XATTR_NOFOLLOW) } //sys fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) func Fsetxattr(fd int, attr string, data []byte, flags int) (err error) { return fsetxattr(fd, attr, xattrPointer(data), len(data), 0, 0) } //sys removexattr(path string, attr string, options int) (err error) func Removexattr(path string, attr string) (err error) { // We wrap around and explicitly zero out the options provided to the OS X // implementation of removexattr, we do so for interoperability with the // linux variant. return removexattr(path, attr, 0) } func Lremovexattr(link string, attr string) (err error) { return removexattr(link, attr, XATTR_NOFOLLOW) } //sys fremovexattr(fd int, attr string, options int) (err error) func Fremovexattr(fd int, attr string) (err error) { return fremovexattr(fd, attr, 0) } //sys listxattr(path string, dest *byte, size int, options int) (sz int, err error) func Listxattr(path string, dest []byte) (sz int, err error) { return listxattr(path, xattrPointer(dest), len(dest), 0) } func Llistxattr(link string, dest []byte) (sz int, err error) { return listxattr(link, xattrPointer(dest), len(dest), XATTR_NOFOLLOW) } //sys flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) func Flistxattr(fd int, dest []byte) (sz int, err error) { return flistxattr(fd, xattrPointer(dest), len(dest), 0) } //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) /* * Wrapped */ //sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys kill(pid int, signum int, posix int) (err error) func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) } //sys ioctl(fd int, req uint, arg uintptr) (err error) //sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL func IoctlCtlInfo(fd int, ctlInfo *CtlInfo) error { return ioctlPtr(fd, CTLIOCGINFO, unsafe.Pointer(ctlInfo)) } // IfreqMTU is struct ifreq used to get or set a network device's MTU. type IfreqMTU struct { Name [IFNAMSIZ]byte MTU int32 } // IoctlGetIfreqMTU performs the SIOCGIFMTU ioctl operation on fd to get the MTU // of the network device specified by ifname. func IoctlGetIfreqMTU(fd int, ifname string) (*IfreqMTU, error) { var ifreq IfreqMTU copy(ifreq.Name[:], ifname) err := ioctlPtr(fd, SIOCGIFMTU, unsafe.Pointer(&ifreq)) return &ifreq, err } // IoctlSetIfreqMTU performs the SIOCSIFMTU ioctl operation on fd to set the MTU // of the network device specified by ifreq.Name. 
func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } //sys renamexNp(from string, to string, flag uint32) (err error) func RenamexNp(from string, to string, flag uint32) (err error) { return renamexNp(from, to, flag) } //sys renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) func RenameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { return renameatxNp(fromfd, from, tofd, to, flag) } //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { return err } // The version might have newlines or tabs in it, convert them to // spaces. for i, b := range uname.Version { if b == '\n' || b == '\t' { if i == len(uname.Version)-1 { uname.Version[i] = 0 } else { uname.Version[i] = ' ' } } } mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { return err } return nil } func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) } var length = int64(count) err = sendfile(infd, outfd, *offset, &length, nil, 0) written = int(length) return } func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) { var value IPMreqn vallen := _Socklen(SizeofIPMreqn) errno := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) return &value, errno } func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) { return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq)) } // GetsockoptXucred is a getsockopt wrapper that returns an Xucred struct. // The usual level and opt are SOL_LOCAL and LOCAL_PEERCRED, respectively. func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { x := new(Xucred) vallen := _Socklen(SizeofXucred) err := getsockopt(fd, level, opt, unsafe.Pointer(x), &vallen) return x, err } func GetsockoptTCPConnectionInfo(fd, level, opt int) (*TCPConnectionInfo, error) { var value TCPConnectionInfo vallen := _Socklen(SizeofTCPConnectionInfo) err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) return &value, err } func SysctlKinfoProc(name string, args ...int) (*KinfoProc, error) { mib, err := sysctlmib(name, args...) if err != nil { return nil, err } var kinfo KinfoProc n := uintptr(SizeofKinfoProc) if err := sysctl(mib, (*byte)(unsafe.Pointer(&kinfo)), &n, nil, 0); err != nil { return nil, err } if n != SizeofKinfoProc { return nil, EIO } return &kinfo, nil } func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { mib, err := sysctlmib(name, args...) if err != nil { return nil, err } for { // Find size. 
n := uintptr(0) if err := sysctl(mib, nil, &n, nil, 0); err != nil { return nil, err } if n == 0 { return nil, nil } if n%SizeofKinfoProc != 0 { return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) } // Read into buffer of that size. buf := make([]KinfoProc, n/SizeofKinfoProc) if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil { if err == ENOMEM { // Process table grew. Try again. continue } return nil, err } if n%SizeofKinfoProc != 0 { return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) } // The actual call may return less than the original reported required // size so ensure we deal with that. return buf[:n/SizeofKinfoProc], nil } } //sys pthread_chdir_np(path string) (err error) func PthreadChdir(path string) (err error) { return pthread_chdir_np(path) } //sys pthread_fchdir_np(fd int) (err error) func PthreadFchdir(fd int) (err error) { return pthread_fchdir_np(fd) } //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) //sys shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) //sys shmdt(addr uintptr) (err error) //sys shmget(key int, size int, flag int) (id int, err error) /* * Exposed directly */ //sys Access(path string, mode uint32) (err error) //sys Adjtime(delta *Timeval, olddelta *Timeval) (err error) //sys Chdir(path string) (err error) //sys Chflags(path string, flags int) (err error) //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Clonefile(src string, dst string, flags int) (err error) //sys Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) //sys Exchangedata(path1 string, path2 string, options int) (err error) //sys Exit(code int) //sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchdir(fd int) (err error) //sys Fchflags(fd int, flags int) (err error) //sys Fchmod(fd int, mode uint32) (err error) //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) //sys Getcwd(buf []byte) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) //sysnb Getgid() (gid int) //sysnb Getpgid(pid int) (pgid int, err error) //sysnb Getpgrp() (pgrp int) //sysnb Getpid() (pid int) //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) //sysnb Getrlimit(which int, lim *Rlimit) (err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) //sysnb Gettimeofday(tp *Timeval) (err error) //sysnb Getuid() (uid int) //sysnb Issetugid() (tainted bool) //sys Kqueue() (fd int, err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path 
string, link string) (err error) //sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) //sys Listen(s int, backlog int) (err error) //sys Mkdir(path string, mode uint32) (err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) //sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) //sys pread(fd int, p []byte, offset int64) (n int, err error) //sys pwrite(fd int, p []byte, offset int64) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Readlink(path string, buf []byte) (n int, err error) //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error) //sys Rename(from string, to string) (err error) //sys Renameat(fromfd int, from string, tofd int, to string) (err error) //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) //sys Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) //sys Setlogin(name string) (err error) //sysnb Setpgid(pid int, pgid int) (err error) //sys Setpriority(which int, who int, prio int) (err error) //sys Setprivexec(flag int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) //sys Symlink(path string, link string) (err error) //sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) //sys Truncate(path string, length int64) (err error) //sys Umask(newmask int) (oldmask int) //sys Undelete(path string) (err error) //sys Unlink(path string) (err error) //sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Unmount(path string, flags int) (err error) //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error)
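
// Usage sketch (illustrative, not part of the vendored file): exercising the
// xattr wrappers defined above. The path and attribute name are placeholders,
// and error handling is minimal.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		"golang.org/x/sys/unix"
//	)
//
//	func main() {
//		const path = "/tmp/xattr-demo" // assumed to already exist
//		if err := unix.Setxattr(path, "com.example.note", []byte("hello"), 0); err != nil {
//			log.Fatal(err)
//		}
//		// List attribute names: they come back as a NUL-separated byte block.
//		buf := make([]byte, 1024)
//		n, err := unix.Listxattr(path, buf)
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Printf("attrs: %q\n", buf[:n])
//		// Remove it again; the wrapper zeroes the options for Linux parity.
//		if err := unix.Removexattr(path, "com.example.note"); err != nil {
//			log.Fatal(err)
//		}
//	}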
go/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin.go
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build riscv64 && linux package unix import "unsafe" //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) //sys Fstatfs(fd int, buf *Statfs_t) (err error) //sys Ftruncate(fd int, length int64) (err error) //sysnb Getegid() (egid int) //sysnb Geteuid() (euid int) //sysnb Getgid() (gid int) //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) //sys Listen(s int, n int) (err error) //sys MemfdSecret(flags int) (fd int, err error) //sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { var ts *Timespec if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) func Stat(path string, stat *Stat_t) (err error) { return Fstatat(AT_FDCWD, path, stat, 0) } func Lchown(path string, uid int, gid int) (err error) { return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW) } func Lstat(path string, stat *Stat_t) (err error) { return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) } //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) func Ustat(dev int, ubuf *Ustat_t) (err error) { return ENOSYS } //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) //sysnb setgroups(n int, list *_Gid_t) (err error) //sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) //sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) //sysnb socket(domain int, typ int, proto int) (fd int, err error) //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) //sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) //sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) 
//sysnb Gettimeofday(tv *Timeval) (err error) func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} } func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: usec} } func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { if tv == nil { return utimensat(dirfd, path, nil, 0) } ts := []Timespec{ NsecToTimespec(TimevalToNsec(tv[0])), NsecToTimespec(TimevalToNsec(tv[1])), } return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) } func Time(t *Time_t) (Time_t, error) { var tv Timeval err := Gettimeofday(&tv) if err != nil { return 0, err } if t != nil { *t = Time_t(tv.Sec) } return Time_t(tv.Sec), nil } func Utime(path string, buf *Utimbuf) error { tv := []Timeval{ {Sec: buf.Actime}, {Sec: buf.Modtime}, } return Utimes(path, tv) } func utimes(path string, tv *[2]Timeval) (err error) { if tv == nil { return utimensat(AT_FDCWD, path, nil, 0) } ts := []Timespec{ NsecToTimespec(TimevalToNsec(tv[0])), NsecToTimespec(TimevalToNsec(tv[1])), } return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) } func (r *PtraceRegs) PC() uint64 { return r.Pc } func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc } func (iov *Iovec) SetLen(length int) { iov.Len = uint64(length) } func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } func (msghdr *Msghdr) SetIovlen(length int) { msghdr.Iovlen = uint64(length) } func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { rsa.Service_name_len = uint64(length) } func Pause() error { _, err := ppoll(nil, 0, nil, nil) return err } func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { return Renameat2(olddirfd, oldpath, newdirfd, newpath, 0) } //sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { cmdlineLen := len(cmdline) if cmdlineLen > 0 { // Account for the additional NULL byte added by // BytePtrFromString in kexecFileLoad. The kexec_file_load // syscall expects a NULL-terminated string. cmdlineLen++ } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } //sys riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error) func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error) { var setSize uintptr if set != nil { setSize = uintptr(unsafe.Sizeof(*set)) } return riscvHWProbe(pairs, setSize, set, flags) }
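
// Usage sketch (illustrative, not part of the vendored file): the Select
// wrapper above converts the Timeval into a Timespec for pselect6. A caller
// might poll stdin with a one-second timeout like this; FdSet's Zero/Set/IsSet
// helpers are defined elsewhere in this package.
//
//	var rfds unix.FdSet
//	rfds.Zero()
//	rfds.Set(0) // fd 0: stdin
//	tv := unix.Timeval{Sec: 1, Usec: 0}
//	n, err := unix.Select(1, &rfds, nil, nil, &tv)
//	if err != nil {
//		log.Fatal(err)
//	}
//	if n > 0 && rfds.IsSet(0) {
//		fmt.Println("stdin is readable")
//	}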
go/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Windows environment variables.

package windows

import (
	"syscall"
	"unsafe"
)

func Getenv(key string) (value string, found bool) {
	return syscall.Getenv(key)
}

func Setenv(key, value string) error {
	return syscall.Setenv(key, value)
}

func Clearenv() {
	syscall.Clearenv()
}

func Environ() []string {
	return syscall.Environ()
}

// Environ returns the environment variables associated with the token,
// rather than those of the current process. If inheritExisting is true,
// the returned environment also inherits the environment of the current
// process.
func (token Token) Environ(inheritExisting bool) (env []string, err error) {
	var block *uint16
	err = CreateEnvironmentBlock(&block, token, inheritExisting)
	if err != nil {
		return nil, err
	}
	defer DestroyEnvironmentBlock(block)
	size := unsafe.Sizeof(*block)
	for *block != 0 {
		// find NUL terminator
		end := unsafe.Pointer(block)
		for *(*uint16)(end) != 0 {
			end = unsafe.Add(end, size)
		}

		entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size)
		env = append(env, UTF16ToString(entry))
		block = (*uint16)(unsafe.Add(end, size))
	}
	return env, nil
}

func Unsetenv(key string) error {
	return syscall.Unsetenv(key)
}
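
// Usage sketch (illustrative, not part of the vendored file): the same
// block-walking technique used by Token.Environ above, applied to a
// hand-built block. A Windows environment block is a sequence of
// NUL-terminated UTF-16 strings ending with an extra NUL.
//
//	block := utf16.Encode([]rune("A=1\x00B=2\x00\x00"))
//	var env []string
//	for i := 0; block[i] != 0; {
//		start := i
//		for block[i] != 0 {
//			i++
//		}
//		env = append(env, string(utf16.Decode(block[start:i])))
//		i++ // step over the NUL terminator
//	}
//	// env is now []string{"A=1", "B=2"}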
go/src/cmd/vendor/golang.org/x/sys/windows/env_windows.go
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package windows type WSAData struct { Version uint16 HighVersion uint16 Description [WSADESCRIPTION_LEN + 1]byte SystemStatus [WSASYS_STATUS_LEN + 1]byte MaxSockets uint16 MaxUdpDg uint16 VendorInfo *byte } type Servent struct { Name *byte Aliases **byte Port uint16 Proto *byte } type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { PerProcessUserTimeLimit int64 PerJobUserTimeLimit int64 LimitFlags uint32 MinimumWorkingSetSize uintptr MaximumWorkingSetSize uintptr ActiveProcessLimit uint32 Affinity uintptr PriorityClass uint32 SchedulingClass uint32 _ uint32 // pad to 8 byte boundary }
go/src/cmd/vendor/golang.org/x/sys/windows/types_windows_386.go
# Go Telemetry

This repository holds the Go Telemetry server code and libraries, used for
hosting [telemetry.go.dev](https://telemetry.go.dev) and instrumenting Go
toolchain programs with opt-in telemetry.

**Warning**: this repository is intended for use only in tools maintained by
the Go team, including tools in the Go distribution and auxiliary tools like
[gopls](https://pkg.go.dev/golang.org/x/tools/gopls) or
[govulncheck](https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck). There are
no compatibility guarantees for any of the packages here: public APIs will
change in breaking ways as the telemetry integration is refined.

## Notable Packages

- The [x/telemetry/counter](https://pkg.go.dev/golang.org/x/telemetry/counter)
  package provides a library for instrumenting programs with counters and
  stack reports.
- The [x/telemetry/upload](https://pkg.go.dev/golang.org/x/telemetry/upload)
  package provides a hook for Go toolchain programs to upload telemetry data,
  if the user has opted in to telemetry uploading.
- The [x/telemetry/cmd/gotelemetry](https://pkg.go.dev/pkg/golang.org/x/telemetry/cmd/gotelemetry)
  command is used for managing telemetry data and configuration.
- The [x/telemetry/config](https://pkg.go.dev/pkg/golang.org/x/telemetry/config)
  package defines the subset of telemetry data that has been approved for
  uploading by the telemetry proposal process.
- The [x/telemetry/godev](https://pkg.go.dev/pkg/golang.org/x/telemetry/godev)
  directory defines the services running at
  [telemetry.go.dev](https://telemetry.go.dev).

## Contributing

This repository uses Gerrit for code changes. To learn how to submit changes
to this repository, see https://golang.org/doc/contribute.html.

The main issue tracker for the telemetry repository is located at
https://github.com/golang/go/issues. Prefix your issue with "x/telemetry:" in
the subject line so it is easy to find.

### Linting & Formatting

This repository uses [eslint](https://eslint.org/) to format TS files,
[stylelint](https://stylelint.io/) to format CSS files, and
[prettier](https://prettier.io/) to format TS, CSS, Markdown, and YAML files.

See the style guides:

- [TypeScript](https://google.github.io/styleguide/tsguide.html)
- [CSS](https://go.dev/wiki/CSSStyleGuide)

Running all TS and CSS code through the formatters before submitting a change
is encouraged, but it is not a strict requirement enforced by CI.

### Installing npm Dependencies

1. Install [docker](https://docs.docker.com/get-docker/)
2. Run `./npm install`

### Run ESLint, Stylelint, & Prettier

    ./npm run all
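
## Example: Recording a Counter

A minimal sketch of how a tool might bump a counter with
[x/telemetry/counter](https://pkg.go.dev/golang.org/x/telemetry/counter). The
counter name below is made up, and, per the warning above, the `counter.Open`
and `counter.Inc` entry points may change:

    package main

    import "golang.org/x/telemetry/counter"

    func main() {
        counter.Open()                    // open (or create) the local counter file
        counter.Inc("mytool/invocations") // increment a named counter
    }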
go/src/cmd/vendor/golang.org/x/telemetry/README.md
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This package is a lightly modified version of the mmap code
// in github.com/google/codesearch/index.

// The mmap package provides an abstraction for memory mapping files
// on different platforms.
package mmap

import (
	"os"
)

// Data is the result of memory-mapping a file.
// The backing file is never closed, so Data
// remains valid for the lifetime of the process.
type Data struct {
	// TODO(pjw): might be better to define versions of Data
	// for the 3 specializations
	f    *os.File
	Data []byte
	// Some windows magic
	Windows interface{}
}

// Mmap maps the given file into memory.
func Mmap(f *os.File) (*Data, error) {
	return mmapFile(f)
}

// Munmap unmaps the given file from memory.
func Munmap(d *Data) error {
	return munmapFile(d)
}
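
// Usage sketch (illustrative, not part of the vendored file): typical use of
// this internal package. The file name is a placeholder and error handling is
// abbreviated.
//
//	f, err := os.Open("counters.bin")
//	if err != nil {
//		log.Fatal(err)
//	}
//	d, err := mmap.Mmap(f)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer mmap.Munmap(d)
//	_ = d.Data // the mapped bytes; valid for the life of the process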
go/src/cmd/vendor/golang.org/x/telemetry/internal/mmap/mmap.go
// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build aix || linux || solaris || zos package term import "golang.org/x/sys/unix" const ioctlReadTermios = unix.TCGETS const ioctlWriteTermios = unix.TCSETS
go/src/cmd/vendor/golang.org/x/term/term_unix_other.go
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package language import ( "bytes" "errors" "fmt" "sort" "golang.org/x/text/internal/tag" ) // isAlpha returns true if the byte is not a digit. // b must be an ASCII letter or digit. func isAlpha(b byte) bool { return b > '9' } // isAlphaNum returns true if the string contains only ASCII letters or digits. func isAlphaNum(s []byte) bool { for _, c := range s { if !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9') { return false } } return true } // ErrSyntax is returned by any of the parsing functions when the // input is not well-formed, according to BCP 47. // TODO: return the position at which the syntax error occurred? var ErrSyntax = errors.New("language: tag is not well-formed") // ErrDuplicateKey is returned when a tag contains the same key twice with // different values in the -u section. var ErrDuplicateKey = errors.New("language: different values for same key in -u extension") // ValueError is returned by any of the parsing functions when the // input is well-formed but the respective subtag is not recognized // as a valid value. type ValueError struct { v [8]byte } // NewValueError creates a new ValueError. func NewValueError(tag []byte) ValueError { var e ValueError copy(e.v[:], tag) return e } func (e ValueError) tag() []byte { n := bytes.IndexByte(e.v[:], 0) if n == -1 { n = 8 } return e.v[:n] } // Error implements the error interface. func (e ValueError) Error() string { return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag()) } // Subtag returns the subtag for which the error occurred. func (e ValueError) Subtag() string { return string(e.tag()) } // scanner is used to scan BCP 47 tokens, which are separated by _ or -. type scanner struct { b []byte bytes [max99thPercentileSize]byte token []byte start int // start position of the current token end int // end position of the current token next int // next point for scan err error done bool } func makeScannerString(s string) scanner { scan := scanner{} if len(s) <= len(scan.bytes) { scan.b = scan.bytes[:copy(scan.bytes[:], s)] } else { scan.b = []byte(s) } scan.init() return scan } // makeScanner returns a scanner using b as the input buffer. // b is not copied and may be modified by the scanner routines. func makeScanner(b []byte) scanner { scan := scanner{b: b} scan.init() return scan } func (s *scanner) init() { for i, c := range s.b { if c == '_' { s.b[i] = '-' } } s.scan() } // restToLower converts the string between start and end to lower case. func (s *scanner) toLower(start, end int) { for i := start; i < end; i++ { c := s.b[i] if 'A' <= c && c <= 'Z' { s.b[i] += 'a' - 'A' } } } func (s *scanner) setError(e error) { if s.err == nil || (e == ErrSyntax && s.err != ErrSyntax) { s.err = e } } // resizeRange shrinks or grows the array at position oldStart such that // a new string of size newSize can fit between oldStart and oldEnd. // Sets the scan point to after the resized range. func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) { s.start = oldStart if end := oldStart + newSize; end != oldEnd { diff := end - oldEnd var b []byte if n := len(s.b) + diff; n > cap(s.b) { b = make([]byte, n) copy(b, s.b[:oldStart]) } else { b = s.b[:n] } copy(b[end:], s.b[oldEnd:]) s.b = b s.next = end + (s.next - s.end) s.end = end } } // replace replaces the current token with repl. 
func (s *scanner) replace(repl string) { s.resizeRange(s.start, s.end, len(repl)) copy(s.b[s.start:], repl) } // gobble removes the current token from the input. // Caller must call scan after calling gobble. func (s *scanner) gobble(e error) { s.setError(e) if s.start == 0 { s.b = s.b[:+copy(s.b, s.b[s.next:])] s.end = 0 } else { s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])] s.end = s.start - 1 } s.next = s.start } // deleteRange removes the given range from s.b before the current token. func (s *scanner) deleteRange(start, end int) { s.b = s.b[:start+copy(s.b[start:], s.b[end:])] diff := end - start s.next -= diff s.start -= diff s.end -= diff } // scan parses the next token of a BCP 47 string. Tokens that are larger // than 8 characters or include non-alphanumeric characters result in an error // and are gobbled and removed from the output. // It returns the end position of the last token consumed. func (s *scanner) scan() (end int) { end = s.end s.token = nil for s.start = s.next; s.next < len(s.b); { i := bytes.IndexByte(s.b[s.next:], '-') if i == -1 { s.end = len(s.b) s.next = len(s.b) i = s.end - s.start } else { s.end = s.next + i s.next = s.end + 1 } token := s.b[s.start:s.end] if i < 1 || i > 8 || !isAlphaNum(token) { s.gobble(ErrSyntax) continue } s.token = token return end } if n := len(s.b); n > 0 && s.b[n-1] == '-' { s.setError(ErrSyntax) s.b = s.b[:len(s.b)-1] } s.done = true return end } // acceptMinSize parses multiple tokens of the given size or greater. // It returns the end position of the last token consumed. func (s *scanner) acceptMinSize(min int) (end int) { end = s.end s.scan() for ; len(s.token) >= min; s.scan() { end = s.end } return end } // Parse parses the given BCP 47 string and returns a valid Tag. If parsing // failed it returns an error and any part of the tag that could be parsed. // If parsing succeeded but an unknown value was found, it returns // ValueError. The Tag returned in this case is just stripped of the unknown // value. All other values are preserved. It accepts tags in the BCP 47 format // and extensions to this standard defined in // https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. func Parse(s string) (t Tag, err error) { // TODO: consider supporting old-style locale key-value pairs. if s == "" { return Und, ErrSyntax } defer func() { if recover() != nil { t = Und err = ErrSyntax return } }() if len(s) <= maxAltTaglen { b := [maxAltTaglen]byte{} for i, c := range s { // Generating invalid UTF-8 is okay as it won't match. if 'A' <= c && c <= 'Z' { c += 'a' - 'A' } else if c == '_' { c = '-' } b[i] = byte(c) } if t, ok := grandfathered(b); ok { return t, nil } } scan := makeScannerString(s) return parse(&scan, s) } func parse(scan *scanner, s string) (t Tag, err error) { t = Und var end int if n := len(scan.token); n <= 1 { scan.toLower(0, len(scan.b)) if n == 0 || scan.token[0] != 'x' { return t, ErrSyntax } end = parseExtensions(scan) } else if n >= 4 { return Und, ErrSyntax } else { // the usual case t, end = parseTag(scan, true) if n := len(scan.token); n == 1 { t.pExt = uint16(end) end = parseExtensions(scan) } else if end < len(scan.b) { scan.setError(ErrSyntax) scan.b = scan.b[:end] } } if int(t.pVariant) < len(scan.b) { if end < len(s) { s = s[:end] } if len(s) > 0 && tag.Compare(s, scan.b) == 0 { t.str = s } else { t.str = string(scan.b) } } else { t.pVariant, t.pExt = 0, 0 } return t, scan.err } // parseTag parses language, script, region and variants. 
// It returns a Tag and the end position in the input that was parsed. // If doNorm is true, then <lang>-<extlang> will be normalized to <extlang>. func parseTag(scan *scanner, doNorm bool) (t Tag, end int) { var e error // TODO: set an error if an unknown lang, script or region is encountered. t.LangID, e = getLangID(scan.token) scan.setError(e) scan.replace(t.LangID.String()) langStart := scan.start end = scan.scan() for len(scan.token) == 3 && isAlpha(scan.token[0]) { // From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent // to a tag of the form <extlang>. if doNorm { lang, e := getLangID(scan.token) if lang != 0 { t.LangID = lang langStr := lang.String() copy(scan.b[langStart:], langStr) scan.b[langStart+len(langStr)] = '-' scan.start = langStart + len(langStr) + 1 } scan.gobble(e) } end = scan.scan() } if len(scan.token) == 4 && isAlpha(scan.token[0]) { t.ScriptID, e = getScriptID(script, scan.token) if t.ScriptID == 0 { scan.gobble(e) } end = scan.scan() } if n := len(scan.token); n >= 2 && n <= 3 { t.RegionID, e = getRegionID(scan.token) if t.RegionID == 0 { scan.gobble(e) } else { scan.replace(t.RegionID.String()) } end = scan.scan() } scan.toLower(scan.start, len(scan.b)) t.pVariant = byte(end) end = parseVariants(scan, end, t) t.pExt = uint16(end) return t, end } var separator = []byte{'-'} // parseVariants scans tokens as long as each token is a valid variant string. // Duplicate variants are removed. func parseVariants(scan *scanner, end int, t Tag) int { start := scan.start varIDBuf := [4]uint8{} variantBuf := [4][]byte{} varID := varIDBuf[:0] variant := variantBuf[:0] last := -1 needSort := false for ; len(scan.token) >= 4; scan.scan() { // TODO: measure the impact of needing this conversion and redesign // the data structure if there is an issue. v, ok := variantIndex[string(scan.token)] if !ok { // unknown variant // TODO: allow user-defined variants? scan.gobble(NewValueError(scan.token)) continue } varID = append(varID, v) variant = append(variant, scan.token) if !needSort { if last < int(v) { last = int(v) } else { needSort = true // There is no legal combinations of more than 7 variants // (and this is by no means a useful sequence). const maxVariants = 8 if len(varID) > maxVariants { break } } } end = scan.end } if needSort { sort.Sort(variantsSort{varID, variant}) k, l := 0, -1 for i, v := range varID { w := int(v) if l == w { // Remove duplicates. continue } varID[k] = varID[i] variant[k] = variant[i] k++ l = w } if str := bytes.Join(variant[:k], separator); len(str) == 0 { end = start - 1 } else { scan.resizeRange(start, end, len(str)) copy(scan.b[scan.start:], str) end = scan.end } } return end } type variantsSort struct { i []uint8 v [][]byte } func (s variantsSort) Len() int { return len(s.i) } func (s variantsSort) Swap(i, j int) { s.i[i], s.i[j] = s.i[j], s.i[i] s.v[i], s.v[j] = s.v[j], s.v[i] } func (s variantsSort) Less(i, j int) bool { return s.i[i] < s.i[j] } type bytesSort struct { b [][]byte n int // first n bytes to compare } func (b bytesSort) Len() int { return len(b.b) } func (b bytesSort) Swap(i, j int) { b.b[i], b.b[j] = b.b[j], b.b[i] } func (b bytesSort) Less(i, j int) bool { for k := 0; k < b.n; k++ { if b.b[i][k] == b.b[j][k] { continue } return b.b[i][k] < b.b[j][k] } return false } // parseExtensions parses and normalizes the extensions in the buffer. // It returns the last position of scan.b that is part of any extension. // It also trims scan.b to remove excess parts accordingly. 
func parseExtensions(scan *scanner) int { start := scan.start exts := [][]byte{} private := []byte{} end := scan.end for len(scan.token) == 1 { extStart := scan.start ext := scan.token[0] end = parseExtension(scan) extension := scan.b[extStart:end] if len(extension) < 3 || (ext != 'x' && len(extension) < 4) { scan.setError(ErrSyntax) end = extStart continue } else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) { scan.b = scan.b[:end] return end } else if ext == 'x' { private = extension break } exts = append(exts, extension) } sort.Sort(bytesSort{exts, 1}) if len(private) > 0 { exts = append(exts, private) } scan.b = scan.b[:start] if len(exts) > 0 { scan.b = append(scan.b, bytes.Join(exts, separator)...) } else if start > 0 { // Strip trailing '-'. scan.b = scan.b[:start-1] } return end } // parseExtension parses a single extension and returns the position of // the extension end. func parseExtension(scan *scanner) int { start, end := scan.start, scan.end switch scan.token[0] { case 'u': // https://www.ietf.org/rfc/rfc6067.txt attrStart := end scan.scan() for last := []byte{}; len(scan.token) > 2; scan.scan() { if bytes.Compare(scan.token, last) != -1 { // Attributes are unsorted. Start over from scratch. p := attrStart + 1 scan.next = p attrs := [][]byte{} for scan.scan(); len(scan.token) > 2; scan.scan() { attrs = append(attrs, scan.token) end = scan.end } sort.Sort(bytesSort{attrs, 3}) copy(scan.b[p:], bytes.Join(attrs, separator)) break } last = scan.token end = scan.end } // Scan key-type sequences. A key is of length 2 and may be followed // by 0 or more "type" subtags from 3 to the maximum of 8 letters. var last, key []byte for attrEnd := end; len(scan.token) == 2; last = key { key = scan.token end = scan.end for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() { end = scan.end } // TODO: check key value validity if bytes.Compare(key, last) != 1 || scan.err != nil { // We have an invalid key or the keys are not sorted. // Start scanning keys from scratch and reorder. p := attrEnd + 1 scan.next = p keys := [][]byte{} for scan.scan(); len(scan.token) == 2; { keyStart := scan.start end = scan.end for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() { end = scan.end } keys = append(keys, scan.b[keyStart:end]) } sort.Stable(bytesSort{keys, 2}) if n := len(keys); n > 0 { k := 0 for i := 1; i < n; i++ { if !bytes.Equal(keys[k][:2], keys[i][:2]) { k++ keys[k] = keys[i] } else if !bytes.Equal(keys[k], keys[i]) { scan.setError(ErrDuplicateKey) } } keys = keys[:k+1] } reordered := bytes.Join(keys, separator) if e := p + len(reordered); e < end { scan.deleteRange(e, end) end = e } copy(scan.b[p:], reordered) break } } case 't': // https://www.ietf.org/rfc/rfc6497.txt scan.scan() if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) { _, end = parseTag(scan, false) scan.toLower(start, end) } for len(scan.token) == 2 && !isAlpha(scan.token[1]) { end = scan.acceptMinSize(3) } case 'x': end = scan.acceptMinSize(1) default: end = scan.acceptMinSize(2) } return end } // getExtension returns the name, body and end position of the extension. func getExtension(s string, p int) (end int, ext string) { if s[p] == '-' { p++ } if s[p] == 'x' { return len(s), s[p:] } end = nextExtension(s, p) return end, s[p:end] } // nextExtension finds the next extension within the string, searching // for the -<char>- pattern from position p. // In the fast majority of cases, language tags will have at most // one extension and extensions tend to be small. 
func nextExtension(s string, p int) int { for n := len(s) - 3; p < n; { if s[p] == '-' { if s[p+2] == '-' { return p } p += 3 } else { p++ } } return len(s) }
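
// Usage sketch (illustrative, not part of the vendored file): this internal
// parser backs the public golang.org/x/text/language package, where the
// normalization performed above (underscores to hyphens, case folding of
// subtags) is observable:
//
//	t, err := language.Parse("en_us") // underscores are accepted and folded
//	fmt.Println(t, err)               // expected output: "en-US <nil>"
//	_, err = language.Parse("not&well&formed")
//	fmt.Println(err != nil)           // expected output: "true"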
go/src/cmd/vendor/golang.org/x/text/internal/language/parse.go
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package norm import ( "fmt" "unicode/utf8" ) // MaxSegmentSize is the maximum size of a byte buffer needed to consider any // sequence of starter and non-starter runes for the purpose of normalization. const MaxSegmentSize = maxByteBufferSize // An Iter iterates over a string or byte slice, while normalizing it // to a given Form. type Iter struct { rb reorderBuffer buf [maxByteBufferSize]byte info Properties // first character saved from previous iteration next iterFunc // implementation of next depends on form asciiF iterFunc p int // current position in input source multiSeg []byte // remainder of multi-segment decomposition } type iterFunc func(*Iter) []byte // Init initializes i to iterate over src after normalizing it to Form f. func (i *Iter) Init(f Form, src []byte) { i.p = 0 if len(src) == 0 { i.setDone() i.rb.nsrc = 0 return } i.multiSeg = nil i.rb.init(f, src) i.next = i.rb.f.nextMain i.asciiF = nextASCIIBytes i.info = i.rb.f.info(i.rb.src, i.p) i.rb.ss.first(i.info) } // InitString initializes i to iterate over src after normalizing it to Form f. func (i *Iter) InitString(f Form, src string) { i.p = 0 if len(src) == 0 { i.setDone() i.rb.nsrc = 0 return } i.multiSeg = nil i.rb.initString(f, src) i.next = i.rb.f.nextMain i.asciiF = nextASCIIString i.info = i.rb.f.info(i.rb.src, i.p) i.rb.ss.first(i.info) } // Seek sets the segment to be returned by the next call to Next to start // at position p. It is the responsibility of the caller to set p to the // start of a segment. func (i *Iter) Seek(offset int64, whence int) (int64, error) { var abs int64 switch whence { case 0: abs = offset case 1: abs = int64(i.p) + offset case 2: abs = int64(i.rb.nsrc) + offset default: return 0, fmt.Errorf("norm: invalid whence") } if abs < 0 { return 0, fmt.Errorf("norm: negative position") } if int(abs) >= i.rb.nsrc { i.setDone() return int64(i.p), nil } i.p = int(abs) i.multiSeg = nil i.next = i.rb.f.nextMain i.info = i.rb.f.info(i.rb.src, i.p) i.rb.ss.first(i.info) return abs, nil } // returnSlice returns a slice of the underlying input type as a byte slice. // If the underlying is of type []byte, it will simply return a slice. // If the underlying is of type string, it will copy the slice to the buffer // and return that. func (i *Iter) returnSlice(a, b int) []byte { if i.rb.src.bytes == nil { return i.buf[:copy(i.buf[:], i.rb.src.str[a:b])] } return i.rb.src.bytes[a:b] } // Pos returns the byte position at which the next call to Next will commence processing. func (i *Iter) Pos() int { return i.p } func (i *Iter) setDone() { i.next = nextDone i.p = i.rb.nsrc } // Done returns true if there is no more input to process. func (i *Iter) Done() bool { return i.p >= i.rb.nsrc } // Next returns f(i.input[i.Pos():n]), where n is a boundary of i.input. // For any input a and b for which f(a) == f(b), subsequent calls // to Next will return the same segments. // Modifying runes are grouped together with the preceding starter, if such a starter exists. // Although not guaranteed, n will typically be the smallest possible n. 
func (i *Iter) Next() []byte { return i.next(i) } func nextASCIIBytes(i *Iter) []byte { p := i.p + 1 if p >= i.rb.nsrc { p0 := i.p i.setDone() return i.rb.src.bytes[p0:p] } if i.rb.src.bytes[p] < utf8.RuneSelf { p0 := i.p i.p = p return i.rb.src.bytes[p0:p] } i.info = i.rb.f.info(i.rb.src, i.p) i.next = i.rb.f.nextMain return i.next(i) } func nextASCIIString(i *Iter) []byte { p := i.p + 1 if p >= i.rb.nsrc { i.buf[0] = i.rb.src.str[i.p] i.setDone() return i.buf[:1] } if i.rb.src.str[p] < utf8.RuneSelf { i.buf[0] = i.rb.src.str[i.p] i.p = p return i.buf[:1] } i.info = i.rb.f.info(i.rb.src, i.p) i.next = i.rb.f.nextMain return i.next(i) } func nextHangul(i *Iter) []byte { p := i.p next := p + hangulUTF8Size if next >= i.rb.nsrc { i.setDone() } else if i.rb.src.hangul(next) == 0 { i.rb.ss.next(i.info) i.info = i.rb.f.info(i.rb.src, i.p) i.next = i.rb.f.nextMain return i.next(i) } i.p = next return i.buf[:decomposeHangul(i.buf[:], i.rb.src.hangul(p))] } func nextDone(i *Iter) []byte { return nil } // nextMulti is used for iterating over multi-segment decompositions // for decomposing normal forms. func nextMulti(i *Iter) []byte { j := 0 d := i.multiSeg // skip first rune for j = 1; j < len(d) && !utf8.RuneStart(d[j]); j++ { } for j < len(d) { info := i.rb.f.info(input{bytes: d}, j) if info.BoundaryBefore() { i.multiSeg = d[j:] return d[:j] } j += int(info.size) } // treat last segment as normal decomposition i.next = i.rb.f.nextMain return i.next(i) } // nextMultiNorm is used for iterating over multi-segment decompositions // for composing normal forms. func nextMultiNorm(i *Iter) []byte { j := 0 d := i.multiSeg for j < len(d) { info := i.rb.f.info(input{bytes: d}, j) if info.BoundaryBefore() { i.rb.compose() seg := i.buf[:i.rb.flushCopy(i.buf[:])] i.rb.insertUnsafe(input{bytes: d}, j, info) i.multiSeg = d[j+int(info.size):] return seg } i.rb.insertUnsafe(input{bytes: d}, j, info) j += int(info.size) } i.multiSeg = nil i.next = nextComposed return doNormComposed(i) } // nextDecomposed is the implementation of Next for forms NFD and NFKD. func nextDecomposed(i *Iter) (next []byte) { outp := 0 inCopyStart, outCopyStart := i.p, 0 for { if sz := int(i.info.size); sz <= 1 { i.rb.ss = 0 p := i.p i.p++ // ASCII or illegal byte. Either way, advance by 1. if i.p >= i.rb.nsrc { i.setDone() return i.returnSlice(p, i.p) } else if i.rb.src._byte(i.p) < utf8.RuneSelf { i.next = i.asciiF return i.returnSlice(p, i.p) } outp++ } else if d := i.info.Decomposition(); d != nil { // Note: If leading CCC != 0, then len(d) == 2 and last is also non-zero. // Case 1: there is a leftover to copy. In this case the decomposition // must begin with a modifier and should always be appended. // Case 2: no leftover. Simply return d if followed by a ccc == 0 value. p := outp + len(d) if outp > 0 { i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p) // TODO: this condition should not be possible, but we leave it // in for defensive purposes. if p > len(i.buf) { return i.buf[:outp] } } else if i.info.multiSegment() { // outp must be 0 as multi-segment decompositions always // start a new segment. if i.multiSeg == nil { i.multiSeg = d i.next = nextMulti return nextMulti(i) } // We are in the last segment. Treat as normal decomposition. d = i.multiSeg i.multiSeg = nil p = len(d) } prevCC := i.info.tccc if i.p += sz; i.p >= i.rb.nsrc { i.setDone() i.info = Properties{} // Force BoundaryBefore to succeed. 
} else { i.info = i.rb.f.info(i.rb.src, i.p) } switch i.rb.ss.next(i.info) { case ssOverflow: i.next = nextCGJDecompose fallthrough case ssStarter: if outp > 0 { copy(i.buf[outp:], d) return i.buf[:p] } return d } copy(i.buf[outp:], d) outp = p inCopyStart, outCopyStart = i.p, outp if i.info.ccc < prevCC { goto doNorm } continue } else if r := i.rb.src.hangul(i.p); r != 0 { outp = decomposeHangul(i.buf[:], r) i.p += hangulUTF8Size inCopyStart, outCopyStart = i.p, outp if i.p >= i.rb.nsrc { i.setDone() break } else if i.rb.src.hangul(i.p) != 0 { i.next = nextHangul return i.buf[:outp] } } else { p := outp + sz if p > len(i.buf) { break } outp = p i.p += sz } if i.p >= i.rb.nsrc { i.setDone() break } prevCC := i.info.tccc i.info = i.rb.f.info(i.rb.src, i.p) if v := i.rb.ss.next(i.info); v == ssStarter { break } else if v == ssOverflow { i.next = nextCGJDecompose break } if i.info.ccc < prevCC { goto doNorm } } if outCopyStart == 0 { return i.returnSlice(inCopyStart, i.p) } else if inCopyStart < i.p { i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p) } return i.buf[:outp] doNorm: // Insert what we have decomposed so far in the reorderBuffer. // As we will only reorder, there will always be enough room. i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p) i.rb.insertDecomposed(i.buf[0:outp]) return doNormDecomposed(i) } func doNormDecomposed(i *Iter) []byte { for { i.rb.insertUnsafe(i.rb.src, i.p, i.info) if i.p += int(i.info.size); i.p >= i.rb.nsrc { i.setDone() break } i.info = i.rb.f.info(i.rb.src, i.p) if i.info.ccc == 0 { break } if s := i.rb.ss.next(i.info); s == ssOverflow { i.next = nextCGJDecompose break } } // new segment or too many combining characters: exit normalization return i.buf[:i.rb.flushCopy(i.buf[:])] } func nextCGJDecompose(i *Iter) []byte { i.rb.ss = 0 i.rb.insertCGJ() i.next = nextDecomposed i.rb.ss.first(i.info) buf := doNormDecomposed(i) return buf } // nextComposed is the implementation of Next for forms NFC and NFKC. func nextComposed(i *Iter) []byte { outp, startp := 0, i.p var prevCC uint8 for { if !i.info.isYesC() { goto doNorm } prevCC = i.info.tccc sz := int(i.info.size) if sz == 0 { sz = 1 // illegal rune: copy byte-by-byte } p := outp + sz if p > len(i.buf) { break } outp = p i.p += sz if i.p >= i.rb.nsrc { i.setDone() break } else if i.rb.src._byte(i.p) < utf8.RuneSelf { i.rb.ss = 0 i.next = i.asciiF break } i.info = i.rb.f.info(i.rb.src, i.p) if v := i.rb.ss.next(i.info); v == ssStarter { break } else if v == ssOverflow { i.next = nextCGJCompose break } if i.info.ccc < prevCC { goto doNorm } } return i.returnSlice(startp, i.p) doNorm: // reset to start position i.p = startp i.info = i.rb.f.info(i.rb.src, i.p) i.rb.ss.first(i.info) if i.info.multiSegment() { d := i.info.Decomposition() info := i.rb.f.info(input{bytes: d}, 0) i.rb.insertUnsafe(input{bytes: d}, 0, info) i.multiSeg = d[int(info.size):] i.next = nextMultiNorm return nextMultiNorm(i) } i.rb.ss.first(i.info) i.rb.insertUnsafe(i.rb.src, i.p, i.info) return doNormComposed(i) } func doNormComposed(i *Iter) []byte { // First rune should already be inserted. 
for { if i.p += int(i.info.size); i.p >= i.rb.nsrc { i.setDone() break } i.info = i.rb.f.info(i.rb.src, i.p) if s := i.rb.ss.next(i.info); s == ssStarter { break } else if s == ssOverflow { i.next = nextCGJCompose break } i.rb.insertUnsafe(i.rb.src, i.p, i.info) } i.rb.compose() seg := i.buf[:i.rb.flushCopy(i.buf[:])] return seg } func nextCGJCompose(i *Iter) []byte { i.rb.ss = 0 // instead of first i.rb.insertCGJ() i.next = nextComposed // Note that we treat any rune with nLeadingNonStarters > 0 as a non-starter, // even if they are not. This is particularly dubious for U+FF9E and UFF9A. // If we ever change that, insert a check here. i.rb.ss.first(i.info) i.rb.insertUnsafe(i.rb.src, i.p, i.info) return doNormComposed(i) }
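
// Usage sketch (illustrative, not part of the vendored file): iterating over
// the normalized segments of a string, which is the intended use of Iter.
//
//	var it norm.Iter
//	it.InitString(norm.NFC, "résumé")
//	for !it.Done() {
//		seg := it.Next() // one normalized segment per call
//		fmt.Printf("%q ", seg)
//	}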
go/src/cmd/vendor/golang.org/x/text/unicode/norm/iter.go
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package inspect defines an Analyzer that provides an AST inspector // (golang.org/x/tools/go/ast/inspector.Inspector) for the syntax trees // of a package. It is only a building block for other analyzers. // // Example of use in another analysis: // // import ( // "golang.org/x/tools/go/analysis" // "golang.org/x/tools/go/analysis/passes/inspect" // "golang.org/x/tools/go/ast/inspector" // ) // // var Analyzer = &analysis.Analyzer{ // ... // Requires: []*analysis.Analyzer{inspect.Analyzer}, // } // // func run(pass *analysis.Pass) (interface{}, error) { // inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) // inspect.Preorder(nil, func(n ast.Node) { // ... // }) // return nil, nil // } package inspect import ( "reflect" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/ast/inspector" ) var Analyzer = &analysis.Analyzer{ Name: "inspect", Doc: "optimize AST traversal for later passes", URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/inspect", Run: run, RunDespiteErrors: true, ResultType: reflect.TypeOf(new(inspector.Inspector)), } func run(pass *analysis.Pass) (interface{}, error) { return inspector.New(pass.Files), nil }
go/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go
// Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // TODO(jba) deduce which functions wrap the log/slog functions, and use the // fact mechanism to propagate this information, so we can provide diagnostics // for user-supplied wrappers. package slog import ( _ "embed" "fmt" "go/ast" "go/token" "go/types" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go var doc string var Analyzer = &analysis.Analyzer{ Name: "slog", Doc: analysisutil.MustExtractDoc(doc, "slog"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/slog", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } var stringType = types.Universe.Lookup("string").Type() // A position describes what is expected to appear in an argument position. type position int const ( // key is an argument position that should hold a string key or an Attr. key position = iota // value is an argument position that should hold a value. value // unknown represents that we do not know if position should hold a key or a value. unknown ) func run(pass *analysis.Pass) (any, error) { var attrType types.Type // The type of slog.Attr inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ (*ast.CallExpr)(nil), } inspect.Preorder(nodeFilter, func(node ast.Node) { call := node.(*ast.CallExpr) fn := typeutil.StaticCallee(pass.TypesInfo, call) if fn == nil { return // not a static call } if call.Ellipsis != token.NoPos { return // skip calls with "..." args } skipArgs, ok := kvFuncSkipArgs(fn) if !ok { // Not a slog function that takes key-value pairs. return } // Here we know that fn.Pkg() is "log/slog". if attrType == nil { attrType = fn.Pkg().Scope().Lookup("Attr").Type() } if isMethodExpr(pass.TypesInfo, call) { // Call is to a method value. Skip the first argument. skipArgs++ } if len(call.Args) <= skipArgs { // Too few args; perhaps there are no k-v pairs. return } // Check this call. // The first position should hold a key or Attr. pos := key var unknownArg ast.Expr // nil or the last unknown argument for _, arg := range call.Args[skipArgs:] { t := pass.TypesInfo.Types[arg].Type switch pos { case key: // Expect a string or Attr. switch { case t == stringType: pos = value case isAttr(t): pos = key case types.IsInterface(t): // As we do not do dataflow, we do not know what the dynamic type is. // But we might be able to learn enough to make a decision. if types.AssignableTo(stringType, t) { // t must be an empty interface. So it can also be an Attr. // We don't know enough to make an assumption. pos = unknown continue } else if attrType != nil && types.AssignableTo(attrType, t) { // Assume it is an Attr. pos = key continue } // Can't be either a string or Attr. Definitely an error. fallthrough default: if unknownArg == nil { pass.ReportRangef(arg, "%s arg %q should be a string or a slog.Attr (possible missing key or value)", shortName(fn), analysisutil.Format(pass.Fset, arg)) } else { pass.ReportRangef(arg, "%s arg %q should probably be a string or a slog.Attr (previous arg %q cannot be a key)", shortName(fn), analysisutil.Format(pass.Fset, arg), analysisutil.Format(pass.Fset, unknownArg)) } // Stop here so we report at most one missing key per call. 
return } case value: // Anything can appear in this position. // The next position should be a key. pos = key case unknown: // Once we encounter an unknown position, we can never be // sure if a problem later or at the end of the call is due to a // missing final value, or a non-key in key position. // In both cases, unknownArg != nil. unknownArg = arg // We don't know what is expected about this position, but all hope is not lost. if t != stringType && !isAttr(t) && !types.IsInterface(t) { // This argument is definitely not a key. // // unknownArg cannot have been a key, in which case this is the // corresponding value, and the next position should hold another key. pos = key } } } if pos == value { if unknownArg == nil { pass.ReportRangef(call, "call to %s missing a final value", shortName(fn)) } else { pass.ReportRangef(call, "call to %s has a missing or misplaced value", shortName(fn)) } } }) return nil, nil } func isAttr(t types.Type) bool { return analysisutil.IsNamedType(t, "log/slog", "Attr") } // shortName returns a name for the function that is shorter than FullName. // Examples: // // "slog.Info" (instead of "log/slog.Info") // "slog.Logger.With" (instead of "(*log/slog.Logger).With") func shortName(fn *types.Func) string { var r string if recv := fn.Type().(*types.Signature).Recv(); recv != nil { if _, named := typesinternal.ReceiverNamed(recv); named != nil { r = named.Obj().Name() } else { r = recv.Type().String() // anon struct/interface } r += "." } return fmt.Sprintf("%s.%s%s", fn.Pkg().Name(), r, fn.Name()) } // If fn is a slog function that has a ...any parameter for key-value pairs, // kvFuncSkipArgs returns the number of arguments to skip over to reach the // corresponding arguments, and true. // Otherwise it returns (0, false). func kvFuncSkipArgs(fn *types.Func) (int, bool) { if pkg := fn.Pkg(); pkg == nil || pkg.Path() != "log/slog" { return 0, false } var recvName string // by default a slog package function if recv := fn.Type().(*types.Signature).Recv(); recv != nil { _, named := typesinternal.ReceiverNamed(recv) if named == nil { return 0, false // anon struct/interface } recvName = named.Obj().Name() } skip, ok := kvFuncs[recvName][fn.Name()] return skip, ok } // The names of functions and methods in log/slog that take // ...any for key-value pairs, mapped to the number of initial args to skip in // order to get to the ones that match the ...any parameter. // The first key is the dereferenced receiver type name, or "" for a function. var kvFuncs = map[string]map[string]int{ "": map[string]int{ "Debug": 1, "Info": 1, "Warn": 1, "Error": 1, "DebugContext": 2, "InfoContext": 2, "WarnContext": 2, "ErrorContext": 2, "Log": 3, "Group": 1, }, "Logger": map[string]int{ "Debug": 1, "Info": 1, "Warn": 1, "Error": 1, "DebugContext": 2, "InfoContext": 2, "WarnContext": 2, "ErrorContext": 2, "Log": 3, "With": 0, }, "Record": map[string]int{ "Add": 0, }, } // isMethodExpr reports whether a call is to a MethodExpr. func isMethodExpr(info *types.Info, c *ast.CallExpr) bool { s, ok := c.Fun.(*ast.SelectorExpr) if !ok { return false } sel := info.Selections[s] return sel != nil && sel.Kind() == types.MethodExpr }
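
// Usage sketch (illustrative, not part of the vendored file): call sites the
// checks above accept and reject. The variable v stands for any value; each
// rejected line corresponds to one of the diagnostics reported by run.
//
//	slog.Info("msg", "k", v)           // ok: string key followed by a value
//	slog.Info("msg", slog.Int("k", 1)) // ok: an Attr in key position
//	slog.Info("msg", "k")              // diagnostic: missing a final value
//	slog.Info("msg", 42, v)            // diagnostic: 42 can be neither a string key nor an Attr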
go/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package unreachable defines an Analyzer that checks for unreachable code.
//
// # Analyzer unreachable
//
// unreachable: check for unreachable code
//
// The unreachable analyzer finds statements that execution can never reach
// because they are preceded by a return statement, a call to panic, an
// infinite loop, or similar constructs.
package unreachable
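
// For illustration (an editorial example, not part of the original doc), the
// analyzer would report the second statement here:
//
//	func f() int {
//		return 1
//		println("never runs") // flagged: follows a return
//	}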
go/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/doc.go
// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package markdown import ( "bytes" "fmt" "strings" ) type List struct { Position Bullet rune Start int Loose bool Items []Block // always *Item } type Item struct { Position Blocks []Block width int } func (b *List) PrintHTML(buf *bytes.Buffer) { if b.Bullet == '.' || b.Bullet == ')' { buf.WriteString("<ol") if b.Start != 1 { fmt.Fprintf(buf, " start=\"%d\"", b.Start) } buf.WriteString(">\n") } else { buf.WriteString("<ul>\n") } for _, c := range b.Items { c.PrintHTML(buf) } if b.Bullet == '.' || b.Bullet == ')' { buf.WriteString("</ol>\n") } else { buf.WriteString("</ul>\n") } } func (b *List) printMarkdown(buf *bytes.Buffer, s mdState) { if buf.Len() > 0 && buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } s.bullet = b.Bullet s.num = b.Start for i, item := range b.Items { if i > 0 && b.Loose { buf.WriteByte('\n') } item.printMarkdown(buf, s) s.num++ } } func (b *Item) printMarkdown(buf *bytes.Buffer, s mdState) { var marker string if s.bullet == '.' || s.bullet == ')' { marker = fmt.Sprintf("%d%c ", s.num, s.bullet) } else { marker = fmt.Sprintf("%c ", s.bullet) } marker = strings.Repeat(" ", b.width-len(marker)) + marker s.prefix1 = s.prefix + marker s.prefix += strings.Repeat(" ", len(marker)) printMarkdownBlocks(b.Blocks, buf, s) } func (b *Item) PrintHTML(buf *bytes.Buffer) { buf.WriteString("<li>") if len(b.Blocks) > 0 { if _, ok := b.Blocks[0].(*Text); !ok { buf.WriteString("\n") } } for i, c := range b.Blocks { c.PrintHTML(buf) if i+1 < len(b.Blocks) { if _, ok := c.(*Text); ok { buf.WriteString("\n") } } } buf.WriteString("</li>\n") } type listBuilder struct { bullet rune num int loose bool item *itemBuilder todo func() line } func (b *listBuilder) build(p buildState) Block { blocks := p.blocks() pos := p.pos() // list can have wrong pos b/c extend dance. pos.EndLine = blocks[len(blocks)-1].Pos().EndLine Loose: for i, c := range blocks { c := c.(*Item) if i+1 < len(blocks) { if blocks[i+1].Pos().StartLine-c.EndLine > 1 { b.loose = true break Loose } } for j, d := range c.Blocks { endLine := d.Pos().EndLine if j+1 < len(c.Blocks) { if c.Blocks[j+1].Pos().StartLine-endLine > 1 { b.loose = true break Loose } } } } if !b.loose { for _, c := range blocks { c := c.(*Item) for i, d := range c.Blocks { if p, ok := d.(*Paragraph); ok { c.Blocks[i] = p.Text } } } } return &List{ pos, b.bullet, b.num, b.loose, p.blocks(), } } func (b *itemBuilder) build(p buildState) Block { b.list.item = nil return &Item{p.pos(), p.blocks(), b.width} } func (c *listBuilder) extend(p *parseState, s line) (line, bool) { d := c.item if d != nil && s.trimSpace(d.width, d.width, true) || d == nil && s.isBlank() { return s, true } return s, false } func (c *itemBuilder) extend(p *parseState, s line) (line, bool) { if s.isBlank() && !c.haveContent { return s, false } if s.isBlank() { // Goldmark does this and apparently commonmark.js too. // Not sure why it is necessary. 
return line{}, true } if !s.isBlank() { c.haveContent = true } return s, true } func newListItem(p *parseState, s line) (line, bool) { if list, ok := p.curB().(*listBuilder); ok && list.todo != nil { s = list.todo() list.todo = nil return s, true } if p.startListItem(&s) { return s, true } return s, false } func (p *parseState) startListItem(s *line) bool { t := *s n := 0 for i := 0; i < 3; i++ { if !t.trimSpace(1, 1, false) { break } n++ } bullet := t.peek() var num int Switch: switch bullet { default: return false case '-', '*', '+': t.trim(bullet) n++ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': for j := t.i; ; j++ { if j >= len(t.text) { return false } c := t.text[j] if c == '.' || c == ')' { // success bullet = c j++ n += j - t.i t.i = j break Switch } if c < '0' || '9' < c { return false } if j-t.i >= 9 { return false } num = num*10 + int(c) - '0' } } if !t.trimSpace(1, 1, true) { return false } n++ tt := t m := 0 for i := 0; i < 3 && tt.trimSpace(1, 1, false); i++ { m++ } if !tt.trimSpace(1, 1, true) { n += m t = tt } // point of no return var list *listBuilder if c, ok := p.nextB().(*listBuilder); ok { list = c } if list == nil || list.bullet != rune(bullet) { // “When the first list item in a list interrupts a paragraph—that is, // when it starts on a line that would otherwise count as // paragraph continuation text—then (a) the lines Ls must // not begin with a blank line, // and (b) if the list item is ordered, the start number must be 1.” if list == nil && p.para() != nil && (t.isBlank() || (bullet == '.' || bullet == ')') && num != 1) { // Goldmark and Dingus both seem to get this wrong // (or the words above don't mean what we think they do). // when the paragraph that could be continued // is inside a block quote. // See testdata/extra.txt 117.md. p.corner = true return false } list = &listBuilder{bullet: rune(bullet), num: num} p.addBlock(list) } b := &itemBuilder{list: list, width: n, haveContent: !t.isBlank()} list.todo = func() line { p.addBlock(b) list.item = b return t } return true } // GitHub task list extension func (p *parseState) taskList(list *List) { for _, item := range list.Items { item := item.(*Item) if len(item.Blocks) == 0 { continue } var text *Text switch b := item.Blocks[0].(type) { default: continue case *Paragraph: text = b.Text case *Text: text = b } if len(text.Inline) < 1 { continue } pl, ok := text.Inline[0].(*Plain) if !ok { continue } s := pl.Text if len(s) < 4 || s[0] != '[' || s[2] != ']' || (s[1] != ' ' && s[1] != 'x' && s[1] != 'X') { continue } if s[3] != ' ' && s[3] != '\t' { p.corner = true // goldmark does not require the space continue } text.Inline = append([]Inline{&Task{Checked: s[1] == 'x' || s[1] == 'X'}, &Plain{Text: s[len("[x]"):]}}, text.Inline[1:]...) } } func ins(first Inline, x []Inline) []Inline { x = append(x, nil) copy(x[1:], x) x[0] = first return x } type Task struct { Checked bool } func (x *Task) Inline() { } func (x *Task) PrintHTML(buf *bytes.Buffer) { buf.WriteString("<input ") if x.Checked { buf.WriteString(`checked="" `) } buf.WriteString(`disabled="" type="checkbox">`) } func (x *Task) printMarkdown(buf *bytes.Buffer) { x.PrintText(buf) } func (x *Task) PrintText(buf *bytes.Buffer) { buf.WriteByte('[') if x.Checked { buf.WriteByte('x') } else { buf.WriteByte(' ') } buf.WriteByte(']') buf.WriteByte(' ') } func listCorner(list *List) bool { for _, item := range list.Items { item := item.(*Item) if len(item.Blocks) == 0 { // Goldmark mishandles what follows; see testdata/extra.txt 111.md. 
return true } switch item.Blocks[0].(type) { case *List, *ThematicBreak, *CodeBlock: // Goldmark mishandles a list with various block items inside it. return true } } return false }
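
// Usage sketch (illustrative, not part of the vendored file): the exact
// item-text shapes that taskList above recognizes as GitHub task checkboxes.
//
//	"[x] done"  // checkbox, checked
//	"[X] done"  // checkbox, checked ('X' is also accepted)
//	"[ ] open"  // checkbox, unchecked
//	"[x]tight"  // not a checkbox: byte 3 must be a space or tab
//	"[y] other" // not a checkbox: marker byte must be ' ', 'x', or 'X'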
go/src/cmd/vendor/rsc.io/markdown/list.go
module rangeloop go 1.21
go/src/cmd/vet/testdata/rangeloop/go.mod