use crate::dsl::{Customization::*, Feature::*, Inst, Length::*, Location::*, TupleType::*};
use crate::dsl::{align, evex, fmt, inst, r, rex, rw, vex, w};

/// Returns the shift and rotate instructions: scalar shifts/rotates,
/// double-precision shifts (`shld`), BMI2 non-flag-reading shifts, and
/// SSE/AVX/AVX-512 vector shifts.
///
/// Encoding note: only the formats that declare an `imm8` operand
/// ("MI", "MRI", "B", "D", "F", "RMI") carry an `ib` immediate byte.
/// The shift-by-1 forms ("M1", opcodes 0xD0/0xD1) and the shift-by-CL
/// forms ("MC"/"MRC") encode no immediate: the count is implicit (1)
/// or taken from the CL register.
#[rustfmt::skip] // Keeps instructions on a single line.
pub fn list() -> Vec<Inst> {
    vec![
        // Scalar arithmetic right shifts.
        inst("sarb", fmt("MC", [rw(rm8), r(cl)]), rex([0xD2]).digit(7), _64b | compat),
        inst("sarb", fmt("MI", [rw(rm8), r(imm8)]), rex([0xC0]).digit(7).ib(), _64b | compat),
        inst("sarb", fmt("M1", [rw(rm8)]), rex([0xD0]).digit(7), _64b | compat).custom(Display),
        inst("sarw", fmt("MC", [rw(rm16), r(cl)]), rex([0x66, 0xD3]).digit(7), _64b | compat),
        inst("sarw", fmt("MI", [rw(rm16), r(imm8)]), rex([0x66, 0xC1]).digit(7).ib(), _64b | compat),
        inst("sarw", fmt("M1", [rw(rm16)]), rex([0x66, 0xD1]).digit(7), _64b | compat).custom(Display),
        inst("sarl", fmt("MC", [rw(rm32), r(cl)]), rex([0xD3]).digit(7), _64b | compat),
        inst("sarl", fmt("MI", [rw(rm32), r(imm8)]), rex([0xC1]).digit(7).ib(), _64b | compat),
        inst("sarl", fmt("M1", [rw(rm32)]), rex([0xD1]).digit(7), _64b | compat).custom(Display),
        inst("sarq", fmt("MC", [rw(rm64), r(cl)]), rex([0xD3]).digit(7).w(), _64b),
        inst("sarq", fmt("MI", [rw(rm64), r(imm8)]), rex([0xC1]).digit(7).ib().w(), _64b),
        inst("sarq", fmt("M1", [rw(rm64)]), rex([0xD1]).digit(7).w(), _64b).custom(Display),
        // Scalar logical left shifts.
        inst("shlb", fmt("MC", [rw(rm8), r(cl)]), rex([0xD2]).digit(4), _64b | compat),
        inst("shlb", fmt("MI", [rw(rm8), r(imm8)]), rex([0xC0]).digit(4).ib(), _64b | compat),
        inst("shlb", fmt("M1", [rw(rm8)]), rex([0xD0]).digit(4), _64b | compat).custom(Display),
        inst("shlw", fmt("MC", [rw(rm16), r(cl)]), rex([0x66, 0xD3]).digit(4), _64b | compat),
        inst("shlw", fmt("MI", [rw(rm16), r(imm8)]), rex([0x66, 0xC1]).digit(4).ib(), _64b | compat),
        inst("shlw", fmt("M1", [rw(rm16)]), rex([0x66, 0xD1]).digit(4), _64b | compat).custom(Display),
        inst("shll", fmt("MC", [rw(rm32), r(cl)]), rex([0xD3]).digit(4), _64b | compat),
        inst("shll", fmt("MI", [rw(rm32), r(imm8)]), rex([0xC1]).digit(4).ib(), _64b | compat),
        inst("shll", fmt("M1", [rw(rm32)]), rex([0xD1]).digit(4), _64b | compat).custom(Display),
        inst("shlq", fmt("MC", [rw(rm64), r(cl)]), rex([0xD3]).digit(4).w(), _64b),
        inst("shlq", fmt("MI", [rw(rm64), r(imm8)]), rex([0xC1]).digit(4).ib().w(), _64b),
        inst("shlq", fmt("M1", [rw(rm64)]), rex([0xD1]).digit(4).w(), _64b).custom(Display),
        // Scalar logical right shifts.
        inst("shrb", fmt("MC", [rw(rm8), r(cl)]), rex([0xD2]).digit(5), _64b | compat),
        inst("shrb", fmt("MI", [rw(rm8), r(imm8)]), rex([0xC0]).digit(5).ib(), _64b | compat),
        inst("shrb", fmt("M1", [rw(rm8)]), rex([0xD0]).digit(5), _64b | compat).custom(Display),
        inst("shrw", fmt("MC", [rw(rm16), r(cl)]), rex([0x66, 0xD3]).digit(5), _64b | compat),
        inst("shrw", fmt("MI", [rw(rm16), r(imm8)]), rex([0x66, 0xC1]).digit(5).ib(), _64b | compat),
        inst("shrw", fmt("M1", [rw(rm16)]), rex([0x66, 0xD1]).digit(5), _64b | compat).custom(Display),
        inst("shrl", fmt("MC", [rw(rm32), r(cl)]), rex([0xD3]).digit(5), _64b | compat),
        inst("shrl", fmt("MI", [rw(rm32), r(imm8)]), rex([0xC1]).digit(5).ib(), _64b | compat),
        inst("shrl", fmt("M1", [rw(rm32)]), rex([0xD1]).digit(5), _64b | compat).custom(Display),
        inst("shrq", fmt("MC", [rw(rm64), r(cl)]), rex([0xD3]).digit(5).w(), _64b),
        inst("shrq", fmt("MI", [rw(rm64), r(imm8)]), rex([0xC1]).digit(5).ib().w(), _64b),
        inst("shrq", fmt("M1", [rw(rm64)]), rex([0xD1]).digit(5).w(), _64b).custom(Display),
        // Scalar rotates (left: /0, right: /1).
        inst("rolb", fmt("MC", [rw(rm8), r(cl)]), rex([0xD2]).digit(0), _64b | compat),
        inst("rolb", fmt("MI", [rw(rm8), r(imm8)]), rex([0xC0]).digit(0).ib(), _64b | compat),
        inst("rolb", fmt("M1", [rw(rm8)]), rex([0xD0]).digit(0), _64b | compat).custom(Display),
        inst("rolw", fmt("MC", [rw(rm16), r(cl)]), rex([0x66, 0xD3]).digit(0), _64b | compat),
        inst("rolw", fmt("MI", [rw(rm16), r(imm8)]), rex([0x66, 0xC1]).digit(0).ib(), _64b | compat),
        inst("rolw", fmt("M1", [rw(rm16)]), rex([0x66, 0xD1]).digit(0), _64b | compat).custom(Display),
        inst("roll", fmt("MC", [rw(rm32), r(cl)]), rex([0xD3]).digit(0), _64b | compat),
        inst("roll", fmt("MI", [rw(rm32), r(imm8)]), rex([0xC1]).digit(0).ib(), _64b | compat),
        inst("roll", fmt("M1", [rw(rm32)]), rex([0xD1]).digit(0), _64b | compat).custom(Display),
        inst("rolq", fmt("MC", [rw(rm64), r(cl)]), rex([0xD3]).digit(0).w(), _64b),
        inst("rolq", fmt("MI", [rw(rm64), r(imm8)]), rex([0xC1]).digit(0).ib().w(), _64b),
        inst("rolq", fmt("M1", [rw(rm64)]), rex([0xD1]).digit(0).w(), _64b).custom(Display),
        inst("rorb", fmt("MC", [rw(rm8), r(cl)]), rex([0xD2]).digit(1), _64b | compat),
        inst("rorb", fmt("MI", [rw(rm8), r(imm8)]), rex([0xC0]).digit(1).ib(), _64b | compat),
        inst("rorb", fmt("M1", [rw(rm8)]), rex([0xD0]).digit(1), _64b | compat).custom(Display),
        inst("rorw", fmt("MC", [rw(rm16), r(cl)]), rex([0x66, 0xD3]).digit(1), _64b | compat),
        inst("rorw", fmt("MI", [rw(rm16), r(imm8)]), rex([0x66, 0xC1]).digit(1).ib(), _64b | compat),
        inst("rorw", fmt("M1", [rw(rm16)]), rex([0x66, 0xD1]).digit(1), _64b | compat).custom(Display),
        inst("rorl", fmt("MC", [rw(rm32), r(cl)]), rex([0xD3]).digit(1), _64b | compat),
        inst("rorl", fmt("MI", [rw(rm32), r(imm8)]), rex([0xC1]).digit(1).ib(), _64b | compat),
        inst("rorl", fmt("M1", [rw(rm32)]), rex([0xD1]).digit(1), _64b | compat).custom(Display),
        inst("rorq", fmt("MC", [rw(rm64), r(cl)]), rex([0xD3]).digit(1).w(), _64b),
        inst("rorq", fmt("MI", [rw(rm64), r(imm8)]), rex([0xC1]).digit(1).ib().w(), _64b),
        inst("rorq", fmt("M1", [rw(rm64)]), rex([0xD1]).digit(1).w(), _64b).custom(Display),

        // Double-precision shifts: shift bits from the second register
        // operand into the first; 0x0F 0xA4 takes an imm8 count, 0x0F
        // 0xA5 takes the count from CL (no immediate byte).
        inst("shldw", fmt("MRI", [rw(rm16), r(r16), r(imm8)]), rex([0x66, 0x0F, 0xA4]).ib(), _64b | compat),
        inst("shldw", fmt("MRC", [rw(rm16), r(r16), r(cl)]), rex([0x66, 0x0F, 0xA5]), _64b | compat),
        inst("shldl", fmt("MRI", [rw(rm32), r(r32), r(imm8)]), rex([0x0F, 0xA4]).ib(), _64b | compat),
        inst("shldq", fmt("MRI", [rw(rm64), r(r64), r(imm8)]), rex([0x0F, 0xA4]).ib().w(), _64b),
        inst("shldl", fmt("MRC", [rw(rm32), r(r32), r(cl)]), rex([0x0F, 0xA5]), _64b | compat),
        inst("shldq", fmt("MRC", [rw(rm64), r(r64), r(cl)]), rex([0x0F, 0xA5]).w(), _64b),

        // BMI2 shifts
        inst("sarxl", fmt("RMV", [w(r32a), r(rm32), r(r32b)]), vex(LZ)._f3()._0f38().w0().op(0xF7), _64b | compat | bmi2),
        inst("shlxl", fmt("RMV", [w(r32a), r(rm32), r(r32b)]), vex(LZ)._66()._0f38().w0().op(0xF7), _64b | compat | bmi2),
        inst("shrxl", fmt("RMV", [w(r32a), r(rm32), r(r32b)]), vex(LZ)._f2()._0f38().w0().op(0xF7), _64b | compat | bmi2),
        inst("sarxq", fmt("RMV", [w(r64a), r(rm64), r(r64b)]), vex(LZ)._f3()._0f38().w1().op(0xF7), _64b | bmi2),
        inst("shlxq", fmt("RMV", [w(r64a), r(rm64), r(r64b)]), vex(LZ)._66()._0f38().w1().op(0xF7), _64b | bmi2),
        inst("shrxq", fmt("RMV", [w(r64a), r(rm64), r(r64b)]), vex(LZ)._f2()._0f38().w1().op(0xF7), _64b | bmi2),
        inst("rorxl", fmt("RMI", [w(r32), r(rm32), r(imm8)]), vex(LZ)._f2()._0f3a().w0().op(0xF0).r().ib(), _64b | compat | bmi2),
        inst("rorxq", fmt("RMI", [w(r64), r(rm64), r(imm8)]), vex(LZ)._f2()._0f3a().w1().op(0xF0).r().ib(), _64b | bmi2),

        // Vector instructions (shift left).
        inst("psllw", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xF1]).r(), _64b | compat | sse2).alt(avx, "vpsllw_c"),
        inst("psllw", fmt("B", [rw(xmm1), r(imm8)]), rex([0x66, 0x0F, 0x71]).digit(6).ib(), _64b | compat | sse2).alt(avx, "vpsllw_d"),
        inst("pslld", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xF2]).r(), _64b | compat | sse2).alt(avx, "vpslld_c"),
        inst("pslld", fmt("B", [rw(xmm1), r(imm8)]), rex([0x66, 0x0F, 0x72]).digit(6).ib(), _64b | compat | sse2).alt(avx, "vpslld_d"),
        inst("psllq", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xF3]).r(), _64b | compat | sse2).alt(avx, "vpsllq_c"),
        inst("psllq", fmt("B", [rw(xmm1), r(imm8)]), rex([0x66, 0x0F, 0x73]).digit(6).ib(), _64b | compat | sse2).alt(avx, "vpsllq_d"),
        inst("vpsllw", fmt("C", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xF1).r(), _64b | compat | avx),
        inst("vpsllw", fmt("D", [w(xmm1), r(xmm2), r(imm8)]), vex(L128)._66()._0f().op(0x71).digit(6).ib(), _64b | compat | avx),
        inst("vpslld", fmt("C", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xF2).r(), _64b | compat | avx),
        inst("vpslld", fmt("D", [w(xmm1), r(xmm2), r(imm8)]), vex(L128)._66()._0f().op(0x72).digit(6).ib(), _64b | compat | avx),
        inst("vpsllq", fmt("C", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xF3).r(), _64b | compat | avx),
        inst("vpsllq", fmt("D", [w(xmm1), r(xmm2), r(imm8)]), vex(L128)._66()._0f().op(0x73).digit(6).ib(), _64b | compat | avx),
        // FIXME: uncomment once the avx512bw feature is bound
        // inst("vpsllw", fmt("G", [w(xmm1), r(xmm2), r(xmm_m128)]), evex(L128, Mem128)._66()._0f().wig().op(0xF1).r(), _64b | compat | avx512vl | avx512bw),
        // inst("vpsllw", fmt("E", [w(xmm1), r(xmm_m128), r(imm8)]), evex(L128, FullMem)._66()._0f().wig().op(0x71).digit(6).ib(), _64b | compat | avx512vl | avx512bw),
        inst("vpslld", fmt("G", [w(xmm1), r(xmm2), r(xmm_m128)]), evex(L128, Mem128)._66()._0f().w0().op(0xF2).r(), _64b | compat | avx512vl | avx512f),
        inst("vpslld", fmt("F", [w(xmm1), r(xmm_m128), r(imm8)]), evex(L128, Full)._66()._0f().w0().op(0x72).digit(6).ib(), _64b | compat | avx512vl | avx512f),
        inst("vpsllq", fmt("G", [w(xmm1), r(xmm2), r(xmm_m128)]), evex(L128, Mem128)._66()._0f().w1().op(0xF3).r(), _64b | compat | avx512vl | avx512f),
        inst("vpsllq", fmt("F", [w(xmm1), r(xmm_m128), r(imm8)]), evex(L128, Full)._66()._0f().w1().op(0x73).digit(6).ib(), _64b | compat | avx512vl | avx512f),

        // Vector instructions (shift right).
        inst("psraw", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xE1]).r(), _64b | compat | sse2).alt(avx, "vpsraw_c"),
        inst("psraw", fmt("B", [rw(xmm1), r(imm8)]), rex([0x66, 0x0F, 0x71]).digit(4).ib(), _64b | compat | sse2).alt(avx, "vpsraw_d"),
        inst("psrad", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xE2]).r(), _64b | compat | sse2).alt(avx, "vpsrad_c"),
        inst("psrad", fmt("B", [rw(xmm1), r(imm8)]), rex([0x66, 0x0F, 0x72]).digit(4).ib(), _64b | compat | sse2).alt(avx, "vpsrad_d"),
        inst("psrlw", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xD1]).r(), _64b | compat | sse2).alt(avx, "vpsrlw_c"),
        inst("psrlw", fmt("B", [rw(xmm1), r(imm8)]), rex([0x66, 0x0F, 0x71]).digit(2).ib(), _64b | compat | sse2).alt(avx, "vpsrlw_d"),
        inst("psrld", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xD2]).r(), _64b | compat | sse2).alt(avx, "vpsrld_c"),
        inst("psrld", fmt("B", [rw(xmm1), r(imm8)]), rex([0x66, 0x0F, 0x72]).digit(2).ib(), _64b | compat | sse2).alt(avx, "vpsrld_d"),
        inst("psrlq", fmt("A", [rw(xmm1), r(align(xmm_m128))]), rex([0x66, 0x0F, 0xD3]).r(), _64b | compat | sse2).alt(avx, "vpsrlq_c"),
        inst("psrlq", fmt("B", [rw(xmm1), r(imm8)]), rex([0x66, 0x0F, 0x73]).digit(2).ib(), _64b | compat | sse2).alt(avx, "vpsrlq_d"),
        inst("vpsraw", fmt("C", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xE1).r(), _64b | compat | avx),
        inst("vpsraw", fmt("D", [w(xmm1), r(xmm2), r(imm8)]), vex(L128)._66()._0f().op(0x71).digit(4).ib(), _64b | compat | avx),
        inst("vpsrad", fmt("C", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xE2).r(), _64b | compat | avx),
        inst("vpsrad", fmt("D", [w(xmm1), r(xmm2), r(imm8)]), vex(L128)._66()._0f().op(0x72).digit(4).ib(), _64b | compat | avx),
        inst("vpsrlw", fmt("C", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xD1).r(), _64b | compat | avx),
        inst("vpsrlw", fmt("D", [w(xmm1), r(xmm2), r(imm8)]), vex(L128)._66()._0f().op(0x71).digit(2).ib(), _64b | compat | avx),
        inst("vpsrld", fmt("C", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xD2).r(), _64b | compat | avx),
        inst("vpsrld", fmt("D", [w(xmm1), r(xmm2), r(imm8)]), vex(L128)._66()._0f().op(0x72).digit(2).ib(), _64b | compat | avx),
        inst("vpsrlq", fmt("C", [w(xmm1), r(xmm2), r(xmm_m128)]), vex(L128)._66()._0f().op(0xD3).r(), _64b | compat | avx),
        inst("vpsrlq", fmt("D", [w(xmm1), r(xmm2), r(imm8)]), vex(L128)._66()._0f().op(0x73).digit(2).ib(), _64b | compat | avx),
        // FIXME: uncomment once the avx512bw feature is bound
        // inst("vpsraw", fmt("G", [w(xmm1), r(xmm2), r(xmm_m128)]), evex(L128, Mem128)._66()._0f().wig().op(0xE1).r(), _64b | compat | avx512vl | avx512bw),
        // inst("vpsraw", fmt("E", [w(xmm1), r(xmm_m128), r(imm8)]), evex(L128, FullMem)._66()._0f().wig().op(0x71).digit(4).ib(), _64b | compat | avx512vl | avx512bw),
        inst("vpsrad", fmt("G", [w(xmm1), r(xmm2), r(xmm_m128)]), evex(L128, Mem128)._66()._0f().w0().op(0xE2).r(), _64b | compat | avx512vl | avx512f),
        inst("vpsrad", fmt("F", [w(xmm1), r(xmm_m128), r(imm8)]), evex(L128, Full)._66()._0f().w0().op(0x72).digit(4).ib(), _64b | compat | avx512vl | avx512f),
        inst("vpsraq", fmt("G", [w(xmm1), r(xmm2), r(xmm_m128)]), evex(L128, Mem128)._66()._0f().w1().op(0xE2).r(), _64b | compat | avx512vl | avx512f),
        inst("vpsraq", fmt("F", [w(xmm1), r(xmm_m128), r(imm8)]), evex(L128, Full)._66()._0f().w1().op(0x72).digit(4).ib(), _64b | compat | avx512vl | avx512f),
        // FIXME: uncomment once the avx512bw feature is bound
        // inst("vpsrlw", fmt("G", [w(xmm1), r(xmm2), r(xmm_m128)]), evex(L128, Mem128)._66()._0f().wig().op(0xD1).r(), _64b | compat | avx512vl | avx512bw),
        // inst("vpsrlw", fmt("E", [w(xmm1), r(xmm_m128), r(imm8)]), evex(L128, FullMem)._66()._0f().wig().op(0x71).digit(2).ib(), _64b | compat | avx512vl | avx512bw),
        inst("vpsrld", fmt("G", [w(xmm1), r(xmm2), r(xmm_m128)]), evex(L128, Mem128)._66()._0f().w0().op(0xD2).r(), _64b | compat | avx512vl | avx512f),
        inst("vpsrld", fmt("F", [w(xmm1), r(xmm_m128), r(imm8)]), evex(L128, Full)._66()._0f().w0().op(0x72).digit(2).ib(), _64b | compat | avx512vl | avx512f),
        inst("vpsrlq", fmt("G", [w(xmm1), r(xmm2), r(xmm_m128)]), evex(L128, Mem128)._66()._0f().w1().op(0xD3).r(), _64b | compat | avx512vl | avx512f),
        inst("vpsrlq", fmt("F", [w(xmm1), r(xmm_m128), r(imm8)]), evex(L128, Full)._66()._0f().w1().op(0x73).digit(2).ib(), _64b | compat | avx512vl | avx512f),
    ]
}
