# Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
# SPDX-License-Identifier: MIT
# generate kernel instances to speed up compilation
import copy
import fnmatch
import itertools
from collections import OrderedDict
from dataclasses import dataclass
from pathlib import Path
from typing import List, Tuple, Dict, Literal, Any

from codegen.arch import ArchTrait, get_factories_for_targets
from codegen.cmake_config import GEN_DIR
from codegen.cpp_symbol_map import (
    get_mask_check_map,
    BIAS_CHECK_MAP,
    DROPOUT_CHECK_MAP,
    MODE_MAP,
    get_mask_map,
    BIAS_MAP,
    DROPOUT_MAP,
    BWD_DTYPE_MAP,
    BOOL_MAP,
)
from codegen.utils import check_duplicates_and_paddings, if_, indent, update_file


# License/header boilerplate prepended to every generated kernel .cpp file.
# NOTE(review): the literal "\n" at the end of the copyright line produces an
# extra blank line in the generated output — presumably intentional; confirm.
FMHA_BWD_KERNEL_HEADER = """// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2025, Advanced Micro Devices, Inc. All rights reserved.\n
// auto generated by generate.py
#include "fmha_bwd.hpp"
"""

# str.format template for one dq/dk/dv backward kernel translation unit.
# {F_*} placeholders are filled by FmhaBwdDQDKDVKernel.template; the body is
# wrapped in a preprocessor guard so it only device-compiles on the matching
# architecture ({F_arch.preprocessor_check}). Doubled braces escape literal
# C++ braces for str.format.
FMHA_BWD_DQ_DK_DV_KERNEL_BODY = """
#include <iostream>

#if !defined(__HIP_DEVICE_COMPILE__) || ({F_arch.preprocessor_check})

using fmha_dtype_{F_idx} = {F_dtype};

using fmha_block_tile_{F_idx} = ck_tile::
    sequence<{F_bm0}, {F_bn0}, {F_bk0}, {F_bk1}, {F_bk2}, {F_bk3}, {F_bk4}, {F_bhdq}, {F_bhdv}>;
using fmha_block_warps0_{F_idx} = ck_tile::sequence<{F_rm0}, {F_rn0}, {F_rk0}>;
using fmha_block_warps1_{F_idx} = ck_tile::sequence<{F_rm1}, {F_rn1}, {F_rk1}>;
using fmha_block_warps2_{F_idx} = ck_tile::sequence<{F_rm2}, {F_rn2}, {F_rk2}>;
using fmha_warp_tile0_{F_idx}   = ck_tile::sequence<{F_wm0}, {F_wn0}, {F_wk0}>;
using fmha_warp_tile1_{F_idx}   = ck_tile::sequence<{F_wm1}, {F_wn1}, {F_wk1}>;
using fmha_warp_tile2_{F_idx}   = ck_tile::sequence<{F_wm0}, {F_wn0}, ck_tile::min({F_wk0}, {F_bk4})>;

// TODO: simplify Gemm0~4BlockWarps in TileFmhaBwdShape
//       G0&G2 -> GSdP
//       G1&G3 -> GdKV
//       G4    -> GdQ
using fmha_bwd_shape_{F_idx} = ck_tile::TileFmhaBwdShape<fmha_block_tile_{F_idx},
                                                         fmha_block_warps0_{F_idx},
                                                         fmha_warp_tile0_{F_idx},
                                                         fmha_block_warps1_{F_idx},
                                                         fmha_warp_tile1_{F_idx},
                                                         fmha_block_warps0_{F_idx},
                                                         fmha_warp_tile0_{F_idx},
                                                         fmha_block_warps1_{F_idx},
                                                         fmha_warp_tile1_{F_idx},
                                                         fmha_block_warps2_{F_idx},
                                                         fmha_warp_tile2_{F_idx},
                                                         {F_maxq}>;

using fmha_bwd_trait_{F_idx} = ck_tile::TileFmhaBwdTraits<{F_dpad},
                                                       {F_dvpad},
                                                       {F_bias},
                                                       {F_dbias},
                                                       {F_occupancy}>;
using fmha_mask_{F_idx}      = {F_mask};
using fmha_dropout_{F_idx}   = {F_dropout};

using fmha_bwd_pipeline_problem_{F_idx} = ck_tile::BlockFmhaBwdPipelineProblem<
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::QDataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::KDataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::VDataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::GemmDataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::LSEDataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::AccDataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::DDataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::BiasDataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::RandValOutputDataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::ODataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::OGradDataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::QGradDataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::KGradDataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::VGradDataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::BiasGradDataType,
    fmha_bwd_shape_{F_idx},
    {F_mode},
    {F_deterministic},
    fmha_mask_{F_idx},
    fmha_dropout_{F_idx},
    {F_trload},
    fmha_bwd_trait_{F_idx}>;

using fmha_bwd_pipeline_{F_idx} = ck_tile::BlockFmhaBwdDQDKDVPipeline<fmha_bwd_pipeline_problem_{F_idx}>;

using fmha_bwd_dk_epilogue_{F_idx} = ck_tile::Default2DEpilogue<
    ck_tile::Default2DEpilogueProblem<typename FmhaBwdTypeConfig<{F_dtype}>::AccDataType,
                                      typename FmhaBwdTypeConfig<{F_dtype}>::KGradDataType,
                                      false,
                                      ({F_dpad} > 0)>>;

using fmha_bwd_dv_epilogue_{F_idx} = ck_tile::Default2DEpilogue<
    ck_tile::Default2DEpilogueProblem<typename FmhaBwdTypeConfig<{F_dtype}>::AccDataType,
                                      typename FmhaBwdTypeConfig<{F_dtype}>::VGradDataType,
                                      false,
                                      ({F_dvpad} > 0)>>;

using fmha_bwd_dq_epilogue_{F_idx} = ck_tile::Default2DEpilogue<
    ck_tile::Default2DEpilogueProblem<typename FmhaBwdTypeConfig<{F_dtype}>::AccDataType,
                                      typename FmhaBwdTypeConfig<{F_dtype}>::QGradDataType,
                                      false,
                                      ({F_dpad} > 0)>>;

using fmha_bwd_dq_dk_dv_kernel_{F_idx} =
    ck_tile::FmhaBwdDQDKDVKernel<fmha_bwd_pipeline_{F_idx},
                                 fmha_bwd_dk_epilogue_{F_idx},
                                 fmha_bwd_dv_epilogue_{F_idx},
                                 fmha_bwd_dq_epilogue_{F_idx}>;

using dq_dk_dv_trait_{F_idx} = fmha_bwd_dq_dk_dv_traits_<{F_hdim},
                                                         {F_dtype},
                                                         {F_mode},
                                                         fmha_mask_{F_idx},
                                                         fmha_dropout_{F_idx},
                                                         {F_bias},
                                                         {F_dbias},
                                                         {F_dpad},
                                                         {F_dvpad},
                                                         {F_deterministic},
                                                         {F_trload},
                                                         {F_maxq},
                                                         {F_bn0}>;

template <>
float fmha_bwd_dq_dk_dv_<dq_dk_dv_trait_{F_idx}, {F_arch.tag}>(const ck_tile::stream_config& s, fmha_bwd_args a)
{{
    using k_ = fmha_bwd_dq_dk_dv_kernel_{F_idx};
    if(s.log_level_ > 0)
        std::cout << ", " << k_::GetName() << std::flush;
    auto [kargs, grids]                    = fmha_bwd_dq_dk_dv_create_kargs_and_grids<k_>(a);
    const dim3 blocks                      = k_::BlockSize();
    constexpr ck_tile::index_t kBlockPerCu = k_::kBlockPerCu;
    return ck_tile::launch_kernel(
        s, ck_tile::make_kernel<kBlockPerCu, {F_arch.tag}>(k_{{}}, grids, blocks, 0, kargs));
}}

template <>
void fmha_bwd_dq_dk_dv_oneshot_<dq_dk_dv_trait_{F_idx}, {F_arch.tag}>(const ck_tile::stream_config& s, fmha_bwd_args a)
{{
    using k_                               = fmha_bwd_dq_dk_dv_kernel_{F_idx};
    auto [kargs, grids]                    = fmha_bwd_dq_dk_dv_create_kargs_and_grids<k_>(a);
    const dim3 blocks                      = k_::BlockSize();
    constexpr ck_tile::index_t kBlockPerCu = k_::kBlockPerCu;
    ck_tile::make_kernel<kBlockPerCu, {F_arch.tag}>(k_{{}}, grids, blocks, 0, kargs)(
        ck_tile::stream_config{{s.stream_id_}});
}}

template <>
int fmha_bwd_dq_dk_dv_maxq_<dq_dk_dv_trait_{F_idx}, {F_arch.tag}>()
{{
    using k_ = fmha_bwd_dq_dk_dv_kernel_{F_idx};
    return k_::kMaxSeqLenQ;
}}

template <>
std::string fmha_bwd_dq_dk_dv_get_name_<dq_dk_dv_trait_{F_idx}, {F_arch.tag}>()
{{
    using k_ = fmha_bwd_dq_dk_dv_kernel_{F_idx};
    return k_::GetName();
}}

#endif // !defined(__HIP_DEVICE_COMPILE__) || ({F_arch.preprocessor_check})
"""

# Output filename for the generated top-level dispatch translation unit.
FMHA_BWD_API_FILENAME = "fmha_bwd_api.cpp"
# str.format template for the fmha_bwd<> API entry point. {F_dispatch} is
# replaced with the generated chain of if/else-if branches (see
# FMHA_BWD_API_INNER_DISPATCH). Doubled braces escape literal C++ braces.
FMHA_BWD_API = """
#include <iostream>

template <typename dot_do_o_trait_, typename dq_dk_dv_trait_, typename convert_dq_trait_, typename Arch>
float fmha_bwd_(const ck_tile::stream_config& s, fmha_bwd_args a)
{{
    if constexpr (!std::is_same_v<convert_dq_trait_, void>)
    {{
        if(s.log_level_ > 0)
            std::cout << ", " << fmha_bwd_dot_do_o_get_name_<dot_do_o_trait_, Arch>() << "@" << fmha_bwd_convert_dq_get_name_<convert_dq_trait_, Arch>() << "@" << fmha_bwd_dq_dk_dv_get_name_<dq_dk_dv_trait_, Arch>() << std::flush;
        return ck_tile::launch_kernel(s,
            [=](const ck_tile::stream_config& s_){{ fmha_bwd_dot_do_o_oneshot_<dot_do_o_trait_, Arch>(s_, a); }},
            [=](const ck_tile::stream_config& s_){{ fmha_bwd_dq_dk_dv_oneshot_<dq_dk_dv_trait_, Arch>(s_, a); }},
            [=](const ck_tile::stream_config& s_){{ fmha_bwd_convert_dq_oneshot_<convert_dq_trait_, Arch>(s_, a); }}
        );
    }}
    else
    {{
        if(s.log_level_ > 0)
            std::cout << ", " << fmha_bwd_dot_do_o_get_name_<dot_do_o_trait_, Arch>() << "@" << fmha_bwd_dq_dk_dv_get_name_<dq_dk_dv_trait_, Arch>() << std::flush;
        return ck_tile::launch_kernel(s,
            [=](const ck_tile::stream_config& s_){{ fmha_bwd_dot_do_o_oneshot_<dot_do_o_trait_, Arch>(s_, a); }},
            [=](const ck_tile::stream_config& s_){{ fmha_bwd_dq_dk_dv_oneshot_<dq_dk_dv_trait_, Arch>(s_, a); }}
        );
    }}
}}

template <>
float fmha_bwd<2>(fmha_bwd_traits t, fmha_bwd_args a, const ck_tile::stream_config& s){{
    [[maybe_unused]] const std::string device_name = ck_tile::get_device_name();
    float r = -1;
{F_dispatch}
    return r;
}}
"""


def FMHA_BWD_API_COND_STATEMENT(F_cond: str, F_body: str, *, if_i=0) -> str:
    """Render one conditional branch of the generated dispatch code.

    `if_(if_i)` supplies the branch keyword (presumably "if" for the first
    branch, "else if" afterwards — provided by codegen.utils), F_cond is the
    C++ condition and F_body is emitted indented one level inside braces.
    Returns the rendered statement terminated by a newline.
    """
    keyword = if_(if_i)
    body = indent(F_body)
    return f"{keyword}({F_cond})\n{{\n{body}\n}}\n"


# str.format template for one branch of {F_dispatch}: runtime trait checks
# (mode/mask/bias/dropout/padding/determinism plus optional seqlen bounds)
# followed by instantiation of the matching kernel trait triple.
FMHA_BWD_API_INNER_DISPATCH = """{F_if}((t.is_group_mode == {F_mode}) && ({F_mask_check}) && (t.bias_type == {F_bias_check}) && (t.has_dbias == {F_dbias}) && ({F_dropout_check}) &&
        ({F_scheck}) && ({F_dcheck}) && ({F_dvcheck}) && (t.is_deterministic == {F_deterministic}){F_max_seq_q_cond}{F_cond_extra}) {{
    using dot_do_o_trait_ = fmha_bwd_dot_do_o_traits_<{F_hdim}, {F_dtype}, {F_mode}, {F_spad1d}, ({F_dvpad} > 0)>;
    using dq_dk_dv_trait_ = fmha_bwd_dq_dk_dv_traits_<{F_hdim}, {F_dtype}, {F_mode}, {F_mask}, {F_dropout}, {F_bias}, {F_dbias}, {F_dpad}, {F_dvpad}, {F_deterministic}, {F_trload}, {F_maxq}, {F_bn0}>;
    using convert_dq_trait_ = fmha_bwd_convert_dq_traits_<{F_hdim}, {F_dtype}, {F_mode}, {F_spad1d}, ({F_dpad} > 0), {F_deterministic}, {F_convert_dq_bn0}>;
    r = fmha_bwd_<dot_do_o_trait_, dq_dk_dv_trait_, std::conditional_t<{F_convert_dq_enabled}, convert_dq_trait_, void>, {F_arch.tag}>(s, a);
    return r;
}}
"""

# M0 size for 1d kernels (dot/convert)
M0_1D: int = 64


# GEMM0: Q@K=S^T
# GEMM1: P^T@dO^T=dV(This was chosen as G1 to match fwd, but N1 must be equal to headdim_v)
# GEMM2: dO@V=dP^T(This was chosen as G2 because of the calculation order)
# GEMM3: dS^T@Q^T=dK(Similar to G1, but N3 must be equal to headdim_qk)
# GEMM4: dS@K^T=dQ(N4 must be equal to headdim_qk)
# Is it necessary to distinguish between K0~K4?
@dataclass(frozen=True)
class FmhaBwdDQDKDVTileSize:
    """Immutable tile/warp configuration for one dq/dk/dv backward kernel."""

    F_bm0: int  # tile size along q seqlen (block size)
    F_bn0: int  # tile size along k seqlen
    F_bk0: int  # tile size along gemm0 unroll(F_bhdq)
    F_bk1: int  # tile size along gemm1 unroll(F_bm0)
    F_bk2: int  # tile size along gemm2 unroll(F_bhdv)
    F_bk3: int  # tile size along gemm3 unroll(F_bm0)
    F_bk4: int  # tile size along gemm4 unroll(F_bn0)
    F_bhdq: int  # q head_dim
    F_bhdv: int  # v head_dim
    F_rm0: int  # number of warps along q seqlen (block warps) in gemm0/gemm2
    F_rn0: int  # number of warps along k seqlen (block warps) in gemm0/gemm2
    F_rk0: int  # number of warps along headdim_qk/v (not used) in gemm0/gemm2
    F_rm1: int  # number of warps along k seqlen (block warps) in gemm1/gemm3
    F_rn1: int  # number of warps along headdim_qk/v (block warps) in gemm1/gemm3
    F_rk1: int  # number of warps along q seqlen (not used) in gemm1/gemm3
    F_rm2: int  # number of warps along q seqlen (block warps) in gemm4
    F_rn2: int  # number of warps along headdim_qk (block warps) in gemm4
    F_rk2: int  # number of warps along k seqlen (not used) in gemm4
    F_wm0: int  # warp size along m in gemm0/gemm2/gemm4
    F_wn0: int  # warp size along n in gemm0/gemm2/gemm4
    F_wk0: int  # warp size along k in gemm0/gemm2/gemm4
    F_wm1: int  # warp size along m in gemm1/gemm3
    F_wn1: int  # warp size along n in gemm1/gemm3
    F_wk1: int  # warp size along k in gemm1/gemm3
    F_occupancy: int  # occupancy
    max_seq_q: int = 0

    @property
    def name(self) -> str:
        """Compact encoding of this configuration, used in kernel/file names."""

        def xjoin(*vals: int) -> str:
            # join a group of sizes with "x", e.g. (16, 16, 32) -> "16x16x32"
            return "x".join(map(str, vals))

        block = xjoin(
            self.F_bm0, self.F_bn0, self.F_bk0, self.F_bk1, self.F_bk2,
            self.F_bk3, self.F_bk4, self.F_bhdq, self.F_bhdv,
        )
        warps = "_".join(
            "r" + xjoin(m, n, k)
            for m, n, k in (
                (self.F_rm0, self.F_rn0, self.F_rk0),
                (self.F_rm1, self.F_rn1, self.F_rk1),
                (self.F_rm2, self.F_rn2, self.F_rk2),
            )
        )
        warp_tiles = "_".join(
            "w" + xjoin(m, n, k)
            for m, n, k in (
                (self.F_wm0, self.F_wn0, self.F_wk0),
                (self.F_wm1, self.F_wn1, self.F_wk1),
            )
        )
        return f"b{block}_{warps}_{warp_tiles}_o{self.F_occupancy}_maxq{self.max_seq_q}"


@dataclass(frozen=True)
class FmhaBwdDQDKDVKernel:
    """One generated dq/dk/dv backward kernel instance (one .cpp file)."""

    F_arch: ArchTrait
    F_idx: int  # this is not a tunable, but a counter to differentiate symbol
    F_hdim: int  # hdim
    F_dtype: str  # data type key, resolved through BWD_DTYPE_MAP
    F_tile: FmhaBwdDQDKDVTileSize
    F_dpad: Literal[0, 8, 1]
    F_dvpad: Literal[0, 8, 1]
    F_bias: str  # key into BIAS_MAP
    F_dbias: str  # "t"/"f", resolved through BOOL_MAP
    F_dropout: str  # key into DROPOUT_MAP
    F_mask: str  # key into get_mask_map(mask_impl)
    F_mode: str  # key into MODE_MAP
    F_deterministic: str  # "t"/"f", resolved through BOOL_MAP
    mask_impl: str  # selects which mask map to use
    F_trload: str  # "t"/"f", resolved through BOOL_MAP

    @property
    def template(self) -> str:
        """Render the complete C++ source for this kernel instance."""
        tile = self.F_tile
        # forward the tile geometry fields verbatim into the template
        tile_fields = {
            attr: getattr(tile, attr)
            for attr in (
                "F_bm0", "F_bn0", "F_bk0", "F_bk1", "F_bk2", "F_bk3", "F_bk4",
                "F_bhdq", "F_bhdv",
                "F_rm0", "F_rn0", "F_rk0",
                "F_rm1", "F_rn1", "F_rk1",
                "F_rm2", "F_rn2", "F_rk2",
                "F_wm0", "F_wn0", "F_wk0",
                "F_wm1", "F_wn1", "F_wk1",
                "F_occupancy",
            )
        }
        return FMHA_BWD_KERNEL_HEADER + FMHA_BWD_DQ_DK_DV_KERNEL_BODY.format(
            F_idx=self.F_idx,
            F_arch=self.F_arch,
            F_hdim=self.F_hdim,
            F_dtype=BWD_DTYPE_MAP[self.F_dtype],
            F_dpad=self.F_dpad,
            F_dvpad=self.F_dvpad,
            F_bias=BIAS_MAP[self.F_bias],
            F_dbias=BOOL_MAP[self.F_dbias],
            F_dropout=DROPOUT_MAP[self.F_dropout],
            F_mask=get_mask_map(self.mask_impl)[self.F_mask],
            F_mode=MODE_MAP[self.F_mode],
            F_deterministic=BOOL_MAP[self.F_deterministic],
            F_trload=BOOL_MAP[self.F_trload],
            F_maxq=tile.max_seq_q,
            **tile_fields,
        )

    @property
    def name(self) -> str:
        """Unique, human-readable kernel name encoding every trait choice."""
        # padding suffix: "p" + per-dimension granularity, or "npad" if none
        pad = "".join(
            tag
            for enabled, tag in (
                (self.F_dpad, f"d{self.F_dpad}"),
                (self.F_dvpad, f"dv{self.F_dvpad}"),
            )
            if enabled
        )
        pieces = [
            f"fmha_bwd_d{self.F_hdim}_{self.F_dtype}_{self.F_mode}_{self.F_tile.name}"
        ]
        pieces.append(f"p{pad}" if pad else "npad")
        pieces.append(self.F_bias if self.F_bias != "no" else "nbias")
        pieces.append("dbias" if self.F_dbias == "t" else "ndbias")
        # "s_"-prefixed masks collapse to mask/nmask; others use their initial
        if self.F_mask.startswith("s_"):
            pieces.append("mask" if self.F_mask == "s_mask" else "nmask")
        elif self.F_mask != "no":
            pieces.append(f"m{self.F_mask[0]}")
        else:
            pieces.append("nmask")
        pieces.append(self.F_dropout if self.F_dropout != "no" else "ndropout")
        pieces.append(
            "deterministic" if self.F_deterministic == "t" else "ndeterministic"
        )
        pieces.append("trload" if self.F_trload == "t" else "ntrload")
        return "_".join(pieces)

    @property
    def filename(self) -> str:
        """Source filename; the arch suffix disambiguates per-target builds."""
        return self.name + self.F_arch.filename_suffix + ".cpp"


class KernelComponentFactoryBase:
    """Base marker class for per-architecture kernel component factories."""

    pass


class KernelComponentFactoryGfx9(KernelComponentFactoryBase):
    """Tile configurations for the gfx9 family (gfx950 excluded via guard)."""

    arch = ArchTrait(
        "gfx9", preprocessor_check="defined(__gfx9__) && !defined(__gfx950__)"
    )

    @staticmethod
    def get_dq_dk_dv_tiles(dtype: str, tr_load: str) -> List[FmhaBwdDQDKDVTileSize]:
        """Return the hand-tuned dq/dk/dv tile sizes for *dtype*.

        No transposed-load (tr_load == "t") variants exist on gfx9, and no
        tiles are defined for dtypes outside fp32/fp16/bf16 — both return [].
        """
        if tr_load == "t":
            return []
        if dtype in ["fp32"]:
            return [
                #                     bm0, bn0, bk0, bk1, bk2, bk3, bk4,bhdq,bhdv,
                FmhaBwdDQDKDVTileSize( 32, 128,  32,  32,  32,  32,  64,  32,  32, 1, 4, 1, 4, 1, 1, 2, 2, 1, 16, 16, 16, 16, 16, 16, 1),
                FmhaBwdDQDKDVTileSize( 16,  64,  64,  16,  64,  16,  16,  64,  64, 1, 4, 1, 4, 1, 1, 1, 4, 1, 16, 16, 16, 16, 16, 16, 1),
                FmhaBwdDQDKDVTileSize( 16,  64, 128,  16, 128,  16,  16, 128, 128, 1, 4, 1, 4, 1, 1, 1, 4, 1, 16, 16, 16, 16, 16, 16, 1),
            ]  # fmt: skip
        if dtype in ["fp16", "bf16"]:
            return [
                FmhaBwdDQDKDVTileSize( 32, 128,  32,  32,  32,  32,  64,  32,  32, 1, 4, 1, 4, 1, 1, 2, 2, 1, 16, 16, 32, 16, 16, 16, 1),
                FmhaBwdDQDKDVTileSize( 32, 128,  64,  32,  64,  32,  32,  64,  64, 1, 4, 1, 4, 1, 1, 1, 4, 1, 16, 16, 32, 16, 16, 16, 1),
                FmhaBwdDQDKDVTileSize( 32, 128,  96,  32,  96,  32,  32,  96,  96, 1, 4, 1, 4, 1, 1, 2, 2, 1, 16, 16, 32, 16, 16, 16, 1),
                FmhaBwdDQDKDVTileSize( 16, 128, 128,  16, 128,  16,  32, 128, 128, 1, 4, 1, 4, 1, 1, 1, 4, 1, 16, 16, 32, 16, 16, 16, 1),
                # FmhaBwdDQDKDVTileSize( 32, 64, 160, 32, 160, 32, 32, 160, 160, 1, 4, 1, 4, 1, 1, 2, 2, 1, 16, 16, 32, 16, 16, 16, 1),
                FmhaBwdDQDKDVTileSize( 16,  64, 256,  16, 256,  16,  32, 256, 256, 1, 4, 1, 4, 1, 1, 1, 4, 1, 16, 16, 32, 16, 16, 16, 1),
            ]  # fmt: skip
        return []


class KernelComponentFactoryGfx950(KernelComponentFactoryGfx9):
    """gfx950: inherits the gfx9 tile set and adds transposed-load variants."""

    arch = ArchTrait("gfx950")

    @staticmethod
    def get_dq_dk_dv_tiles(dtype: str, tr_load: str) -> List[FmhaBwdDQDKDVTileSize]:
        """Return gfx9 tiles plus gfx950-only tr-load tiles for fp16/bf16."""
        # the parent returns a fresh list per call, so extending it is safe
        results = KernelComponentFactoryGfx9.get_dq_dk_dv_tiles(dtype, tr_load)
        if dtype in ["fp16", "bf16"] and tr_load == "t":
            results.extend([
                FmhaBwdDQDKDVTileSize( 32, 128,  64,  32,  64,  32,  32,  64,  64, 1, 4, 1, 4, 1, 1, 1, 4, 1, 16, 16, 32, 16, 16, 32, 1),
                FmhaBwdDQDKDVTileSize( 32, 128, 128,  32, 128,  32,  32, 128, 128, 1, 4, 1, 4, 1, 1, 1, 4, 1, 16, 16, 32, 16, 16, 32, 1),
                FmhaBwdDQDKDVTileSize( 16, 192, 128,  16, 128,  16,  32, 128, 128, 1, 4, 1, 4, 1, 1, 1, 4, 1, 16, 16, 32, 16, 16, 16, 1),
                # FmhaBwdDQDKDVTileSize( 32,  32,  64, 32,  64, 32, 32,  64,  64, 1, 1, 1, 1, 1, 1, 1, 1, 1, 16, 16, 32, 16, 16, 32, 1, 32),
                FmhaBwdDQDKDVTileSize( 32,  16,  64,  32,  64,  32,  16,  64,  64, 1, 1, 1, 1, 1, 1, 1, 1, 1, 16, 16, 32, 16, 16, 16, 2, 32),
                # FmhaBwdDQDKDVTileSize( 16, 32, 128, 16, 128, 16, 32, 128, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1, 16, 16, 32, 16, 16, 16, 1, 16),
                FmhaBwdDQDKDVTileSize( 16,  16, 128,  16, 128,  16,  16, 128, 128, 1, 1, 1, 1, 1, 1, 1, 1, 1, 16, 16, 32, 16, 16, 16, 2, 16),
            ])  # fmt: skip
        return results


class KernelComponentFactoryGfx12(KernelComponentFactoryBase):
    """Tile configurations for gfx12 targets (fp16/bf16 only, no tr-load)."""

    arch = ArchTrait("gfx12")

    @staticmethod
    def get_dq_dk_dv_tiles(dtype: str, tr_load: str) -> List[FmhaBwdDQDKDVTileSize]:
        """Return the dq/dk/dv tile sizes for *dtype*; [] when unsupported."""
        if tr_load == "t":
            return []
        if dtype in ["fp16", "bf16"]:
            return [
                #                     bm0, bn0, bk0, bk1, bk2, bk3, bk4, bhdq, bhdv,
                FmhaBwdDQDKDVTileSize( 32,  64,  32,  32,  32,  32,  64,   32,   32,  1, 4, 1,  4, 1, 1,  2, 2, 1,  16, 16, 16,  16, 16, 16, -1),
                FmhaBwdDQDKDVTileSize( 32,  64,  64,  32,  64,  32,  32,   64,   64,  1, 4, 1,  4, 1, 1,  1, 4, 1,  16, 16, 16,  16, 16, 16, -1),
                FmhaBwdDQDKDVTileSize( 16,  64, 128,  16, 128,  16,  32,  128,  128,  1, 4, 1,  4, 1, 1,  1, 4, 1,  16, 16, 16,  16, 16, 16, -1),
                FmhaBwdDQDKDVTileSize( 16,  64, 256,  16, 256,  16,  32,  256,  256,  1, 4, 1,  4, 1, 1,  1, 4, 1,  16, 16, 16,  16, 16, 16, -1),
            ]  # fmt: skip
        return []


def get_factory(target: str):
    """Return the kernel-component factory class for a gfx target string.

    More specific architectures are matched first (gfx950 before the generic
    gfx9 family), so the prefix checks must stay in this order.

    Args:
        target: device target string, e.g. "gfx90a", "gfx950", "gfx1201".

    Returns:
        The matching KernelComponentFactory* class (not an instance).

    Raises:
        ValueError: if no factory supports *target*.  (Was a bare Exception;
        ValueError is more specific and still caught by existing handlers.)
    """
    if target.startswith("gfx950"):
        return KernelComponentFactoryGfx950
    if target.startswith("gfx9"):
        return KernelComponentFactoryGfx9
    if target.startswith("gfx12"):
        return KernelComponentFactoryGfx12
    raise ValueError(f"Unsupported device target {target}")


# str.format template for the dot(dO, O) pre-pass kernel translation unit;
# placeholders are filled by FmhaBwdOGradDotOKernel.template and the body is
# guarded to device-compile only on the matching architecture.
FMHA_BWD_DOT_DO_O_KERNEL_BODY = """
#include <iostream>

#if !defined(__HIP_DEVICE_COMPILE__) || ({F_arch.preprocessor_check})

using fmha_dtype_{F_idx} = {F_dtype};

using fmha_bwd_dot_do_o_trait_{F_idx} =
    ck_tile::TileFmhaBwdOGradDotOTraits<{F_spad}, {F_dvpad}, {F_occupancy}>;

using fmha_bwd_dot_do_o_pipeline_problem_{F_idx} = ck_tile::BlockFmhaBwdOGradDotOPipelineProblem<
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::ODataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::OGradDataType,
    typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::DDataType,
    /* BlockSize = M0 = */ {F_bm0},
    {F_hdim},
    {F_mode},
    fmha_bwd_dot_do_o_trait_{F_idx}>;

using fmha_bwd_dot_do_o_{F_idx} =
    typename ck_tile::BlockFmhaBwdOGradDotO<fmha_bwd_dot_do_o_pipeline_problem_{F_idx}>;

using fmha_bwd_dot_do_o_kernel_{F_idx} =
    ck_tile::FmhaBwdOGradDotOKernel<fmha_bwd_dot_do_o_{F_idx}>;

using dot_do_o_trait_{F_idx} =
    fmha_bwd_dot_do_o_traits_<{F_hdim}, {F_dtype}, {F_mode}, {F_spad}, {F_dvpad}>;

template <>
float fmha_bwd_dot_do_o_<dot_do_o_trait_{F_idx}, {F_arch.tag}>(const ck_tile::stream_config& s, fmha_bwd_args a)
{{
    using k_ = fmha_bwd_dot_do_o_kernel_{F_idx};
    if(s.log_level_ > 0)
        std::cout << ", " << k_::GetName() << std::flush;
    auto [kargs, grids]                    = fmha_bwd_dot_do_o_create_kargs_and_grids<k_>(a);
    const dim3 blocks                      = k_::BlockSize();
    constexpr ck_tile::index_t kBlockPerCu = k_::kBlockPerCu;
    return ck_tile::launch_kernel(
        s, ck_tile::make_kernel<kBlockPerCu, {F_arch.tag}>(k_{{}}, grids, blocks, 0, kargs));
}}

template <>
void fmha_bwd_dot_do_o_oneshot_<dot_do_o_trait_{F_idx}, {F_arch.tag}>(const ck_tile::stream_config& s, fmha_bwd_args a)
{{
    using k_                               = fmha_bwd_dot_do_o_kernel_{F_idx};
    auto [kargs, grids]                    = fmha_bwd_dot_do_o_create_kargs_and_grids<k_>(a);
    const dim3 blocks                      = k_::BlockSize();
    constexpr ck_tile::index_t kBlockPerCu = k_::kBlockPerCu;
    ck_tile::make_kernel<kBlockPerCu, {F_arch.tag}>(k_{{}}, grids, blocks, 0, kargs)(
        ck_tile::stream_config{{s.stream_id_}});
}}

template <>
std::string fmha_bwd_dot_do_o_get_name_<dot_do_o_trait_{F_idx}, {F_arch.tag}>()
{{
    using k_ = fmha_bwd_dot_do_o_kernel_{F_idx};
    return k_::GetName();
}}

#endif // !defined(__HIP_DEVICE_COMPILE__) || ({F_arch.preprocessor_check})
"""


@dataclass(frozen=True)
class FmhaBwdOGradDotOKernel:
    """One generated dot(dO, O) pre-pass kernel instance (one .cpp file)."""

    F_arch: ArchTrait
    F_idx: int  # this is not a tunable, but a counter to differentiate symbol
    F_hdim: int  # hdim
    F_dtype: str  # data type key, resolved through BWD_DTYPE_MAP
    F_bm0: int  # tile size along q seqlen (block size)
    F_spad: str  # "t"/"f": pad along seqlen
    F_dvpad: str  # "t"/"f": pad along hdim_v
    F_mode: str  # key into MODE_MAP
    F_occupancy: int

    @property
    def template(self) -> str:
        """Render the complete C++ source for this kernel instance."""
        fills = {
            "F_idx": self.F_idx,
            "F_arch": self.F_arch,
            "F_hdim": self.F_hdim,
            "F_dtype": BWD_DTYPE_MAP[self.F_dtype],
            "F_bm0": self.F_bm0,
            "F_spad": BOOL_MAP[self.F_spad],
            "F_dvpad": BOOL_MAP[self.F_dvpad],
            "F_mode": MODE_MAP[self.F_mode],
            "F_occupancy": self.F_occupancy,
        }
        return FMHA_BWD_KERNEL_HEADER + FMHA_BWD_DOT_DO_O_KERNEL_BODY.format(**fills)

    @property
    def name(self) -> str:
        """Unique kernel name; padding flags become a trailing p*/npad suffix."""
        flags = ("s" if self.F_spad == "t" else "") + (
            "dv" if self.F_dvpad == "t" else ""
        )
        suffix = f"p{flags}" if flags else "npad"
        return (
            f"fmha_bwd_dot_do_o_d{self.F_hdim}_{self.F_dtype}"
            f"_b{self.F_bm0}_{self.F_mode}_o{self.F_occupancy}_{suffix}"
        )

    @property
    def filename(self) -> str:
        """Source filename; the arch suffix disambiguates per-target builds."""
        return self.name + self.F_arch.filename_suffix + ".cpp"


# str.format template for the dQ-convert post-pass kernel translation unit;
# placeholders are filled by FmhaBwdConvertQGradKernel.template and the body
# is guarded to device-compile only on the matching architecture.
FMHA_BWD_CONVERT_DQ_KERNEL_BODY = """
#include <iostream>

#if !defined(__HIP_DEVICE_COMPILE__) || ({F_arch.preprocessor_check})

using fmha_dtype_{F_idx} = {F_dtype};

using fmha_bwd_convert_dq_trait_{F_idx} =
    ck_tile::TileFmhaBwdConvertQGradTraits<{F_spad}, {F_dpad}, {F_occupancy}>;

using fmha_bwd_convert_dq_pipeline_problem_{F_idx} =
    ck_tile::BlockFmhaBwdConvertQGradPipelineProblem<
        typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::AccDataType,
        typename FmhaBwdTypeConfig<fmha_dtype_{F_idx}>::QGradDataType,
        /* BlockSize = */ 256,
        {F_bm0},
        {F_bn0},
        {F_hdim},
        {F_mode},
        {F_deterministic},
        fmha_bwd_convert_dq_trait_{F_idx}>;

using fmha_bwd_convert_dq_{F_idx} =
    typename ck_tile::BlockFmhaBwdConvertQGrad<fmha_bwd_convert_dq_pipeline_problem_{F_idx}>;

using fmha_bwd_convert_dq_kernel_{F_idx} =
    ck_tile::FmhaBwdConvertQGradKernel<fmha_bwd_convert_dq_{F_idx}>;

using convert_dq_trait_{F_idx} = fmha_bwd_convert_dq_traits_<{F_hdim},
                                                             {F_dtype},
                                                             {F_mode},
                                                             {F_spad},
                                                             {F_dpad},
                                                             {F_deterministic},
                                                             {F_bn0}>;

template <>
float fmha_bwd_convert_dq_<convert_dq_trait_{F_idx}, {F_arch.tag}>(const ck_tile::stream_config& s, fmha_bwd_args a)
{{
    using k_ = fmha_bwd_convert_dq_kernel_{F_idx};
    if(s.log_level_ > 0)
        std::cout << ", " << k_::GetName() << std::flush;
    auto [kargs, grids]                    = fmha_bwd_convert_dq_create_kargs_and_grids<k_>(a);
    const dim3 blocks                      = k_::BlockSize();
    constexpr ck_tile::index_t kBlockPerCu = k_::kBlockPerCu;
    return ck_tile::launch_kernel(
        s, ck_tile::make_kernel<kBlockPerCu, {F_arch.tag}>(k_{{}}, grids, blocks, 0, kargs));
}}

template <>
void fmha_bwd_convert_dq_oneshot_<convert_dq_trait_{F_idx}, {F_arch.tag}>(const ck_tile::stream_config& s, fmha_bwd_args a)
{{
    using k_                               = fmha_bwd_convert_dq_kernel_{F_idx};
    auto [kargs, grids]                    = fmha_bwd_convert_dq_create_kargs_and_grids<k_>(a);
    const dim3 blocks                      = k_::BlockSize();
    constexpr ck_tile::index_t kBlockPerCu = k_::kBlockPerCu;
    ck_tile::make_kernel<kBlockPerCu, {F_arch.tag}>(k_{{}}, grids, blocks, 0, kargs)(
        ck_tile::stream_config{{s.stream_id_}});
}}

template <>
std::string fmha_bwd_convert_dq_get_name_<convert_dq_trait_{F_idx}, {F_arch.tag}>()
{{
    using k_ = fmha_bwd_convert_dq_kernel_{F_idx};
    return k_::GetName();
}}

#endif // !defined(__HIP_DEVICE_COMPILE__) || ({F_arch.preprocessor_check})
"""


@dataclass(frozen=True)
class FmhaBwdConvertQGradKernel:
    """One generated dQ-convert post-pass kernel instance (one .cpp file)."""

    F_arch: ArchTrait
    F_idx: int  # this is not a tunable, but a counter to differentiate symbol
    F_hdim: int  # hdim
    F_dtype: str  # data type key, resolved through BWD_DTYPE_MAP
    F_bm0: int  # tile size along q seqlen (block size)
    F_bn0: int  # tile size along k seqlen
    F_spad: str  # "t"/"f": pad along seqlen
    F_dpad: str  # "t"/"f": pad along hdim_q
    F_mode: str  # key into MODE_MAP
    F_occupancy: int  # occupancy
    F_deterministic: str  # "t"/"f", resolved through BOOL_MAP
    disabled: bool  # sometimes this kernel is not used

    @property
    def template(self) -> str:
        """Render the complete C++ source for this kernel instance."""
        fills = {
            "F_idx": self.F_idx,
            "F_arch": self.F_arch,
            "F_hdim": self.F_hdim,
            "F_dtype": BWD_DTYPE_MAP[self.F_dtype],
            "F_bm0": self.F_bm0,
            "F_bn0": self.F_bn0,
            "F_spad": BOOL_MAP[self.F_spad],
            "F_dpad": BOOL_MAP[self.F_dpad],
            "F_mode": MODE_MAP[self.F_mode],
            "F_occupancy": self.F_occupancy,
            "F_deterministic": BOOL_MAP[self.F_deterministic],
        }
        return FMHA_BWD_KERNEL_HEADER + FMHA_BWD_CONVERT_DQ_KERNEL_BODY.format(**fills)

    @property
    def name(self) -> str:
        """Unique kernel name with padding and determinism suffixes."""
        flags = ("s" if self.F_spad == "t" else "") + (
            "d" if self.F_dpad == "t" else ""
        )
        pad_suffix = f"p{flags}" if flags else "npad"
        det_suffix = (
            "deterministic" if self.F_deterministic == "t" else "ndeterministic"
        )
        return (
            f"fmha_bwd_convert_dq_d{self.F_hdim}_{self.F_dtype}"
            f"_b{self.F_bm0}x{self.F_bn0}_{self.F_mode}_o{self.F_occupancy}"
            f"_{pad_suffix}_{det_suffix}"
        )

    @property
    def filename(self) -> str:
        """Source filename; the arch suffix disambiguates per-target builds."""
        return self.name + self.F_arch.filename_suffix + ".cpp"


@dataclass(frozen=True)
class FmhaBwdApiTrait:
    """One point of the bwd dispatch space; kept in sync with fmha_bwd_traits<>
    so the generated API can emit fallback calls."""

    arch: ArchTrait
    idx: int  # counter to differentiate symbols, not a tunable
    hdim: int
    dtype: str  # data type
    mode: str  # key into MODE_MAP
    tile: FmhaBwdDQDKDVTileSize
    mask: str
    bias: str
    dbias: str
    dropout: str
    spad1d: str  # spad for 1d kernels (dot/convert)
    dpad: Literal[0, 1, 8]
    dvpad: Literal[0, 1, 8]
    deterministic: str
    mask_impl: str
    tr_load: str

    @property
    def bm0(self) -> int:
        """Tile size along q seqlen."""
        return self.tile.F_bm0

    @property
    def bn0(self) -> int:
        """Tile size along k seqlen."""
        return self.tile.F_bn0

    @property
    def bhdq(self) -> int:
        """Tile size along hdim_q."""
        return self.tile.F_bhdq

    @property
    def bhdv(self) -> int:
        """Tile size along hdim_v."""
        return self.tile.F_bhdv

    @property
    def scheck(self) -> str:
        """C++ runtime predicate for seqlen_q padding of the 1d kernels."""
        if self.mode == "group":
            return "true /*spad1d is always true in group mode*/"
        if self.spad1d == "t":
            return f"true /*a.seqlen_q % {M0_1D} != 0*/"
        return f"a.seqlen_q % {M0_1D} == 0"

    @property
    def dcheck(self) -> str:
        """C++ runtime predicate for hdim_q padding (0 = full tile width)."""
        divisor = self.bhdq if self.dpad == 0 else self.dpad
        return f"a.hdim_q % {divisor} == 0"

    @property
    def dvcheck(self) -> str:
        """C++ runtime predicate for hdim_v padding (0 = full tile width)."""
        divisor = self.bhdv if self.dvpad == 0 else self.dvpad
        return f"a.hdim_v % {divisor} == 0"

    @property
    def max_seq_q_cond(self) -> str:
        """Extra dispatch condition for seqlen-capped tiles (0 = no cap)."""
        limit = self.tile.max_seq_q
        return f" && (a.seqlen_q <= {limit})" if limit != 0 else ""

    @property
    def extra_cond(self) -> str:
        """Extra seqlen_k restriction for uncapped tr_load tiles with bn0=128."""
        restricted = (
            self.tr_load == "t"
            and self.tile.max_seq_q == 0
            and self.tile.F_bn0 == 128
        )
        return " && (a.seqlen_k <= 256)" if restricted else ""

    @property
    def convert_dq_bn0(self) -> int:
        """bn0 for the dq-convert kernel; 0 unless deterministic mode."""
        if self.deterministic == "t":
            return self.tile.F_bn0
        return 0

    @property
    def dot_do_o_kernel(self) -> FmhaBwdOGradDotOKernel:
        """The dot(dO, O) pre-kernel paired with this trait."""
        # TODO: no tuning support yet, so pad/occupancy are fixed values;
        #       support this in the future
        occupancy = 2
        return FmhaBwdOGradDotOKernel(
            F_arch=self.arch,
            F_idx=self.idx,
            F_hdim=self.hdim,
            F_dtype=self.dtype,
            F_bm0=M0_1D,
            F_spad=self.spad1d,
            F_dvpad="t" if self.dvpad else "f",
            F_mode=self.mode,
            F_occupancy=occupancy,
        )

    @property
    def dq_dk_dv_kernel(self) -> FmhaBwdDQDKDVKernel:
        """The main dq/dk/dv kernel paired with this trait."""
        return FmhaBwdDQDKDVKernel(
            F_arch=self.arch,
            F_idx=self.idx,
            F_hdim=self.hdim,
            F_dtype=self.dtype,
            F_tile=self.tile,
            F_dpad=self.dpad,
            F_dvpad=self.dvpad,
            F_bias=self.bias,
            F_dbias=self.dbias,
            F_dropout=self.dropout,
            F_mask=self.mask,
            F_mode=self.mode,
            F_deterministic=self.deterministic,
            mask_impl=self.mask_impl,
            F_trload=self.tr_load,
        )

    @property
    def convert_dq_kernel(self) -> FmhaBwdConvertQGradKernel:
        """The dq-convert kernel; disabled when the tile caps seqlen_q."""
        # TODO: no tuning support yet, so pad/occupancy are fixed values;
        #       support this in the future
        occupancy = 2
        return FmhaBwdConvertQGradKernel(
            F_arch=self.arch,
            F_idx=self.idx,
            F_hdim=self.hdim,
            F_dtype=self.dtype,
            F_bm0=M0_1D,
            F_bn0=self.convert_dq_bn0,
            F_spad=self.spad1d,
            F_dpad="t" if self.dpad else "f",
            F_mode=self.mode,
            F_occupancy=occupancy,
            F_deterministic=self.deterministic,
            disabled=self.tile.max_seq_q != 0,
        )


class FmhaBwdApiPool:
    """Collects registered bwd traits and renders the dispatch API source."""

    def __init__(self, mask_impl):
        # nested ordered mapping: arch -> dtype -> hdim -> [FmhaBwdApiTrait]
        self.dq_dk_dv_pool = OrderedDict()
        self.mask_impl = mask_impl

    def register_dq_dk_dv_traits(self, trait: FmhaBwdApiTrait) -> None:
        """Record one trait, keyed by (arch, dtype, hdim); rejects duplicates."""
        by_dtype = self.dq_dk_dv_pool.setdefault(trait.arch, OrderedDict())
        by_hdim = by_dtype.setdefault(trait.dtype, OrderedDict())
        bucket = by_hdim.setdefault(trait.hdim, [])
        check_duplicates_and_paddings(bucket, trait)
        bucket.append(copy.copy(trait))

    def _api_inners(self, traits: List[FmhaBwdApiTrait]) -> str:
        """Render the inner dispatch chain for one (arch, dtype, hdim) bucket."""
        mask_map = get_mask_map(self.mask_impl)
        mask_check_map = get_mask_check_map(self.mask_impl)
        rendered = []
        for i, trait in enumerate(traits):
            rendered.append(
                FMHA_BWD_API_INNER_DISPATCH.format(
                    F_if=if_(i),
                    F_arch=trait.arch,
                    F_mode=MODE_MAP[trait.mode],
                    F_mask_check=mask_check_map[trait.mask],
                    F_mask=mask_map[trait.mask],
                    F_bias_check=BIAS_CHECK_MAP[trait.bias],
                    F_bias=BIAS_MAP[trait.bias],
                    F_dbias=BOOL_MAP[trait.dbias],
                    F_dropout_check=DROPOUT_CHECK_MAP[trait.dropout],
                    F_dropout=DROPOUT_MAP[trait.dropout],
                    F_scheck=trait.scheck,
                    F_dcheck=trait.dcheck,
                    F_dvcheck=trait.dvcheck,
                    F_hdim=trait.hdim,
                    F_dtype=BWD_DTYPE_MAP[trait.dtype],
                    F_spad1d=BOOL_MAP[trait.spad1d],
                    F_dpad=trait.dpad,
                    F_dvpad=trait.dvpad,
                    F_deterministic=BOOL_MAP[trait.deterministic],
                    F_trload=BOOL_MAP[trait.tr_load],
                    F_maxq=trait.tile.max_seq_q,
                    F_convert_dq_enabled=BOOL_MAP[not trait.convert_dq_kernel.disabled],
                    F_max_seq_q_cond=trait.max_seq_q_cond,
                    F_cond_extra=trait.extra_cond,
                    F_bn0=trait.tile.F_bn0,
                    F_convert_dq_bn0=trait.convert_dq_bn0,
                )
            )
        return "".join(rendered)

    @staticmethod
    def max_seq_q_sort_key(trait):
        # max_seq_q == 0 means "no cap"; push those entries to the end
        return trait.tile.max_seq_q or 1000000

    @staticmethod
    def dtype_cond(dtype: str) -> str:
        """C++ condition selecting this data type at runtime."""
        return f't.data_type.compare("{dtype}") == 0'

    @staticmethod
    def hdim_cond(hdim: int) -> str:
        """C++ condition selecting this head-dim bucket at runtime."""
        return f"t.hdim_q <= {hdim} && t.hdim_v <= {hdim}"

    @property
    def api(self) -> str:
        """Render the complete dispatch API source file."""
        arch_chain = ""
        for i_arch, (arch, by_dtype) in enumerate(self.dq_dk_dv_pool.items()):
            dtype_chain = ""
            for i_dtype, (dtype, by_hdim) in enumerate(by_dtype.items()):
                hdim_chain = ""
                for i_hdim, (hdim, bucket) in enumerate(by_hdim.items()):
                    ordered = sorted(bucket, key=self.max_seq_q_sort_key)
                    hdim_chain += FMHA_BWD_API_COND_STATEMENT(
                        if_i=i_hdim,
                        F_cond=self.hdim_cond(hdim),
                        F_body=self._api_inners(ordered),
                    )
                dtype_chain += FMHA_BWD_API_COND_STATEMENT(
                    if_i=i_dtype, F_cond=self.dtype_cond(dtype), F_body=hdim_chain
                )
            arch_chain += FMHA_BWD_API_COND_STATEMENT(
                if_i=i_arch, F_cond=arch.device_name_check, F_body=dtype_chain
            )
        if not arch_chain:
            # nothing registered: consume the parameters to suppress warnings
            arch_chain = "(void)t; (void)s; (void)a;"
        full = FMHA_BWD_KERNEL_HEADER + FMHA_BWD_API.format(
            F_dispatch=indent(arch_chain)
        )
        return full.replace("\n\n", "\n")


def get_bwd_blobs(
    targets: List[str], filter_list: str, receipt, mask_impl, optdim_list
) -> Tuple[
    FmhaBwdApiPool,
    List[FmhaBwdOGradDotOKernel],
    List[FmhaBwdDQDKDVKernel],
    List[FmhaBwdConvertQGradKernel],
]:
    """Enumerate every bwd kernel instance to generate for the given targets.

    Walks the full cross product of tile/mode/mask/bias/dropout/padding/
    determinism options per target factory, prunes invalid or unwanted
    combinations (including receipt-specific integration subsets), then
    registers each survivor in the API pool and the per-kind kernel sets.

    Args:
        targets: architecture target names, resolved via get_factories_for_targets.
        filter_list: up to three '@'-separated fnmatch patterns applied to the
            dot_do_o, convert_dq and dq_dk_dv kernel names, in that order.
        receipt: integration profile selector (2/3/4/300/400/600/800/801);
            any other value builds the default set (which excludes fp32).
        mask_impl: mask implementation key used to pick the mask maps.
        optdim_list: head dims to keep, or [-1] to keep all.

    Returns:
        (api_pool, dot_do_o kernels, dq_dk_dv kernels, convert_dq kernels);
        each kernel list is de-duplicated in insertion order.
    """
    if filter_list == "":
        filter_list = "*@*@*"
    # missing trailing patterns default to "*" (match everything)
    filters = filter_list.split("@")
    filters.extend(["*"] * (3 - len(filters)))
    filter_dot_do_o = filters[0]
    filter_convert_dq = filters[1]
    filter_dq_dk_dv = filters[2]

    factories = get_factories_for_targets(targets, get_factory)

    # use dict as ordered set (dedup while preserving insertion order)
    gen_dot_do_o: Dict[FmhaBwdOGradDotOKernel, Literal[True]] = OrderedDict()
    gen_dq_dk_dv: Dict[FmhaBwdDQDKDVKernel, Literal[True]] = OrderedDict()
    gen_convert_dq: Dict[FmhaBwdConvertQGradKernel, Literal[True]] = OrderedDict()
    api_pool = FmhaBwdApiPool(mask_impl)

    for factory, dtype, tr_load in itertools.product(
        factories, BWD_DTYPE_MAP.keys(), ["t", "f"]
    ):
        tiles: Any = factory.get_dq_dk_dv_tiles(dtype, tr_load)
        spad1d_options = ["f", "t"]
        # (dpad, dvpad) pairs over {0, 8, 1}; 0 means "check against the full
        # tile hdim", 8/1 are explicit pad granularities — see
        # FmhaBwdApiTrait.dcheck / dvcheck
        dpad_options = itertools.product(*([[0, 8, 1]] * 2))
        tf = ["t", "f"]
        for tile, mode, mask, bias, dbias, dropout, spad1d, (
            dpad,
            dvpad,
        ), deterministic in itertools.product(
            tiles,
            MODE_MAP.keys(),
            get_mask_map(mask_impl).keys(),
            BIAS_MAP.keys(),
            tf,
            DROPOUT_MAP.keys(),
            spad1d_options,
            dpad_options,
            tf,
        ):
            assert isinstance(tile, FmhaBwdDQDKDVTileSize), (
                "tile must be FmhaBwdDQDKDVTileSize"
            )
            # head dim is keyed off the q-head tile size
            hdim = tile.F_bhdq
            # group mode always pads seqlen for the 1d kernels
            if (mode == "group") and (spad1d == "f"):
                continue
            # seqlen-capped tiles (max_seq_q != 0) only exist for batch mode
            # without masking
            if (mode == "group" or ("no" not in mask)) and tile.max_seq_q != 0:
                continue
            # a bias gradient only makes sense with an elementwise bias input
            if (bias == "no" or bias == "alibi") and dbias == "t":
                continue
            # NOTE(review): wg32 dropout variants are skipped unconditionally
            # here even though some receipts below list them — confirm they
            # are intentionally unsupported for bwd
            if "wg32" in dropout:
                continue
            if spad1d == "f" and tile.max_seq_q != 0 and tile.max_seq_q < M0_1D:
                continue  # max_seq_q < M0_1D requires padding
            if tr_load == "t":
                # tr_load can only work with 8 pad
                if dpad != dvpad or dpad == 1:
                    continue
            else:  # tr_load == "f"
                # do not generate instance with only 1 of dpad/dvpad being 8
                if dpad != dvpad and dpad == 8:
                    continue
            # [-1] means "keep every head dim"
            if optdim_list != [-1]:
                if hdim not in optdim_list:
                    continue
            t = FmhaBwdApiTrait(
                arch=factory.arch,
                idx=0,
                hdim=hdim,
                dtype=dtype,
                mode=mode,
                tile=tile,
                mask=mask,
                bias=bias,
                dbias=dbias,
                dropout=dropout,
                spad1d=spad1d,
                dpad=dpad,
                dvpad=dvpad,
                deterministic=deterministic,
                mask_impl=mask_impl,
                tr_load=tr_load,
            )

            # apply the user-supplied name filters per kernel kind
            if not fnmatch.fnmatch(t.dot_do_o_kernel.name, filter_dot_do_o):
                continue
            if not fnmatch.fnmatch(t.convert_dq_kernel.name, filter_convert_dq):
                continue
            if not fnmatch.fnmatch(t.dq_dk_dv_kernel.name, filter_dq_dk_dv):
                continue

            # Flash attention integration
            if receipt == 2:
                cond = dtype in ["fp16", "bf16"]
                cond &= bias in ["no", "alibi"]
                cond &= dropout in ["no", "dropout_wg32", "dropout_wg16"]
                cond &= dpad == dvpad
                if not cond:
                    continue
            elif receipt == 3:
                cond = dtype in ["fp16", "bf16"]
                cond &= bias in ["no", "alibi"]
                cond &= dpad == dvpad
                cond &= deterministic == "f"
                if not cond:
                    continue
            # PyTorch integration
            elif receipt == 4:
                cond = dtype in ["fp16", "bf16"]
                cond &= bias in ["no", "bias"]
                cond &= dropout in ["no", "dropout_wg32", "dropout_wg16"]
                cond &= dpad == dvpad
                cond &= deterministic == "f"
                if not cond:
                    continue
            # Aiter (mha_bwd) integration
            elif receipt == 300:
                cond = dtype in ["fp16", "bf16"]
                cond &= mode == "batch"
                cond &= dropout in ["no", "dropout_wg32", "dropout_wg16"]
                if not cond:
                    continue
            # Aiter (mha_varlen_bwd) integration
            elif receipt == 400:
                cond = dtype in ["fp16", "bf16"]
                cond &= mode == "group"
                cond &= dropout in ["no", "dropout_wg32", "dropout_wg16"]
                if not cond:
                    continue
            # aiter::mha_bwd C++ api integration
            elif receipt == 600:
                cond = dtype in ["fp16", "bf16"]
                if not cond:
                    continue

            # the fp32 receipts form a second, separate chain: for every other
            # receipt the final `else` below drops fp32 instances entirely
            # fp32 only, all variations
            if receipt == 800:
                cond = dtype == "fp32"
                cond &= dpad == dvpad
                if not cond:
                    continue
            # fp32 only, minimal set of parameters
            elif receipt == 801:
                cond = dtype == "fp32"
                cond &= hdim in [64, 128]
                cond &= dpad == dvpad
                cond &= mode == "batch"
                cond &= bias == "no"
                cond &= dropout == "no"
                cond &= mask == "s_no"
                cond &= deterministic == "f"
                if not cond:
                    continue
            else:
                # Don't build fp32 by default
                if dtype == "fp32":
                    continue

            gen_dot_do_o[t.dot_do_o_kernel] = True
            gen_dq_dk_dv[t.dq_dk_dv_kernel] = True
            if not t.convert_dq_kernel.disabled:
                gen_convert_dq[t.convert_dq_kernel] = True
            api_pool.register_dq_dk_dv_traits(t)

    return (
        api_pool,
        list(gen_dot_do_o.keys()),
        list(gen_dq_dk_dv.keys()),
        list(gen_convert_dq.keys()),
    )


def write_blobs(
    targets: List[str],
    output_dir: Path,
    filter_list: str,
    receipt,
    optdim_list,
    mask_impl,
) -> None:
    """Generate the dispatch API and all bwd kernel sources into output_dir."""
    api_pool, dot_do_o, dq_dk_dv, convert_dq = get_bwd_blobs(
        targets, filter_list, receipt, mask_impl, optdim_list
    )
    update_file(output_dir / FMHA_BWD_API_FILENAME, api_pool.api)
    # same emit order as before: dot_do_o, convert_dq, then dq_dk_dv
    for kernel in itertools.chain(dot_do_o, convert_dq, dq_dk_dv):
        update_file(output_dir / kernel.filename, kernel.template)


def list_blobs(
    targets: List[str],
    file_path: Path,
    filter_list: str,
    receipt,
    optdim_list,
    mask_impl,
) -> None:
    """Append the path of every file generation would produce to file_path."""
    _, dot_do_o, dq_dk_dv, convert_dq = get_bwd_blobs(
        targets, filter_list, receipt, mask_impl, optdim_list
    )
    gen_root = file_path.parent / GEN_DIR
    # list kernels in the original order, with the API file last
    names = [k.filename for k in itertools.chain(dot_do_o, dq_dk_dv, convert_dq)]
    names.append(FMHA_BWD_API_FILENAME)
    with file_path.open("a") as f:
        f.writelines(f"{gen_root / name}\n" for name in names)