from typing import (
    Any, Callable, ContextManager, Iterator, List, Literal, NamedTuple, Optional, Sequence, Tuple, TypeVar,
    Union, overload,
)

import torch
from torchair._ge_concrete_graph import ge_apis as ge
from torchair._ge_concrete_graph.fx2ge_converter import declare_supported, register_fx_node_ge_converter
from torchair.ge._ge_graph import Tensor, TensorSpec, DataType
from torchair._ge_concrete_graph.supported_declaration import _TypedTensor, F32, F16, F64, I32, I16, I64, I8, U8, \
    BOOL, Support

# Extend the "air" custom-op namespace ("FRAGMENT" adds ops to a library that
# is created elsewhere, rather than claiming sole ownership of the namespace).
lib = torch.library.Library("air", "FRAGMENT")
# Torch schema for the fused infer-attention-score v2 op.  Everything after
# `*` is keyword-only; the op returns (attention_out, softmax_lse).
# NOTE: this string is the op's ABI — keep it in sync with the converter and
# the meta implementation below.
lib.define(
    """
    npu_fused_infer_attention_score_v2(Tensor query, Tensor key, Tensor value, *, Tensor? query_rope=None, \
    Tensor? key_rope=None, Tensor? pse_shift=None, Tensor? atten_mask=None, Tensor? actual_seq_qlen=None,  \
    Tensor? actual_seq_kvlen=None, Tensor? block_table=None, Tensor? dequant_scale_query=None, \
    Tensor? dequant_scale_key=None, Tensor? dequant_offset_key=None, Tensor? dequant_scale_value=None,  \
    Tensor? dequant_offset_value=None, Tensor? dequant_scale_key_rope=None,  \
    Tensor? quant_scale_out=None, Tensor? quant_offset_out=None, Tensor? learnable_sink=None, int num_query_heads=1, \
    int num_key_value_heads=0,  float softmax_scale=1.0, int pre_tokens=2147483647, int next_tokens=2147483647, \
    str input_layout="BSH", int sparse_mode=0, int block_size=0, int query_quant_mode=0, int key_quant_mode=0, \
    int value_quant_mode=0, int inner_precise=0, bool return_softmax_lse=False, int? query_dtype=None, \
    int? key_dtype=None, int? value_dtype=None, int? query_rope_dtype=None, int? key_rope_dtype=None, \
    int? key_shared_prefix_dtype=None, int? value_shared_prefix_dtype=None, int? dequant_scale_query_dtype=None, \
    int? dequant_scale_key_dtype=None, int? dequant_scale_value_dtype=None, int? dequant_scale_key_rope_dtype=None) \
    -> (Tensor, Tensor)
    """
)


def _npu_fused_infer_attention_score_v2(*args, **kwargs):
    """Thin wrapper that dispatches to the registered ``air`` custom op."""
    op = torch.ops.air.npu_fused_infer_attention_score_v2
    return op(*args, **kwargs)


def npu_fused_infer_attention_score_v2_impl(*args, **kwargs):
    """Eager-mode stub: this op is graph-mode only, so any eager call fails."""
    raise NotImplementedError(
        "eager mode of torchair.ops.npu_fused_infer_attention_score_v2 is not supported, "
        "use graph mode or torch_npu.npu_fused_infer_attention_score_v2!"
    )


# Register the not-implemented eager stub for both CPU and NPU ("PrivateUse1")
# backends; only the Meta and graph-mode (GE converter) paths are functional.
torch.library.impl(lib, "npu_fused_infer_attention_score_v2", "CPU")(npu_fused_infer_attention_score_v2_impl)
torch.library.impl(lib, "npu_fused_infer_attention_score_v2", "PrivateUse1")(npu_fused_infer_attention_score_v2_impl)


@declare_supported(
    [
        # q/k/v in 3-D BSH layout: q b=1, s=2048, h=40*128; k/v b=1, s=2048, h=40*128
        Support(F16(1, 2048, 40 * 128), F16(1, 2048, 40 * 128), F16(1, 2048, 40 * 128),
            num_query_heads=40, input_layout="BSH"),
        # q/k/v in 4-D BNSD layout
        Support(F16(1, 40, 2048, 128), F16(1, 40, 2048, 128), F16(1, 40, 2048, 128),
            num_query_heads=40, input_layout="BNSD"),
        # custom softmax scale value
        Support(F16(1, 40, 2048, 128), F16(1, 40, 2048, 128), F16(1, 40, 2048, 128),
            input_layout="BNSD", num_query_heads=40, softmax_scale=0.0884),
    ]
)
@register_fx_node_ge_converter(torch.ops.air.npu_fused_infer_attention_score_v2.default)
def convert_npu_npu_fused_infer_attention_score_v2_tensor(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    *,
    query_rope: Optional[Tensor] = None,
    key_rope: Optional[Tensor] = None,
    pse_shift: Optional[Tensor] = None,
    atten_mask: Optional[Tensor] = None,
    actual_seq_qlen: Optional[Tensor] = None,
    actual_seq_kvlen: Optional[Tensor] = None,
    block_table: Optional[Tensor] = None,
    dequant_scale_query: Optional[Tensor] = None,
    dequant_scale_key: Optional[Tensor] = None,
    dequant_offset_key: Optional[Tensor] = None,
    dequant_scale_value: Optional[Tensor] = None,
    dequant_offset_value: Optional[Tensor] = None,
    dequant_scale_key_rope: Optional[Tensor] = None,
    quant_scale_out: Optional[Tensor] = None,
    quant_offset_out: Optional[Tensor] = None,
    learnable_sink: Optional[Tensor] = None,
    num_query_heads: int = 1,
    num_key_value_heads: int = 0,
    softmax_scale: float = 1.0,
    pre_tokens: int = 2147483647,
    next_tokens: int = 2147483647,
    input_layout: str = "BSH",
    sparse_mode: int = 0,
    block_size: int = 0,
    query_quant_mode: int = 0,
    key_quant_mode: int = 0,
    value_quant_mode: int = 0,
    inner_precise: int = 0,
    return_softmax_lse: bool = False,
    query_dtype: Optional[int] = None,
    key_dtype: Optional[int] = None,
    value_dtype: Optional[int] = None,
    query_rope_dtype: Optional[int] = None,
    key_rope_dtype: Optional[int] = None,
    key_shared_prefix_dtype: Optional[int] = None,
    value_shared_prefix_dtype: Optional[int] = None,
    dequant_scale_query_dtype: Optional[int] = None,
    dequant_scale_key_dtype: Optional[int] = None,
    dequant_scale_value_dtype: Optional[int] = None,
    dequant_scale_key_rope_dtype: Optional[int] = None,
    meta_outputs: TensorSpec = None,
):
    """Graph-mode converter: lowers torch.ops.air.npu_fused_infer_attention_score_v2
    to the GE op FusedInferAttentionScore.

    The v2 signature names (dequant_scale_key, query_quant_mode, ...) are
    remapped to the GE op's attribute names (key_antiquant_scale,
    query_quant_mode, ...); GE inputs with no v2 counterpart are passed as
    None/defaults below.  Returns the GE node's outputs
    (attention_out, softmax_lse).
    """
    # NOTE: do not modify this function in isolation; keep it in sync with the
    # interface variant where actual seq length is a symint list.
    # Per-dimension shape multiplier used when unpacking int32-packed int4
    # key/value tensors: one int32 holds eight int4 values, so the innermost
    # dimension grows by 8.
    # NOTE(review): the non-BSH branch assumes a 4-D layout — TODO confirm for
    # 3-D layouts such as TND.
    if input_layout == 'BSH':
        const = ge.Const([1, 1, 8])
    else:
        const = ge.Const([1, 1, 1, 8])
    if key is not None and key.dtype == DataType.DT_INT32:
        # Reinterpret the packed int32 key as int4 and restore the unpacked shape.
        shape = ge.Shape(key)
        key_shape = ge.Mul(shape, const)
        key = ge.Bitcast(key, type=DataType.DT_INT4)
        key = ge.Reshape(key, key_shape)

    if value is not None and value.dtype == DataType.DT_INT32:
        # Same int32 -> int4 unpacking for the value tensor.
        shape = ge.Shape(value)
        value_shape = ge.Mul(shape, const)
        value = ge.Bitcast(value, type=DataType.DT_INT4)
        value = ge.Reshape(value, value_shape)

    # FusedInferAttentionScore takes key/value as dynamic (list) inputs.
    key_list = [key]
    value_list = [value]
    # GE inputs/attributes that the v2 interface does not expose; forwarded as
    # None (or 0 for antiquant_mode) so the GE op uses its defaults.
    quant_scale1 = None
    dequant_scale2 = None
    dequant_scale1 = None
    antiquant_scale = None
    antiquant_offset = None
    query_padding_size = None
    kv_padding_size = None
    key_shared_prefix = None
    value_shared_prefix = None
    actual_shared_prefix_len = None
    antiquant_mode = 0
    return ge.FusedInferAttentionScore(query, key_list, value_list, pse_shift=pse_shift, atten_mask=atten_mask,
        actual_seq_lengths=actual_seq_qlen, actual_seq_lengths_kv=actual_seq_kvlen,
        dequant_scale1=dequant_scale1, quant_scale1=quant_scale1, dequant_scale2=dequant_scale2,
        quant_scale2=quant_scale_out, quant_offset2=quant_offset_out, antiquant_scale=antiquant_scale,
        antiquant_offset=antiquant_offset, block_table=block_table, query_padding_size=query_padding_size,
        kv_padding_size=kv_padding_size, key_antiquant_scale=dequant_scale_key,
        key_antiquant_offset=dequant_offset_key, value_antiquant_scale=dequant_scale_value,
        value_antiquant_offset=dequant_offset_value, key_shared_prefix=key_shared_prefix,
        value_shared_prefix=value_shared_prefix, actual_shared_prefix_len=actual_shared_prefix_len,
        query_rope=query_rope, key_rope=key_rope, key_rope_antiquant_scale=dequant_scale_key_rope,
        dequant_scale_query=dequant_scale_query, learnable_sink=learnable_sink,
        q_start_idx=None, kv_start_idx=None, num_heads=num_query_heads, scale=softmax_scale,
        pre_tokens=pre_tokens, next_tokens=next_tokens, input_layout=input_layout,
        num_key_value_heads=num_key_value_heads, sparse_mode=sparse_mode, inner_precise=inner_precise,
        block_size=block_size, antiquant_mode=antiquant_mode, softmax_lse_flag=return_softmax_lse,
        key_antiquant_mode=key_quant_mode, value_antiquant_mode=value_quant_mode, query_quant_mode=query_quant_mode,
        pse_type=0, out_dtype=0)


def _fia_v2_meta_empty(query, sizes):
    """Allocate a meta tensor with the given shape and the query's dtype."""
    return torch.empty(sizes, dtype=query.dtype, device='meta')


def _fia_v2_lse_empty(sizes):
    """Allocate a float32 meta tensor for the softmax-lse output."""
    return torch.empty(sizes, dtype=torch.float32, device='meta')


def _fia_v2_check_query_dim(input_layout, query, expected):
    """Check the query rank required by `input_layout`.

    BUGFIX: the messages previously appended `ops_error(ErrCode.VALUE)`, a
    name never imported in this module, so a failed check raised NameError
    instead of the intended message.
    """
    token_x_dim = query.dim()
    torch._check(
        token_x_dim == expected,
        lambda: "Layout " + input_layout + ", queryDims must be " + str(expected)
                + "!, but the actual value is " + str(token_x_dim),
    )


def _fia_v2_check_kv_dim(input_layout, key):
    """Check the key rank (3/4/5 are supported in page-attention T*D layouts)."""
    token_kv_dim = key.dim()
    torch._check(
        token_kv_dim in (3, 4, 5),
        lambda: "Layout " + input_layout + ", token_kv_dim must be 3/4/5!, but the actual value is "
                + str(token_kv_dim),
    )


def _fia_v2_lse_shape(input_layout, query, num_query_heads, B, N, S1):
    """softmax-lse shape used by the quantized-output paths."""
    if input_layout == "TND":
        return [query.size(0), num_query_heads, 1]
    if input_layout == "TND_NTD":
        return [num_query_heads, query.size(0), 1]
    return [B, N, S1, 1]


@torch.library.impl(lib, "npu_fused_infer_attention_score_v2", "Meta")
def npu_fused_infer_attention_score_v2_meta_impl(query, key, value, *, query_rope=None, key_rope=None, pse_shift=None,
    atten_mask=None, actual_seq_qlen=None, actual_seq_kvlen=None, block_table=None, dequant_scale_query=None,
    dequant_scale_key=None, dequant_offset_key=None, dequant_scale_value=None, dequant_offset_value=None,
    dequant_scale_key_rope=None, quant_scale_out=None, quant_offset_out=None, learnable_sink=None, num_query_heads=1,
    num_key_value_heads=0, softmax_scale=1.0, pre_tokens=2147483647, next_tokens=2147483647, input_layout="BSH",
    sparse_mode=0, block_size=0, query_quant_mode=0, key_quant_mode=0, value_quant_mode=0, inner_precise=0,
    return_softmax_lse=False, query_dtype=None, key_dtype=None, value_dtype=None, query_rope_dtype=None,
    key_rope_dtype=None, key_shared_prefix_dtype=None, value_shared_prefix_dtype=None, dequant_scale_query_dtype=None,
    dequant_scale_key_dtype=None, dequant_scale_value_dtype=None, dequant_scale_key_rope_dtype=None):
    """Meta (shape-only) implementation for FakeTensor/meta tracing.

    Derives the shapes of the two outputs, (attention_out, softmax_lse), from
    `input_layout` and the query/key/value shapes.  Output dtype:
    int8 when `quant_scale_out` is given; `query_rope.dtype` (or fp16) when
    the query is int8; otherwise the query's dtype.  softmax_lse is float32;
    shape [0] when `return_softmax_lse` is False.
    """
    # NOTE: do not modify this function in isolation; keep it in sync with the
    # interface variant where actual seq length is a symint list.
    # Fallback output (unknown layout): same shape as query.
    tmp_out = torch.empty_like(query, dtype=query.dtype, device='meta')
    # B/N/S1 feed the default [B, N, S1, 1] softmax-lse shape below.
    B = 1
    N = 1
    S1 = 1
    if input_layout == "BNSD_BSND":
        _fia_v2_check_query_dim(input_layout, query, 4)
        tmp_out = _fia_v2_meta_empty(query, [query.size(0), query.size(2), query.size(1), query.size(3)])
        B, N, S1 = query.size(0), query.size(1), query.size(2)
    elif input_layout == "BNSD_NBSD":
        _fia_v2_check_query_dim(input_layout, query, 4)
        tmp_out = _fia_v2_meta_empty(query, [query.size(1), query.size(0), query.size(2), query.size(3)])
        B, N, S1 = query.size(0), query.size(1), query.size(2)
    elif input_layout == "BSND_NBSD":
        _fia_v2_check_query_dim(input_layout, query, 4)
        tmp_out = _fia_v2_meta_empty(query, [query.size(2), query.size(0), query.size(1), query.size(3)])
        B, N, S1 = query.size(0), query.size(2), query.size(1)
    elif input_layout == "BSH_NBSD":
        _fia_v2_check_query_dim(input_layout, query, 3)
        tmp_out = _fia_v2_meta_empty(
            query, [num_query_heads, query.size(0), query.size(1), query.size(2) // num_query_heads])
        B, N, S1 = query.size(0), num_query_heads, query.size(1)
    elif input_layout == "BNSD":
        _fia_v2_check_query_dim(input_layout, query, 4)
        tmp_out = _fia_v2_meta_empty(query, [query.size(0), query.size(1), query.size(2), query.size(3)])
        B, N, S1 = query.size(0), query.size(1), query.size(2)
    elif input_layout == "BSH":
        _fia_v2_check_query_dim(input_layout, query, 3)
        tmp_out = _fia_v2_meta_empty(query, [query.size(0), query.size(1), query.size(2)])
        B, N, S1 = query.size(0), num_query_heads, query.size(1)
    elif input_layout == "BSND":
        _fia_v2_check_query_dim(input_layout, query, 4)
        tmp_out = _fia_v2_meta_empty(query, [query.size(0), query.size(1), query.size(2), query.size(3)])
        B, N, S1 = query.size(0), num_query_heads, query.size(1)
    elif input_layout == "NSD":
        _fia_v2_check_query_dim(input_layout, query, 3)
        tmp_out = _fia_v2_meta_empty(query, [query.size(0), query.size(1), query.size(2)])
        B, N, S1 = 1, query.size(0), query.size(1)
    elif input_layout == "TND":
        if num_key_value_heads == 0:
            num_key_value_heads = num_query_heads
        _fia_v2_check_query_dim(input_layout, query, 3)
        # IFA supports TND only with page attention; PFA only without it.
        if block_table is not None:
            _fia_v2_check_kv_dim(input_layout, key)
            kv_dim = key.dim()
            if kv_dim == 3:
                tmp_out = _fia_v2_meta_empty(query, [query.size(0), query.size(1),
                                                     value.size(2) // num_key_value_heads])
            elif kv_dim == 4:
                tmp_out = _fia_v2_meta_empty(query, [query.size(0), query.size(1), value.size(3)])
            else:
                tmp_out = _fia_v2_meta_empty(query, [query.size(0), query.size(1),
                                                     value.size(4) * value.size(2)])
        else:
            tmp_out = _fia_v2_meta_empty(query, [query.size(0), query.size(1), value.size(2)])
    elif input_layout == "TND_NTD":
        _fia_v2_check_query_dim(input_layout, query, 3)
        tmp_out = _fia_v2_meta_empty(query, [query.size(1), query.size(0), query.size(2)])
    elif input_layout == "NTD_TND":
        if num_key_value_heads == 0:
            num_key_value_heads = num_query_heads
        _fia_v2_check_query_dim(input_layout, query, 3)
        if block_table is not None:
            _fia_v2_check_kv_dim(input_layout, key)
            kv_dim = key.dim()
            if kv_dim == 3:
                tmp_out = _fia_v2_meta_empty(query, [query.size(1), query.size(0),
                                                     value.size(2) // num_key_value_heads])
            elif kv_dim == 4:
                tmp_out = _fia_v2_meta_empty(query, [query.size(1), query.size(0), value.size(3)])
            else:
                tmp_out = _fia_v2_meta_empty(query, [query.size(1), query.size(0),
                                                     value.size(4) * value.size(2)])
        else:
            tmp_out = _fia_v2_meta_empty(query, [query.size(1), query.size(0), value.size(2)])

    if quant_scale_out is not None:
        # Quantized output: attention out is int8.
        out = torch.empty_like(tmp_out, dtype=torch.int8)
        if return_softmax_lse:
            return (out, _fia_v2_lse_empty(_fia_v2_lse_shape(input_layout, query, num_query_heads, B, N, S1)))
        return (out, _fia_v2_lse_empty([0]))
    if query.dtype == torch.int8:
        # int8 query without an output quant scale: out dtype follows the
        # query rope when present, otherwise fp16.
        out_type = query_rope.dtype if query_rope is not None else torch.half
        out = torch.empty_like(tmp_out, dtype=out_type)
        if return_softmax_lse:
            return (out, _fia_v2_lse_empty(_fia_v2_lse_shape(input_layout, query, num_query_heads, B, N, S1)))
        return (out, _fia_v2_lse_empty([0]))
    # Default path: attention out keeps the query dtype.
    out = torch.empty_like(tmp_out)
    if not return_softmax_lse:
        return (out, _fia_v2_lse_empty([0]))
    if input_layout == "TND":
        # IFA supports TND only with page attention; PFA only without it.
        if block_table is not None:
            # Zero head-dim degenerates the lse's trailing dimension to 0.
            lse_last = 0 if query.size(2) == 0 else 1
            return (out, _fia_v2_lse_empty([query.size(0), num_query_heads, lse_last]))
        return (out, _fia_v2_lse_empty([query.size(0), query.size(1), 1]))
    if input_layout == "TND_NTD":
        return (out, _fia_v2_lse_empty([num_query_heads, query.size(0), 1]))
    if input_layout == "NTD_TND":
        lse_last = 0 if (block_table is not None and query.size(2) == 0) else 1
        return (out, _fia_v2_lse_empty([query.size(1), query.size(0), lse_last]))
    return (out, _fia_v2_lse_empty([B, N, S1, 1]))