# -*- coding: utf-8 -*-

import os
from typing import List, Optional, Tuple

import pytest
import torch
import triton
import triton.language as tl
from fla.ops.utils.op import exp

from fla.utils import assert_close
from fla.modules.l2norm import l2norm_fwd

from fla.utils import input_guard, is_amd
# Row-tile sizes the autotuner explores for the block-of-rows kernel below.
BT_LIST = [8, 16, 32, 64, 128]
# Candidate warp counts for autotuning; the list is shorter on AMD devices.
# NOTE(review): this assumes `is_amd` is a module-level bool in fla.utils; if it
# were a function, the bare reference would always be truthy — confirm.
NUM_WARPS_AUTOTUNE = [1, 2, 4, 8, 16] if is_amd else [1, 2, 4, 8, 16, 32]

@triton.autotune(
    configs=[
        triton.Config({}, num_warps=num_warps)
        for num_warps in NUM_WARPS_AUTOTUNE
    ],
    key=['D']
)
@triton.jit
def l2norm_fwd_kernel1(
    x,      # [T, D] input rows (contiguous, row-major)
    y,      # [T, D] output: L2-normalized rows
    rstd,   # [T] output: reciprocal L2 norm per row (float32)
    eps,    # stabilizer added to the squared sum before the square root
    D,      # feature dimension (runtime value)
    BD: tl.constexpr,  # power-of-two block covering D (BD >= D)
):
    # One program normalizes one row: y = x / sqrt(sum(x*x) + eps).
    i_t = tl.program_id(0)
    x += i_t * D
    y += i_t * D
    cols = tl.arange(0, BD)
    mask = cols < D

    # Accumulate the squared norm in float32 regardless of the input dtype.
    b_x = tl.load(x + cols, mask=mask, other=0.0).to(tl.float32)
    b_rstd = 1 / tl.sqrt(tl.sum(b_x * b_x) + eps)
    b_y = b_x * b_rstd
    # NOTE(review): b_y is stored without an explicit cast (implicit cast to
    # y's element type); l2norm_fwd_kernel below casts explicitly — consider
    # matching for consistency.
    tl.store(y + cols, b_y, mask=mask)
    tl.store(rstd + i_t, b_rstd)

@triton.autotune(
    configs=[
        triton.Config({'BT': BT}, num_warps=num_warps)
        # NOTE(review): hard-coded warp list; l2norm_fwd_kernel1 uses
        # NUM_WARPS_AUTOTUNE — consider unifying.
        for num_warps in [1, 2, 4, 8, 16]
        for BT in BT_LIST
    ],
    key=['D', 'NB']
)
@triton.jit
def l2norm_fwd_kernel(
    x,                  # [T, D] input rows (contiguous, row-major)
    y,                  # [T, D] output: L2-normalized rows
    rstd,               # [T] output: reciprocal L2 norm per row
    eps,                # stabilizer added to the squared sum
    T: tl.constexpr,    # number of rows
    D: tl.constexpr,    # feature dimension
    BD: tl.constexpr,   # power-of-two block covering D (BD >= D)
    NB: tl.constexpr,   # autotune key only: rough number of 2048-row groups
    BT: tl.constexpr,   # rows handled per program (chosen by the autotuner)
):
    # Each program normalizes a (BT, BD) tile of consecutive rows; block
    # pointers with boundary_check handle the ragged last tile.
    i_t = tl.program_id(0)
    p_x = tl.make_block_ptr(x, (T, D), (D, 1), (i_t * BT, 0), (BT, BD), (1, 0))
    p_y = tl.make_block_ptr(y, (T, D), (D, 1), (i_t * BT, 0), (BT, BD), (1, 0))
    p_rstd = tl.make_block_ptr(rstd, (T,), (1,), (i_t * BT,), (BT,), (0,))

    # Row-wise squared norms accumulated in float32.
    b_x = tl.load(p_x, boundary_check=(0, 1)).to(tl.float32)
    b_rstd = 1 / tl.sqrt(tl.sum(b_x * b_x, 1) + eps)
    b_y = b_x * b_rstd[:, None]

    tl.store(p_y, b_y.to(p_y.dtype.element_ty), boundary_check=(0, 1))
    tl.store(p_rstd, b_rstd.to(p_rstd.dtype.element_ty), boundary_check=(0,))


@triton.heuristics({
    'IS_VARLEN': lambda args: args['cu_seqlens'] is not None,
})
@triton.autotune(
    configs=[
        triton.Config({}, num_warps=num_warps, num_stages=num_stages)
        for num_warps in [2, 4]
        for num_stages in [2, 3, 4]
    ],
    key=['H', 'K', 'V', 'BT', 'BK', 'BV', 'IS_VARLEN'],
)
@triton.jit(do_not_specialize=['T'])
def prepare_wy_repr_bwd_kernel(
    # Pointer layouts below are inferred from the block pointers in the body:
    # k/dk:  [total_T, H, K], v/dv/du/dw: [total_T, H, V] or [.., K],
    # beta/g/dbeta/dg: [total_T, H], A: [total_T, H, BT] (read transposed).
    k,
    v,
    beta,
    g,
    A,
    dw,
    du,
    dk,
    dv,
    dbeta,
    dg,
    cu_seqlens,     # [N+1] cumulative sequence lengths; None for fixed-length
    chunk_indices,  # varlen only: flat pairs (seq idx, chunk idx) per program
    T,
    H: tl.constexpr,
    K: tl.constexpr,
    V: tl.constexpr,
    BT: tl.constexpr,
    BK: tl.constexpr,
    BV: tl.constexpr,
    IS_VARLEN: tl.constexpr
):
    # Backward of the WY-representation preparation: given upstream grads
    # dw/du and the forward's per-chunk matrix A, produce dk, dv, dbeta, dg.
    # Grid: (time chunks, batch*heads).
    i_t, i_bh = tl.program_id(0), tl.program_id(1)
    i_b, i_h = i_bh // H, i_bh % H
    if IS_VARLEN:
        # Map this program to (sequence, chunk-within-sequence) and switch T
        # to that sequence's own length.
        i_n, i_t = tl.load(chunk_indices + i_t * 2).to(tl.int32), tl.load(chunk_indices + i_t * 2 + 1).to(tl.int32)
        bos, eos = tl.load(cu_seqlens + i_n).to(tl.int32), tl.load(cu_seqlens + i_n + 1).to(tl.int32)
        T = eos - bos
    else:
        bos, eos = i_b * T, i_b * T + T

    p_beta = tl.make_block_ptr(beta + (bos*H + i_h), (T,), (H,), (i_t * BT,), (BT,), (0,))
    p_g = tl.make_block_ptr(g + (bos*H + i_h), (T,), (H,), (i_t * BT,), (BT,), (0,))
    # A is loaded transposed: shape (BT, T) with strides (1, H*BT).
    p_A = tl.make_block_ptr(A + (bos*H + i_h) * BT, (BT, T), (1, H*BT), (0, i_t * BT), (BT, BT), (0, 1))

    b_A = tl.load(p_A, boundary_check=(0, 1))
    b_beta = tl.load(p_beta, boundary_check=(0,))
    b_g = tl.load(p_g, boundary_check=(0,))
    # NOTE(review): tl.exp is used here while line further below uses the
    # fla `exp` helper imported at the top — confirm they are interchangeable.
    b_g_exp = tl.exp(b_g)

    b_dbeta = tl.zeros([BT], dtype=tl.float32)
    b_dA = tl.zeros([BT, BT], dtype=tl.float32)
    b_dg = tl.zeros([BT], dtype=tl.float32)

    # Pass 1 over K tiles: dA += dw @ (k*beta*exp(g))^T, and the direct part
    # of dk (= A^T-free term), plus dbeta/dg contributions from the w-branch.
    for i_k in range(tl.cdiv(K, BK)):
        p_k = tl.make_block_ptr(k + (bos*H + i_h) * K, (T, K), (H*K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
        p_dk = tl.make_block_ptr(dk + (bos*H + i_h) * K, (T, K), (H*K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
        p_dw = tl.make_block_ptr(dw + (bos*H + i_h) * K, (T, K), (H*K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
        b_k = tl.load(p_k, boundary_check=(0, 1))
        b_k_beta_g = (b_k * b_beta[:, None] * b_g_exp[:, None]).to(b_k.dtype)
        b_dw = tl.load(p_dw, boundary_check=(0, 1))
        b_dA += tl.dot(b_dw, tl.trans(b_k_beta_g))
        b_dk_beta_g = tl.dot(b_A, b_dw)
        b_dk = b_dk_beta_g * b_beta[:, None] * b_g_exp[:, None]
        b_dbeta += tl.sum(b_dk_beta_g * b_k * b_g_exp[:, None], 1)
        b_dg += tl.sum(b_dk_beta_g * b_k * b_g_exp[:, None] * b_beta[:, None], 1)
        tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))

    # Pass over V tiles: symmetric contributions from the u-branch
    # (dA += du @ (v*beta)^T, dv, and dbeta).
    for i_v in range(tl.cdiv(V, BV)):
        p_v = tl.make_block_ptr(v + (bos*H + i_h) * V, (T, V), (H*V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
        p_dv = tl.make_block_ptr(dv + (bos*H + i_h) * V, (T, V), (H*V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
        p_du = tl.make_block_ptr(du + (bos*H + i_h) * V, (T, V), (H*V, 1), (i_t * BT, i_v * BV), (BT, BV), (1, 0))
        b_v = tl.load(p_v, boundary_check=(0, 1))
        b_v_beta = (b_v * b_beta[:, None]).to(b_v.dtype)
        b_du = tl.load(p_du, boundary_check=(0, 1))
        b_dA += tl.dot(b_du, tl.trans(b_v_beta))
        b_dv_beta = tl.dot(b_A, b_du)
        b_dv = b_dv_beta * b_beta[:, None]
        b_dbeta += tl.sum(b_dv_beta * b_v, 1)
        tl.store(p_dv, b_dv.to(p_dv.dtype.element_ty), boundary_check=(0, 1))

    # Restrict dA to the strictly-lower triangle of valid rows, then
    # backprop through the (triangular-inverse) construction of A:
    # dA <- -A (dA ∘ L) A, gated by exp(g_i - g_j) on the valid triangle.
    # (Exact forward recurrence lives in the matching fwd kernel — not
    # visible here.)
    o_t = i_t * BT + tl.arange(0, BT)
    m_t = o_t < T
    m_A = (o_t[:, None] > o_t[None, :]) & (m_t[:, None] & m_t)
    b_dA = tl.where(m_A, b_dA, 0)
    b_dA = tl.dot(b_dA.to(b_A.dtype), b_A)
    b_dA = tl.dot(b_A, b_dA.to(b_A.dtype))
    b_dA = tl.where(m_A, -b_dA * exp(b_g[:, None] - b_g[None, :]), 0)
    b_dA = b_dA.to(k.dtype.element_ty)
    # Reuse b_A to re-accumulate (k*beta) @ k^T for the dg reduction below.
    b_A = tl.zeros([BT, BT], dtype=tl.float32)

    # Pass 2 over K tiles: fold the dA term back into dk (read-modify-write
    # of the values stored in pass 1) and accumulate the remaining dbeta.
    for i_k in range(tl.cdiv(K, BK)):
        p_k = tl.make_block_ptr(k + (bos*H + i_h) * K, (T, K), (H*K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
        p_dk = tl.make_block_ptr(dk + (bos*H + i_h) * K, (T, K), (H*K, 1), (i_t * BT, i_k * BK), (BT, BK), (1, 0))
        b_k = tl.load(p_k, boundary_check=(0, 1))
        b_dk = tl.load(p_dk, boundary_check=(0, 1))
        b_k_beta = (b_k * b_beta[:, None]).to(b_k.dtype)
        b_A += tl.dot(b_k_beta, tl.trans(b_k))
        b_dk_beta = tl.dot(b_dA, b_k)
        b_dbeta += tl.sum(b_dk_beta * b_k, 1)
        b_dk += tl.dot(tl.trans(b_dA), b_k_beta)
        b_dk += b_dk_beta * b_beta[:, None]
        tl.store(p_dk, b_dk.to(p_dk.dtype.element_ty), boundary_check=(0, 1))

    # dg gets the row-minus-column sums of dA ∘ (k*beta @ k^T), then write
    # the per-token gradients.
    b_dA_A = b_dA * b_A
    b_dg += tl.sum(b_dA_A, axis=1) - tl.sum(b_dA_A, axis=0)
    p_dg = tl.make_block_ptr(dg + (bos*H + i_h), (T,), (H,), (i_t * BT,), (BT,), (0,))
    p_dbeta = tl.make_block_ptr(dbeta + (bos*H + i_h), (T,), (H,), (i_t * BT,), (BT,), (0,))
    tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0,))
    tl.store(p_dbeta, b_dbeta.to(p_dbeta.dtype.element_ty), boundary_check=(0,))


def ref_l2norm_fwd(
    x: torch.Tensor,
    eps: float = 1e-6,
    output_dtype: Optional[torch.dtype] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Pure-PyTorch reference for the fused L2-norm forward pass.

    Normalizes ``x`` along its last dimension: ``y = x / sqrt(sum(x**2) + eps)``.

    The previous version of this function re-launched the same Triton kernels
    as the implementation under test, so the comparison in the test below was
    vacuous; it also used ``view`` (fails for non-contiguous inputs). This
    version computes the result directly in PyTorch, accumulating in float32
    to match the kernels, which upcast on load.

    Args:
        x: input tensor of shape ``(..., D)``.
        eps: stabilizer added to the squared sum before the square root.
        output_dtype: dtype of ``y``; defaults to ``x.dtype``. ``rstd`` is
            always float32, matching the Triton kernels.

    Returns:
        y: normalized tensor, same shape as ``x``.
        rstd: reciprocal L2 norms, shape ``x.shape[:-1]``, float32.
    """
    x_shape_og = x.shape
    # reshape (not view) so non-contiguous inputs are handled too
    x2d = x.reshape(-1, x_shape_og[-1]).to(torch.float32)
    rstd = torch.rsqrt((x2d * x2d).sum(dim=-1) + eps)
    y = x2d * rstd[:, None]
    y = y.to(x.dtype if output_dtype is None else output_dtype)
    return y.reshape(x_shape_og), rstd.reshape(x_shape_og[:-1])

@pytest.mark.parametrize(
    ('B', 'T', 'H', 'K'),
    [
        pytest.param(*test, id="B{}-T{}-H{}-K{}".format(*test))
        for test in [
            (1, 1024, 32, 128),
            (1, 4096, 32, 128),
        ]
    ]
)
def test_recompute_w_u_fwd(B, T, H, K):
    """Check the fused ``l2norm_fwd`` against the reference implementation.

    NOTE(review): despite its name, this test exercises l2norm, not the
    w/u recomputation; the device is hard-coded to an Ascend NPU.
    """
    x = torch.rand((B, T, H, K), device="npu:0", dtype=torch.float32)

    ref_y, ref_rstd = ref_l2norm_fwd(x=x)
    tri_y, tri_rstd = l2norm_fwd(x=x)

    assert_close('y', ref_y, tri_y, 0.001)
    assert_close('rstd', ref_rstd, tri_rstd, 0.001)