import triton
import triton.language as tl
import torch
import torch_npu
import pytest
import numpy as np
import random
from triton.runtime.driver import driver

def get_soc_info():
    """Return the device-properties mapping for the currently active NPU.

    The mapping is produced by the Triton NPU driver; callers in this file
    read at least ``num_aicore`` and ``num_vectorcore`` from it.
    """
    # The previous revision also fetched the current compilation target,
    # but that value was never used; only the device properties are needed.
    device = driver.active.get_current_device()
    return driver.active.utils.get_device_properties(device)

@triton.jit
def _forward(
    q_ptr, k_ptr, v_ptr, out_ptr,
    stride_q_0, stride_q_1, stride_q_2, stride_q_3,
    stride_k_0, stride_k_1, stride_k_2, stride_k_3,
    stride_v_0, stride_v_1, stride_v_2, stride_v_3,
    stride_o_0, stride_o_1, stride_o_2, stride_o_3,
    batch_size, num_q_heads, num_kv_heads, q_len, kv_len, head_dim,
    q_loops, bh_loops,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
    HEAD_DIM: tl.constexpr,
):
    # Flash-attention forward kernel: out = softmax(Q @ K^T) @ V, computed
    # tile by tile with the online (streaming) softmax recurrence so the
    # full (q_len, kv_len) score matrix is never materialized.
    #
    # Work decomposition: there are q_loops (= ceil(q_len / BLOCK_M)) query
    # tiles for each of bh_loops (= batch * num_q_heads) batch/head pairs,
    # and each program grid-strides over that flat index space.
    #
    # NOTE(review): batch_size, num_kv_heads and head_dim are accepted but
    # never read.  K and V are indexed with the *query* head index h, so
    # this kernel assumes num_kv_heads == num_q_heads (no GQA) — confirm.
    # NOTE(review): no 1/sqrt(head_dim) scaling is applied to the logits —
    # confirm callers pre-scale Q if standard attention is expected.
    # NOTE(review): loads mask only the row (sequence) axis; HEAD_DIM is
    # assumed to equal head_dim exactly (the launcher passes q.shape[3]).
    row_start = tl.program_id(0)
    row_steps = tl.num_programs(0)
    core_loops = q_loops * bh_loops
    for core_idx in range(row_start, core_loops, row_steps):
        # Decode the flat work index into (query tile, batch, head).
        q_idx = core_idx // bh_loops
        bh_idx = core_idx % bh_loops
        b = bh_idx // num_q_heads
        h = bh_idx % num_q_heads

        # Base address of the (b, h) slice of each input tensor.
        q_ptr_base = q_ptr + b * stride_q_0 + h * stride_q_1
        k_ptr_base = k_ptr + b * stride_k_0 + h * stride_k_1
        v_ptr_base = v_ptr + b * stride_v_0 + h * stride_v_1

        # Row offsets of this query tile and the full head-dim range.
        offs_m = q_idx * BLOCK_M + tl.arange(0, BLOCK_M)
        offs_head_dim = tl.arange(0, HEAD_DIM)

        # Load the (BLOCK_M, HEAD_DIM) query tile; rows past q_len read 0.
        Q = tl.load(q_ptr_base + offs_m[:, None] * stride_q_2 + offs_head_dim[None, :] * stride_q_3, mask=offs_m[:, None] < q_len, other=0.0)

        # Online-softmax running state for this query tile:
        #   m_i : per-row running max of the logits seen so far
        #   l_i : per-row running sum of exp(logit - m_i)
        #   acc : un-normalized output accumulator (rescaled as m_i grows)
        m_i = tl.full([BLOCK_M], float("-inf"), dtype=tl.float32)
        l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
        acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)

        # Sweep over the key/value sequence in BLOCK_N-sized tiles.
        num_blocks = tl.cdiv(kv_len, BLOCK_N)
        for k_idx in range(0, num_blocks):
            offs_n = k_idx * BLOCK_N + tl.arange(0, BLOCK_N)

            K = tl.load(k_ptr_base + offs_n[:, None] * stride_k_2 + offs_head_dim[None, :] * stride_k_3, mask=offs_n[:, None] < kv_len, other=0.0)

            V = tl.load(v_ptr_base + offs_n[:, None] * stride_v_2 + offs_head_dim[None, :] * stride_v_3, mask=offs_n[:, None] < kv_len, other=0.0)

            # Unscaled attention logits for this (M, N) tile, in fp32.
            qk = tl.zeros(shape=(BLOCK_M, BLOCK_N), dtype=tl.float32)

            K_trans = tl.trans(K)
            qk += tl.dot(Q, K_trans)

            # Online-softmax update: fold this tile's logits into the
            # running (max, sum, accumulator) state.  Statement order
            # matters: acc and l_i must be rescaled by alpha = exp(old_max
            # - new_max) before m_i is overwritten.
            m_ij = tl.max(qk, axis=1)
            m_j = tl.maximum(m_i, m_ij)
            P = tl.exp(qk - m_j[:, None])
            l_j = tl.sum(P, axis=1)
            alpha = tl.exp(m_i - m_j)
            acc = acc * alpha[:, None]
            l_i = l_i * alpha + l_j
            m_i = m_j

            # Accumulate P @ V (V cast to match P so tl.dot dtypes agree).
            V = V.to(P.dtype)
            acc += tl.dot(P, V)

        # Final normalization by the softmax denominator.
        acc = acc / l_i[:, None]

        # Write the tile back; rows past q_len are masked out.
        off_o = b * stride_o_0 + h * stride_o_1 + offs_m[:, None] * stride_o_2 + offs_head_dim[None, :] * stride_o_3
        tl.store(out_ptr + off_o, acc, mask=offs_m[:, None] < q_len)

def flash_attn(
    q, k, v, *, block_m=16, block_n=16
):
    """Flash-attention forward pass on NPU: out = softmax(q @ k^T) @ v.

    NOTE(review): the kernel applies no 1/sqrt(head_dim) scaling to the
    logits — callers expecting standard scaled dot-product attention must
    pre-scale ``q``.  Confirm this is intentional.

    Args:
        q: queries, shape (batch, num_heads, q_len, head_dim).
        k: keys, shape (batch, num_heads, kv_len, head_dim).
        v: values, same shape as ``k``.
        block_m: tile size along the query-length axis (default 16,
            matching the previously hard-coded value).
        block_n: tile size along the key/value-length axis (default 16).

    Returns:
        Attention output tensor with the same shape and dtype as ``q``.

    Raises:
        ValueError: if the tensor shapes are mutually inconsistent, or if
            the number of KV heads differs from the number of query heads —
            the kernel indexes K/V with the query head index, so
            grouped-query attention would silently read the wrong (or
            out-of-bounds) heads.
    """
    if k.shape != v.shape:
        raise ValueError(
            f"k and v must have identical shapes, got {tuple(k.shape)} and {tuple(v.shape)}"
        )
    if q.shape[0] != k.shape[0] or q.shape[3] != k.shape[3]:
        raise ValueError(
            f"q and k must agree on batch and head_dim, got {tuple(q.shape)} and {tuple(k.shape)}"
        )
    if q.shape[1] != k.shape[1]:
        raise ValueError(
            "grouped-query attention is not supported: "
            f"num_q_heads={q.shape[1]} != num_kv_heads={k.shape[1]}"
        )

    # The kernel addresses tensors through explicit strides but tiles
    # assume dense layouts; force contiguity up front.
    q = q.contiguous()
    k = k.contiguous()
    v = v.contiguous()

    out = torch.empty_like(q)

    # Launch one persistent program per AI (cube) core; the kernel
    # grid-strides over all (query-tile, batch, head) work items.
    prop = get_soc_info()
    grid_cube = (prop["num_aicore"], 1, 1)

    # NOTE(review): tl.arange requires a power-of-two extent, so head_dim
    # is presumably a power of two — confirm against callers.
    HEAD_DIM = q.shape[3]

    _forward[grid_cube](
        q, k, v, out,
        q.stride(0), q.stride(1), q.stride(2), q.stride(3),
        k.stride(0), k.stride(1), k.stride(2), k.stride(3),
        v.stride(0), v.stride(1), v.stride(2), v.stride(3),
        out.stride(0), out.stride(1), out.stride(2), out.stride(3),
        q.shape[0], q.shape[1], k.shape[1], q.shape[2], k.shape[2], q.shape[3],
        triton.cdiv(q.shape[2], block_m), q.shape[0] * q.shape[1],
        BLOCK_M=block_m, BLOCK_N=block_n, HEAD_DIM=HEAD_DIM
    )

    return out
