import os
import torch, torch.library
from torch.utils.cpp_extension import load
from torch.compiler import allow_in_graph

# Directory of this file; used to locate the kernel source to JIT-compile.
current_dir = os.path.dirname(os.path.abspath(__file__))

# Restrict the ROCm build to a single GPU architecture so the JIT compile is
# fast.  NOTE(review): gfx942 (MI300-series) is hard-coded — confirm it
# matches the deployment GPUs, or make it configurable.
os.environ['PYTORCH_ROCM_ARCH'] = 'gfx942'
# JIT-compile and load the extension at import time.  The module exposes at
# least `permute` and `mlp`, used by the custom op below.
gpu_module = load(
    name="qwen2_fast",
    sources=[os.path.join(current_dir, "qwen2_fast.cu")],
    verbose=True
)

# The n == 0 branch repacks the weight tensors IN PLACE (copy_), so the
# mutation must be declared — with mutates_args=() torch.compile /
# functionalization would assume the weights are read-only and may produce
# silently wrong results.
@torch.library.custom_op("qwen_ops::mlp", mutates_args=("gate_up_proj", "down_proj"))
def raw_mlp(
    input: torch.Tensor,
    gate_up_proj: torch.Tensor,
    down_proj: torch.Tensor,
    n: int
) -> torch.Tensor:
    """Fused MLP forward using the JIT-compiled ``qwen2_fast`` kernels.

    Flattens ``input`` to 2-D, runs the fused gate/up + down projection,
    and restores the original leading dimensions on the result.

    Args:
        input: Activation tensor; any leading dims, last dim is ``k``.
        gate_up_proj: Fused gate+up projection weight; repacked in place on
            the first call (when ``n == 0``).
        down_proj: Down projection weight; repacked in place on the first
            call (when ``n == 0``).
        n: Output width passed to the kernel, or ``0`` to trigger the
            one-time weight repack (``n`` is then taken from ``down_proj``).

    Returns:
        Tensor with the same shape as ``input``.
    """
    input_shape = input.size()
    if len(input_shape) > 2:
        input = input.view(-1, input_shape[-1])
    m = input.size(0)
    k = input.size(1)
    if n == 0:
        # First call: derive n and repack both weights into the kernel's
        # preferred layout, writing the result back into the original
        # storage so subsequent calls skip this branch.
        # NOTE(review): assumes callers pass n == 0 exactly once per weight
        # pair — confirm against call sites.
        n = down_proj.size(1)
        gate_up_proj_0 = gpu_module.permute(gate_up_proj)
        down_proj_0 = gpu_module.permute(down_proj)
        gate_up_proj.copy_(gate_up_proj_0.view(gate_up_proj.size()))
        down_proj.copy_(down_proj_0.view(down_proj.size()))

    input_0 = gpu_module.permute(input)
    res = gpu_module.mlp(input_0, gate_up_proj, down_proj, m, n, k)
    # The kernel may pad the row count up to a tile multiple; trim padding.
    if res.size(0) > input.size(0):
        res = res[:input.size(0), :]
    if len(input_shape) > 2:
        res = res.view(input_shape)
    return res

@torch.library.register_fake("qwen_ops::mlp")
def raw_mlp_fake(
    input: torch.Tensor,
    gate_up_proj: torch.Tensor,
    down_proj: torch.Tensor,
    n: int
) -> torch.Tensor:
    """Fake (meta) implementation of ``qwen_ops::mlp``.

    The real op returns a freshly allocated tensor shaped like ``input``
    (the kernel result is trimmed/viewed back to ``input``'s shape), so the
    fake must allocate a new tensor too.  Returning ``input`` itself would
    claim the output aliases the input, violating the custom-op aliasing
    contract (``torch.library.custom_op`` outputs must not alias inputs)
    and misleading the compiler's aliasing analysis.
    """
    return torch.empty_like(input)

# @torch.library.custom_op("qwen_ops::permute", mutates_args=())
# def raw_permute(
#     input: torch.Tensor
# ) -> torch.Tensor:
#     return gpu_module.permute(input)

# @torch.library.register_fake("qwen_ops::permute")
# def raw_permute_fake(
#     input: torch.Tensor,
# ) -> torch.Tensor:
#     m = (input.size(0) + 31) // 32
#     k = (input.size(1) + 8) // 32
#     return torch.empty((m, k), dtype=input.dtype, device=input.device)
 


# Public entry point: the dispatcher-registered op, callable as ``mlp(...)``.
mlp = torch.ops.qwen_ops.mlp