"""
Triton工具函数和辅助类
"""

import torch
import triton
import triton.language as tl


@triton.jit
def _load_with_boundary_check(ptr, offsets, mask, other=0.0):
    """Masked load: read ``ptr + offsets``; lanes where ``mask`` is False get ``other``."""
    addrs = ptr + offsets
    return tl.load(addrs, mask=mask, other=other)


@triton.jit
def _store_with_boundary_check(ptr, value, offsets, mask):
    """Masked store: write ``value`` to ``ptr + offsets``; lanes where ``mask`` is False are skipped."""
    addrs = ptr + offsets
    tl.store(addrs, value, mask=mask)


def next_power_of_2(n):
    """Return the smallest power of two that is >= ``n``.

    Accepts ints and floats. For ``n <= 1`` (including 0 and negatives)
    returns 1, the smallest power of two.

    Fixes two edge cases of the previous ``2 ** int(n - 1).bit_length()``
    formula: ``n = 0`` used to return 2 (``(-1).bit_length() == 1``), and
    fractional inputs could round *down* (e.g. 4.5 -> 4, which is < 4.5).
    """
    # Ceil to an integer without importing math: int() truncates toward zero,
    # so bump by one when a positive fractional part was dropped.
    m = int(n)
    if m < n:
        m += 1
    if m <= 1:
        return 1
    # For m >= 2, (m - 1).bit_length() is the exponent of the next power of 2.
    return 1 << (m - 1).bit_length()


def get_cuda_autotune_config():
    """Build the list of CUDA autotuning configurations for matmul-style kernels.

    Each entry pairs a block-size tile (M, N, K), pipeline depth
    (``num_stages``) and warp count; GROUP_SIZE_M is fixed at 8 throughout.
    """
    # (BLOCK_SIZE_M, BLOCK_SIZE_N, BLOCK_SIZE_K, num_stages, num_warps)
    tile_params = [
        (128, 256, 64, 3, 8),
        (64, 256, 32, 4, 4),
        (128, 128, 32, 4, 4),
        (128, 64, 32, 4, 4),
        (64, 128, 32, 4, 4),
        (128, 32, 32, 4, 4),
        (64, 32, 32, 5, 2),
        (32, 64, 32, 5, 2),
    ]
    return [
        triton.Config(
            {
                'BLOCK_SIZE_M': bm,
                'BLOCK_SIZE_N': bn,
                'BLOCK_SIZE_K': bk,
                'GROUP_SIZE_M': 8,
            },
            num_stages=stages,
            num_warps=warps,
        )
        for bm, bn, bk, stages, warps in tile_params
    ]


class TritonModuleBase(torch.nn.Module):
    """Base class for modules that can toggle between a Triton kernel
    and a PyTorch fallback implementation.

    Subclasses should consult :attr:`is_triton_enabled` in ``forward``
    to decide which code path to run. Triton is enabled by default.
    """

    def __init__(self):
        super().__init__()
        # Triton kernels are active unless explicitly disabled.
        self._set_triton(True)

    def _set_triton(self, flag):
        # Single point of truth for flipping the backend flag.
        self._triton_enabled = flag

    def enable_triton(self):
        """Use the Triton kernel implementation."""
        self._set_triton(True)

    def disable_triton(self):
        """Use the PyTorch fallback instead of the Triton kernel."""
        self._set_triton(False)

    @property
    def is_triton_enabled(self):
        """True when the Triton kernel path is currently selected."""
        return self._triton_enabled


def check_triton_compatible(tensor: torch.Tensor) -> bool:
    """Report whether *tensor* can be fed to a Triton kernel.

    A tensor qualifies only when it lives on a CUDA device and its
    memory layout is contiguous.
    """
    return tensor.is_cuda and tensor.is_contiguous()


def make_contiguous(tensor: torch.Tensor) -> torch.Tensor:
    """Return *tensor* with contiguous memory layout.

    Already-contiguous tensors are returned unchanged (no copy);
    otherwise a contiguous copy is produced.
    """
    return tensor if tensor.is_contiguous() else tensor.contiguous()

