import torch
import torch.nn.functional as F
import numpy as np

def sliding_window_k_quantile(matrix, window_width=16, k=0.5):
    """
    Compute the k-quantile over a sliding window along each row of a matrix.

    Parameters:
        matrix: input array of shape (rows, cols)
        window_width: sliding window size; must satisfy 1 <= window_width <= cols
        k: quantile parameter (0 <= k <= 1)

    Returns:
        Result array of shape (rows, cols - window_width + 1), where entry
        (i, j) is the k-quantile of matrix[i, j:j+window_width].

    Raises:
        ValueError: if window_width is out of range or k is not in [0, 1].
    """
    matrix = np.asarray(matrix)
    rows, cols = matrix.shape
    # The original silently returned an empty/invalid result when the window
    # was wider than a row; fail loudly instead.
    if not 1 <= window_width <= cols:
        raise ValueError(f"window_width must be in [1, {cols}], got {window_width}")
    if not 0.0 <= k <= 1.0:
        raise ValueError(f"k must be in [0, 1], got {k}")

    # Zero-copy strided view of shape (rows, cols - window_width + 1, window_width):
    # one vectorized quantile over the last axis replaces the per-window
    # Python double loop (O(rows * windows) np.quantile calls).
    windows = np.lib.stride_tricks.sliding_window_view(matrix, window_width, axis=1)
    return np.quantile(windows, k, axis=2)

def sliding_window_k_quantile_torch(matrix, window_width=16, k=0.5, device='cuda'):
    """
    Compute the k-quantile over a sliding window along each row of a matrix
    (PyTorch implementation, optionally CUDA-accelerated).

    Parameters:
        matrix: input of shape (rows, cols); a numpy array or torch tensor
        window_width: sliding window size
        k: quantile parameter (0 <= k <= 1)
        device: compute device ('cuda', 'cpu', 'cuda:0', or a torch.device)

    Returns:
        numpy array of shape (rows, cols - window_width + 1)
    """
    # Convert input to a torch tensor on the requested device.
    if not isinstance(matrix, torch.Tensor):
        matrix = torch.from_numpy(matrix).float()
    # torch.quantile requires a floating dtype; the original only cast the
    # numpy path, so integer tensors crashed.
    if not matrix.is_floating_point():
        matrix = matrix.float()
    matrix = matrix.to(device)

    # unfold builds a zero-copy sliding-window view:
    # shape (rows, cols - window_width + 1, window_width)
    windows = matrix.unfold(1, window_width, 1)

    # Reduce each window to its k-quantile.
    result = torch.quantile(windows, k, dim=2)

    # .cpu() is a no-op for CPU tensors, so this is correct for every device.
    # The original `if device == 'cuda'` string comparison broke for
    # 'cuda:0' or torch.device objects (.numpy() raises on GPU tensors).
    return result.cpu().numpy()

# Example usage / micro-benchmark
if __name__ == "__main__":
    import time

    # Generate a reproducible 1024x1024 random matrix.
    np.random.seed(42)
    matrix = np.random.rand(1024, 1024)

    # Pick a device that actually exists; the original hard-coded the
    # 'cuda' default and crashed on CPU-only machines.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Baseline NumPy implementation (enable to cross-check results;
    # it is much slower than the torch version on large inputs).
    # start = time.time()
    # result_np = sliding_window_k_quantile(matrix, window_width=16, k=0.5)
    # print(f"NumPy Time: {time.time() - start:.4f}秒")

    # Time the PyTorch implementation; repeated runs amortize warm-up
    # (kernel compilation / cache effects on the first call).
    for i in range(10):
        start = time.time()
        result_torch = sliding_window_k_quantile_torch(
            matrix, window_width=16, k=0.5, device=device
        )
        print(f"PyTorch {device} Time: {time.time() - start:.4f}秒")

    # Verify agreement between the two implementations (needs the
    # NumPy baseline above).
    # print("Data Diff:", np.max(np.abs(result_np - result_torch)))