import torch
import torch.nn.functional as F
from utils import *

# Output-size formula for a convolution over an already-padded input.
def conv_output_size(padded_size, kernel_size, s, d):
    """Return the spatial output length of a convolution.

    `padded_size` already includes any zero padding; `s` is the stride and
    `d` the dilation. Uses the standard floor formula
    floor((padded - d*(k-1) - 1) / s) + 1.
    """
    effective_kernel = d * (kernel_size - 1) + 1
    return (padded_size - effective_kernel) // s + 1

def blocked_conv2d(
    input,      # input tensor (N, C_in, H_in, W_in)
    weight,     # kernels (C_out, C_in // groups, Kh, Kw)
    bias=None,  # optional bias, shape (C_out,)
    stride=1,   # stride (int or (h, w) tuple)
    padding=0,  # zero padding (int or (h, w) tuple)
    dilation=1, # dilation (int or (h, w) tuple)
    groups=1,   # number of groups
    max_h=None, # max height of one input block (None = no tiling along H)
    max_w=None, # max width of one input block (None = no tiling along W)
):
    """Tiled 2-D convolution: split the (zero-padded) input into blocks of at
    most max_h x max_w pixels, convolve each block with F.conv2d, and stitch
    the partial results into the full output.

    Numerically equivalent to
    F.conv2d(input, weight, bias, stride, padding, dilation, groups).

    Returns:
        Tensor of shape (N, C_out, H_out, W_out) on input's device and dtype.

    Raises:
        ValueError: if max_h/max_w cannot fit a single dilated kernel window.
    """
    N, C_in, H_in, W_in = input.shape
    C_out, _, kernel_h, kernel_w = weight.shape

    # Normalize int-or-tuple hyperparameters into per-axis values.
    stride_h, stride_w = (stride, stride) if isinstance(stride, int) else stride
    dilation_h, dilation_w = (
        (dilation, dilation) if isinstance(dilation, int) else dilation
    )
    pad_h, pad_w = (padding, padding) if isinstance(padding, int) else padding

    # Spatial extent after zero padding.
    H_padded = H_in + 2 * pad_h
    W_padded = W_in + 2 * pad_w

    H_out = conv_output_size(H_padded, kernel_h, stride_h, dilation_h)
    W_out = conv_output_size(W_padded, kernel_w, stride_w, dilation_w)

    # Fix: the original crashed with a TypeError when max_h/max_w kept their
    # default None; fall back to "one block covers the whole padded input".
    if max_h is None:
        max_h = H_padded
    if max_w is None:
        max_w = W_padded

    # Largest output tile a single max_h x max_w input block can produce.
    H_out_block_max = conv_output_size(max_h, kernel_h, stride_h, dilation_h)
    W_out_block_max = conv_output_size(max_w, kernel_w, stride_w, dilation_w)
    # Fix: fail with a clear message instead of dying inside range(..., 0)
    # when the block limit is smaller than one dilated kernel window.
    if H_out_block_max <= 0 or W_out_block_max <= 0:
        raise ValueError("max_h/max_w too small to fit one dilated kernel window")

    # Fix: match the input dtype (the original always allocated float32,
    # which breaks/casts silently for float64 inputs).
    output = torch.zeros(
        (N, C_out, H_out, W_out), dtype=input.dtype, device=input.device
    )

    # Tile over output rows, then output columns.
    for h_out_start in range(0, H_out, H_out_block_max):
        h_out_end = min(h_out_start + H_out_block_max, H_out)
        current_h_out = h_out_end - h_out_start

        # Input rows needed for this tile (may start negative inside padding).
        h_in_start = h_out_start * stride_h - pad_h
        h_in_block = (current_h_out - 1) * stride_h + dilation_h * (kernel_h - 1) + 1
        assert h_in_block <= max_h  # invariant: block fits the caller's limit
        h_in_end = h_in_start + h_in_block

        for w_out_start in range(0, W_out, W_out_block_max):
            w_out_end = min(w_out_start + W_out_block_max, W_out)
            current_w_out = w_out_end - w_out_start

            # Input columns needed for this tile.
            w_in_start = w_out_start * stride_w - pad_w
            w_in_block = (
                (current_w_out - 1) * stride_w + dilation_w * (kernel_w - 1) + 1
            )
            assert w_in_block <= max_w  # invariant: block fits the caller's limit
            w_in_end = w_in_start + w_in_block

            # Zero-initialized staging block; the zeros double as padding.
            input_block = torch.zeros(
                (N, C_in, h_in_block, w_in_block),
                dtype=input.dtype,
                device=input.device,
            )

            # Clamp the copy window to the real (unpadded) input extent.
            copy_h_start = max(h_in_start, 0)
            copy_h_end = min(h_in_end, H_in)
            copy_w_start = max(w_in_start, 0)
            copy_w_end = min(w_in_end, W_in)

            # Where the clamped region lands inside the staging block.
            dst_h_offset = copy_h_start - h_in_start
            dst_w_offset = copy_w_start - w_in_start

            # Copy only when the window intersects the real input; a fully
            # out-of-range window stays all-zero (pure padding).
            if copy_h_end > copy_h_start and copy_w_end > copy_w_start:
                copy_tensor_region_nchw(
                    src=input,
                    dst=input_block,
                    src_h_start=copy_h_start,
                    src_h_end=copy_h_end,
                    src_w_start=copy_w_start,
                    src_w_end=copy_w_end,
                    dst_h_start=dst_h_offset,
                    dst_h_end=dst_h_offset + (copy_h_end - copy_h_start),
                    dst_w_start=dst_w_offset,
                    dst_w_end=dst_w_offset + (copy_w_end - copy_w_start),
                )

            # Convolve the staging block; padding is already baked in.
            output_block = F.conv2d(
                input_block,
                weight,
                bias,
                stride=stride,
                padding=0,
                dilation=dilation,
                groups=groups,
            )

            # Stitch this tile's result into the full output.
            copy_tensor_region_nchw(
                src=output_block,
                dst=output,
                src_h_start=0,
                src_h_end=current_h_out,
                src_w_start=0,
                src_w_end=current_w_out,
                dst_h_start=h_out_start,
                dst_h_end=h_out_end,
                dst_w_start=w_out_start,
                dst_w_end=w_out_end,
            )

    return output


# Test matrix for blocked_conv2d: each entry describes one conv configuration
# plus the max block size (max_hw) used for tiling.
test_configs = [
    # Basic tests
    {
        "name": "basic_no_padding",
        "ic": 1,
        "oc": 1,
        "input_size": (4, 4),
        "kernel_size": (3, 3),
        "stride": 1,
        "padding": 0,
        "dilation": 1,
        "max_hw": (3, 3),
        "description": "基础无padding测试",
    },
    {
        "name": "basic_with_padding",
        "ic": 2,
        "oc": 2,
        "input_size": (5, 5),
        "kernel_size": (3, 3),
        "stride": 1,
        "padding": 1,
        "dilation": 1,
        "max_hw": (4, 4),
        "description": "基础带padding测试",
    },
    # Stride tests
    {
        "name": "stride_2_no_padding",
        "ic": 1,
        "oc": 1,
        "input_size": (6, 6),
        "kernel_size": (3, 3),
        "stride": 2,
        "padding": 0,
        "dilation": 1,
        "max_hw": (3, 3),
        "description": "步长2无padding测试",
    },
    {
        "name": "stride_2_with_padding",
        "ic": 3,
        "oc": 3,
        "input_size": (7, 7),
        "kernel_size": (3, 3),
        "stride": 2,
        "padding": 1,
        "dilation": 1,
        "max_hw": (4, 4),
        "description": "步长2带padding测试",
    },
    # Dilated (atrous) convolution tests
    {
        "name": "dilation_2_no_padding",
        "ic": 1,
        "oc": 1,
        "input_size": (5, 5),
        "kernel_size": (3, 3),
        "stride": 1,
        "padding": 0,
        "dilation": 2,
        "max_hw": (5, 5),
        "description": "空洞2无padding测试",
    },
    {
        "name": "dilation_2_with_padding",
        "ic": 2,
        "oc": 2,
        "input_size": (6, 6),
        "kernel_size": (3, 3),
        "stride": 1,
        "padding": 2,
        "dilation": 2,
        "max_hw": (6, 6),
        "description": "空洞2带padding测试",
    },
    # Large-kernel tests
    {
        "name": "large_kernel_5x5",
        "ic": 1,
        "oc": 1,
        "input_size": (10, 10),
        "kernel_size": (5, 5),
        "stride": 1,
        "padding": 2,
        "dilation": 1,
        "max_hw": (5, 5),
        "description": "5x5大卷积核测试",
    },
    {
        "name": "large_kernel_7x7",
        "ic": 1,
        "oc": 1,
        "input_size": (16, 16),
        "kernel_size": (7, 7),
        "stride": 1,
        "padding": 3,
        "dilation": 1,
        "max_hw": (7, 7),
        "description": "7x7大卷积核测试",
    },
    # Channel-count tests
    {
        "name": "multi_channel_4x4",
        "ic": 4,
        "oc": 4,
        "input_size": (5, 5),
        "kernel_size": (3, 3),
        "stride": 1,
        "padding": 1,
        "dilation": 1,
        "max_hw": (3, 3),
        "description": "多通道(4)测试",
    },
    {
        "name": "multi_channel_8x8",
        "ic": 8,
        "oc": 8,
        "input_size": (6, 6),
        "kernel_size": (3, 3),
        "stride": 1,
        "padding": 1,
        "dilation": 1,
        "max_hw": (4, 4),
        "description": "多通道(8)测试",
    },
    # Edge-case tests
    {
        "name": "minimal_input_1x1",
        "ic": 1,
        "oc": 1,
        "input_size": (1, 1),
        "kernel_size": (1, 1),
        "stride": 1,
        "padding": 1,
        "dilation": 1,
        "max_hw": (1, 1),
        "description": "最小输入1x1测试",
    },
    {
        "name": "non_square_kernel",
        "ic": 1,
        "oc": 1,
        "input_size": (10, 10),
        "kernel_size": (3, 5),
        "stride": 1,
        "padding": (1, 2),
        "dilation": 1,
        "max_hw": (3, 5),
        "description": "非方形核(3x5)测试",
    },
    # Extreme-tiling tests
    {
        "name": "small_block_2x2",
        "ic": 1,
        "oc": 1,
        "input_size": (6, 6),
        "kernel_size": (2, 2),
        "stride": 1,
        "padding": 1,
        "dilation": 1,
        "max_hw": (2, 2),
        "description": "极小分块(2x2)测试",
    },
    {
        "name": "uneven_block_3x5",
        "ic": 1,
        "oc": 1,
        "input_size": (7, 7),
        "kernel_size": (3, 3),
        "stride": 1,
        "padding": 1,
        "dilation": 1,
        "max_hw": (3, 5),
        "description": "非对称分块(3x5)测试",
    },
]


def run_single_test(cfg):
    """Run one config through F.conv2d and blocked_conv2d and return the
    maximum absolute elementwise difference between the two outputs."""
    torch.manual_seed(0)
    x = torch.randn(1, cfg["ic"], *cfg["input_size"])
    w = torch.randn(cfg["oc"], cfg["ic"], *cfg["kernel_size"])
    b = torch.randn(cfg["oc"])

    conv_kwargs = dict(
        stride=cfg["stride"],
        padding=cfg["padding"],
        dilation=cfg["dilation"],
    )

    # Reference result from the built-in convolution.
    reference = F.conv2d(x, w, b, **conv_kwargs)

    # Result from the tiled implementation under test.
    tiled = blocked_conv2d(
        x,
        w,
        b,
        max_h=cfg["max_hw"][0],
        max_w=cfg["max_hw"][1],
        **conv_kwargs,
    )

    return (reference - tiled).abs().max().item()


def test_all_configs():
    """Validate every entry of test_configs; abort on the first mismatch."""
    for cfg in test_configs:
        diff = run_single_test(cfg)
        summary = f"Test {cfg['name']} Max diff: {diff:.2e}"
        print(summary, end="")
        assert diff < 1e-5, f"失败: {cfg['name']}"
        print(" ✓")
    print("所有测试通过!")


# Script entry point: run the full blocked-vs-standard conv test suite.
if __name__ == "__main__":
    test_all_configs()

# # Legacy comparison test (superseded by test_all_configs above)
# def test_conv():
#     torch.manual_seed(0)
# #     # # generate input and convolution parameters
#     oc, ic = 6, 6
#     input = torch.randn(1, ic, 30, 30)
#     weight = torch.randn(oc, ic, 3, 3)
#     bias = torch.randn(oc)
#     padding = 3
#     stride = 1
#     dilation = 1
#     max_h, max_w = 20, 20

#     # reference result from the standard convolution
#     output_standard = F.conv2d(
#         input, weight, bias, stride=1, dilation=dilation, padding=padding
#     )
#     # result from the blocked convolution
#     output_split = blocked_conv2d(
#         input,
#         weight,
#         bias,
#         stride=stride,
#         padding=padding,
#         dilation=dilation,
#         max_h=max_h,
#         max_w=max_w,
#     )
#     # compare the difference
#     diff = (output_standard - output_split).abs().max()
#     print(f"最大差异: {diff.item()}, {output_standard.abs().max()}")
#     assert diff < 1e-5, "分块卷积结果与标准卷积不一致"


# test_conv()
