import torch
import torch.nn.functional as F
from utils import *


def get_deconv_output_size(input_size, kernel_size, stride, padding, output_padding):
    """Return the output length of a transposed convolution along one axis.

    Mirrors the shape formula documented for torch.nn.ConvTranspose2d with
    dilation = 1: stride * (in - 1) - 2 * padding + kernel + output_padding.
    """
    return stride * (input_size - 1) - 2 * padding + kernel_size + output_padding


def calculate_padding_extension(kernel_size, stride):
    """Return how many extra input rows/cols a block needs on each side.

    An input element at index i influences output positions
    [i * stride, i * stride + kernel), so a block must see (kernel - 1) // stride
    extra neighbors per side for its output tile to stitch seamlessly.

    Args:
        kernel_size: (kh, kw) pair.
        stride: (sh, sw) pair.

    Returns:
        (h_ext, w_ext) one-sided extension in input rows and columns.
    """
    kh, kw = kernel_size
    sh, sw = stride
    return (kh - 1) // sh, (kw - 1) // sw


def blocked_conv_transpose2d(
    input,
    weight,
    bias=None,
    stride=1,
    padding=0,
    output_padding=0,
    max_h=None,
    max_w=None,
):
    """Compute a 2-D transposed convolution by tiling the input.

    The input is split into blocks of at most (max_h, max_w) rows/cols; each
    block is extended with enough neighbors, run through
    F.conv_transpose2d, and its valid output region is written into the
    global output. The result matches
    ``F.conv_transpose2d(input, weight, bias, stride, padding, output_padding)``.

    Args:
        input: (N, C_in, H_in, W_in) tensor.
        weight: (C_in, C_out, Kh, Kw) transposed-convolution weight.
        bias: optional (C_out,) tensor, added to every output element.
        stride, padding, output_padding: int or (h, w) pair, same semantics
            as F.conv_transpose2d.
        max_h, max_w: maximum block height/width measured in *input*
            rows/cols. None means a single block spanning the whole axis.

    Returns:
        (N, C_out, H_out, W_out) output tensor.
    """
    # Normalize int-or-pair arguments.
    stride_h, stride_w = (stride, stride) if isinstance(stride, int) else stride
    pad_h, pad_w = (padding, padding) if isinstance(padding, int) else padding
    opad_h, opad_w = (
        (output_padding, output_padding)
        if isinstance(output_padding, int)
        else output_padding
    )
    Kh, Kw = weight.shape[2], weight.shape[3]

    N, C_in, H_in, W_in = input.shape
    C_out = weight.shape[1]

    # BUGFIX: the old default (None) crashed in range(start, stop, None).
    # None now means "do not tile along this axis".
    if max_h is None:
        max_h = H_in
    if max_w is None:
        max_w = W_in

    # Global output size: stride * (in - 1) + kernel - 2 * padding + output_padding.
    H_out = (H_in - 1) * stride_h + Kh - 2 * pad_h + opad_h
    W_out = (W_in - 1) * stride_w + Kw - 2 * pad_w + opad_w

    output = torch.zeros(
        (N, C_out, H_out, W_out), dtype=input.dtype, device=input.device
    )
    # BUGFIX: F.conv_transpose2d adds the bias to the *entire* output,
    # including the border created by output_padding. When
    # output_padding > padding that border is never written by any block,
    # so pre-fill the output with the bias; blocks then overwrite their
    # regions with results that already include the bias.
    if bias is not None:
        output += bias.view(1, -1, 1, 1)

    # One-sided input extension so each block's output tile is complete:
    # input i influences outputs [i*s, i*s + K), hence (K - 1) // s neighbors.
    h_ext = (Kh - 1) // stride_h
    w_ext = (Kw - 1) // stride_w

    for i in range(0, H_in, max_h):
        # Actual block height (last block may be smaller).
        h_block = min(max_h, H_in - i)
        # Extended input rows for this block, clamped at the borders.
        h_start_in = max(0, i - h_ext)
        h_end_in = min(H_in, i + h_block + h_ext)
        # Extension actually obtained on each side (smaller near borders).
        ext_above = i - h_start_in
        ext_below = h_end_in - (i + h_block)
        for j in range(0, W_in, max_w):
            w_block = min(max_w, W_in - j)
            w_start_in = max(0, j - w_ext)
            w_end_in = min(W_in, j + w_block + w_ext)
            ext_left = j - w_start_in
            ext_right = w_end_in - (j + w_block)

            # The extended block is just a slice of the input; no need for
            # the old zero-allocate-then-copy (the copy covered the whole
            # temporary anyway).
            input_block = input[:, :, h_start_in:h_end_in, w_start_in:w_end_in]

            # Transposed convolution of the extended block, unpadded; the
            # global padding is applied when placing the result below.
            output_block = F.conv_transpose2d(
                input=input_block,
                weight=weight,
                bias=bias,
                stride=(stride_h, stride_w),
                padding=0,
                output_padding=0,
            )

            # Where this block's owned output region starts in global
            # (padded) coordinates; may be negative near the top/left edge.
            global_h_start = i * stride_h - pad_h
            global_w_start = j * stride_w - pad_w

            # Strip the rows/cols that exist only because of the extension.
            # (The K - s overlap tail beyond the owned region is kept; those
            # values are complete here and identically recomputed by the
            # next block, so overwriting is consistent.)
            valid_h_start = ext_above * stride_h
            valid_h_end = output_block.size(2) - ext_below * stride_h
            valid_w_start = ext_left * stride_w
            valid_w_end = output_block.size(3) - ext_right * stride_w

            # Clamp the destination to the global output bounds.
            dst_h_start = max(0, global_h_start)
            dst_w_start = max(0, global_w_start)
            dst_h_end = min(H_out, global_h_start + valid_h_end - valid_h_start)
            dst_w_end = min(W_out, global_w_start + valid_w_end - valid_w_start)

            # Matching source window inside the block output.
            src_h_start = valid_h_start + (dst_h_start - global_h_start)
            src_w_start = valid_w_start + (dst_w_start - global_w_start)
            src_h_end = src_h_start + (dst_h_end - dst_h_start)
            src_w_end = src_w_start + (dst_w_end - dst_w_start)

            # Copy only a non-empty valid window into the global output.
            if (src_h_end > src_h_start) and (src_w_end > src_w_start):
                output[:, :, dst_h_start:dst_h_end, dst_w_start:dst_w_end] = (
                    output_block[:, :, src_h_start:src_h_end, src_w_start:src_w_end]
                )

    return output


# Test configurations: each entry drives one comparison against the
# F.conv_transpose2d reference in run_single_test.
#   ic / oc      - input / output channel counts
#   input_size   - (H, W) of the input feature map
#   kernel_size  - (kH, kW)
#   stride, padding - forwarded to both implementations
#   max_hw       - (max_h, max_w) input-block size for the tiled version
test_configs = [
    {
        "name": "small_input",
        "ic": 1,
        "oc": 1,
        "input_size": (40, 40),
        "kernel_size": (3, 3),
        "stride": 1,
        "padding": 1,
        "max_hw": (3, 3),
    },
    {
        "name": "even_stride",
        "ic": 2,
        "oc": 3,
        "input_size": (60, 60),
        "kernel_size": (3, 3),
        "stride": 2,
        "padding": 1,
        "max_hw": (3, 3),
    },
    {
        "name": "even_kernel",
        "ic": 2,
        "oc": 3,
        "input_size": (60, 60),
        "kernel_size": (2, 2),
        "stride": 2,
        "padding": 1,
        "max_hw": (3, 3),
    },
    {
        "name": "large_kernel",
        "ic": 1,
        "oc": 1,
        "input_size": (50, 50),
        "kernel_size": (5, 5),
        "stride": 1,
        "padding": 2,
        "max_hw": (3, 3),
    },
    {
        "name": "small_kernel",
        "ic": 2,
        "oc": 3,
        "input_size": (50, 50),
        "kernel_size": (1, 1),
        "stride": 1,
        "padding": 0,
        "max_hw": (3, 3),
    },
    {
        "name": "edge_case",
        "ic": 3,
        "oc": 3,
        "input_size": (70, 70),
        "kernel_size": (3, 3),
        "stride": 3,
        "padding": 0,
        "max_hw": (4, 4),
    },
]


def run_single_test(cfg):
    """Run one configuration and return the max abs difference between the
    PyTorch reference and the blocked implementation."""
    torch.manual_seed(0)

    # Build deterministic test data. Note: transposed-convolution weights
    # are shaped (in_channels, out_channels, kH, kW).
    x = torch.randn(1, cfg["ic"], *cfg["input_size"])
    w = torch.randn(cfg["ic"], cfg["oc"], *cfg["kernel_size"])

    # Reference result from PyTorch.
    expected = F.conv_transpose2d(
        x, w, stride=cfg["stride"], padding=cfg["padding"]
    )

    # Tiled result.
    actual = blocked_conv_transpose2d(
        x,
        w,
        None,
        stride=cfg["stride"],
        padding=cfg["padding"],
        max_h=cfg["max_hw"][0],
        max_w=cfg["max_hw"][1],
    )

    # Worst-case elementwise deviation.
    return (expected - actual).abs().max().item()


def test_all_configs():
    """Run every entry in test_configs and print a pass/fail line for each."""
    for cfg in test_configs:
        diff = run_single_test(cfg)
        print(f"Test [{cfg['name']}] Max diff: {diff:.2e}", end="")
        if diff < 1e-5:
            print(" ✓")
            continue
        print(" ✗")
        # On failure, dump the configuration to aid debugging.
        print(f"  Input size: {cfg['input_size']}, Kernel: {cfg['kernel_size']}")
        print(f"  Stride: {cfg['stride']}, Padding: {cfg['padding']}")
        print(f"  Block size: {cfg['max_hw']}")

    print("所有测试完成!")


# Run the self-test suite when executed as a script.
if __name__ == "__main__":
    test_all_configs()
