import numpy as np
import torch
import torch.nn.functional as F

def conv2d(input_data, weight, kernel_size, stride=1, padding=0):
    """
    2D convolution using numpy with integer inputs.

    Args:
        input_data: Input array of shape (batch, height, width, channels) - int8 type
        weight: Weight array of shape (out_channels, kernel_h, kernel_w, in_channels) - int8 type
        kernel_size: Tuple (kernel_h, kernel_w); validated against the weight shape
        stride: Stride for convolution
        padding: Zero-padding size applied to both spatial sides

    Returns:
        Output array of shape (batch, out_h, out_w, out_channels) - int32 type

    Raises:
        ValueError: If kernel_size disagrees with the weight shape, or the
            input/weight channel counts do not match.
    """
    # Widen to int32 so products/sums of int8 values cannot overflow.
    input_data = input_data.astype(np.int32)
    weight = weight.astype(np.int32)

    batch, in_h, in_w, in_c = input_data.shape
    out_c, k_h, k_w, weight_in_c = weight.shape

    # kernel_size is documented as "for validation" — actually enforce it.
    if (k_h, k_w) != tuple(kernel_size):
        raise ValueError(
            f"kernel_size {tuple(kernel_size)} does not match weight kernel ({k_h}, {k_w})"
        )
    if weight_in_c != in_c:
        raise ValueError(
            f"input channels ({in_c}) do not match weight channels ({weight_in_c})"
        )

    # Standard output-size formula for a strided, padded convolution.
    out_h = (in_h + 2 * padding - k_h) // stride + 1
    out_w = (in_w + 2 * padding - k_w) // stride + 1

    # int32 accumulator output.
    output = np.zeros((batch, out_h, out_w, out_c), dtype=np.int32)

    for b in range(batch):
        for oh in range(out_h):
            for ow in range(out_w):
                for oc in range(out_c):
                    acc = 0
                    for kh in range(k_h):
                        ih = oh * stride + kh - padding
                        # Positions that fall into the zero-padding contribute 0.
                        if not 0 <= ih < in_h:
                            continue
                        for kw in range(k_w):
                            iw = ow * stride + kw - padding
                            if not 0 <= iw < in_w:
                                continue
                            # Dot product over the channel axis replaces the
                            # innermost Python loop.
                            acc += int(np.dot(input_data[b, ih, iw], weight[oc, kh, kw]))
                    output[b, oh, ow, oc] = acc
    return output

def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
    """
    Custom implementation of 2D transposed convolution that matches PyTorch's behavior.

    Args:
        input: Input tensor of shape (N, C_in, H_in, W_in)
        weight: Weight tensor of shape (C_in, C_out, kH, kW)
        bias: Optional bias tensor of shape (C_out,), added to every output position
        stride: Stride of the convolution (int or pair)
        padding: Zero-padding added to both sides of the input (int or pair)
        output_padding: Additional size added to one side of the output shape (int or pair)
        groups: Number of blocked connections; only groups=1 is implemented
        dilation: Spacing between kernel elements (int or pair)

    Returns:
        Output tensor of shape (N, C_out, H_out, W_out)

    Raises:
        NotImplementedError: If groups != 1 (previously it was silently ignored,
            which produced wrong results).
        ValueError: If the input and weight channel counts disagree.
    """
    # Normalize scalar arguments to (height, width) pairs.
    if isinstance(stride, int):
        stride = (stride, stride)
    if isinstance(padding, int):
        padding = (padding, padding)
    if isinstance(output_padding, int):
        output_padding = (output_padding, output_padding)
    if isinstance(dilation, int):
        dilation = (dilation, dilation)

    # Fail loudly for unsupported grouped convolution instead of returning
    # wrong values.
    if groups != 1:
        raise NotImplementedError("conv_transpose2d: only groups=1 is supported")

    N, C_in, H_in, W_in = input.shape
    C_in_weight, C_out, kH, kW = weight.shape
    if C_in_weight != C_in:
        raise ValueError(
            f"input channels ({C_in}) do not match weight channels ({C_in_weight})"
        )

    # PyTorch's output-size formula for transposed convolution.
    H_out = (H_in - 1) * stride[0] - 2 * padding[0] + dilation[0] * (kH - 1) + output_padding[0] + 1
    W_out = (W_in - 1) * stride[1] - 2 * padding[1] + dilation[1] * (kW - 1) + output_padding[1] + 1

    output = torch.zeros(N, C_out, H_out, W_out, dtype=input.dtype, device=input.device)
    for n in range(N):
        for c_out in range(C_out):
            for h_out in range(H_out):
                for w_out in range(W_out):
                    acc = 0
                    for kh in range(kH):
                        for kw in range(kW):
                            # Invert the forward-conv index mapping: an input
                            # pixel contributes only where the offset lands
                            # exactly on a stride multiple.
                            h_temp = h_out + padding[0] - kh * dilation[0]
                            w_temp = w_out + padding[1] - kw * dilation[1]
                            if (h_temp >= 0 and h_temp % stride[0] == 0 and
                                    w_temp >= 0 and w_temp % stride[1] == 0):
                                h_in = h_temp // stride[0]
                                w_in = w_temp // stride[1]
                                if 0 <= h_in < H_in and 0 <= w_in < W_in:
                                    for c_in in range(C_in):
                                        acc += input[n, c_in, h_in, w_in] * weight[c_in, c_out, kh, kw]
                    output[n, c_out, h_out, w_out] = acc

    # Bug fix: bias was previously accepted but never applied.
    if bias is not None:
        output += bias.view(1, C_out, 1, 1)
    return output

if __name__ == "__main__":
    # Test configuration: single-sample, single-channel 6x6 input.
    n, c_in, c_out = 1, 1, 1
    h, w = 6, 6
    k = (3, 3)
    conv_args = dict(
        stride=(2, 2),
        padding=(1, 1),
        output_padding=(1, 1),
        dilation=(1, 1),
    )

    # Deterministic ramp input built in NHWC layout, then converted to the
    # NCHW layout both implementations expect.
    nhwc = torch.arange(n * c_in * h * w, dtype=torch.float32).reshape(n, h, w, c_in)
    x = nhwc.permute((0, 3, 1, 2))
    weight = torch.arange(
        c_in * c_out * k[0] * k[1], dtype=torch.float32
    ).reshape(c_in, c_out, k[0], k[1])
    bias = torch.zeros(c_out, dtype=torch.float32)

    # Custom implementation.
    out_custom = conv_transpose2d(x, weight, bias=bias, **conv_args)
    print(out_custom)

    # PyTorch reference implementation.
    out_ref = F.conv_transpose2d(x, weight, bias=bias, **conv_args)
    print(out_ref.permute((0, 2, 3, 1)))  # shown in NHWC layout

    # Compare the two results.
    diff = (out_custom - out_ref).abs()
    print("Max abs diff:", diff.max().item())
    print("Mean abs diff:", diff.mean().item())
    print("Shapes equal:", out_custom.shape == out_ref.shape)
    print("Allclose:", torch.allclose(out_custom, out_ref, atol=1e-5))
