import torch
import torch.nn as nn

class Down(nn.Module):
    """Downsampling block: strided Conv2d -> BatchNorm2d -> LeakyReLU.

    With stride 2 and padding (kernel_size - 1) // 2 the spatial size is
    roughly halved at each application.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride,
                 padding=0, padding_mode="zeros"):
        super().__init__()
        # bias=False: the convolution's additive bias would be cancelled by
        # the BatchNorm that immediately follows it.
        self.conv = nn.Sequential(
            nn.Conv2d(
                in_channels,
                out_channels,
                kernel_size,
                stride,
                padding,
                padding_mode=padding_mode,
                bias=False,
            ),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(inplace=False),
        )

    def forward(self, x):
        """Apply conv -> norm -> activation to an (N, C, H, W) tensor."""
        return self.conv(x)

class MyModel(nn.Module):
    """Six-stage downsampling encoder.

    Each stage is a Down block with kernel 3, stride 2 and "same"-style
    padding (3 - 1) // 2 = 1, so every stage roughly halves the spatial
    resolution while the channel count walks 3 -> 6 -> 9 -> 12 -> 15 -> 18 -> 18.
    """

    def __init__(self):
        super().__init__()
        # Channel progression across the six stages; consecutive pairs give
        # the (in_channels, out_channels) of each Down block.
        channels = [3, 6, 9, 12, 15, 18, 18]
        stages = [
            Down(c_in, c_out, 3, 2, padding=(3 - 1) // 2)
            for c_in, c_out in zip(channels[:-1], channels[1:])
        ]
        self.sequential = nn.Sequential(*stages)

    def forward(self, x):
        """Run x through all six downsampling stages."""
        return self.sequential(x)


# ---- Smoke test: run one random batch through the model --------------------

# Input shape definition.
N = 10      # batch size
C_in = 3    # input channels (e.g. an RGB image)
H_in = 560  # input feature-map height
W_in = 480  # input feature-map width

# BUG FIX: the original hard-coded torch.device('cuda'), which raises on any
# machine without a CUDA GPU. Fall back to CPU when CUDA is unavailable.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Random four-dimensional input tensor, created directly on the target device.
input_tensor = torch.randn(N, C_in, H_in, W_in, device=device)

model = MyModel()
model.to(device)
output = model(input_tensor)

print(output.shape)
# BUG FIX: the original ended with
#     torch.reshape(output, (N, C_in, H_in, W_in))
# which discarded its result AND would raise a RuntimeError: after six
# stride-2 stages the output tensor holds far fewer elements than the input
# (e.g. 10*18*9*8 vs 10*3*560*480), so it cannot be reshaped back to the
# input's shape. The call has been removed.

