import torch
import torch.nn as nn
from torchinfo import summary


class Down(nn.Module):
    """3-D downsampling block: Conv3d -> BatchNorm3d -> LeakyReLU.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: convolution kernel size (int or 3-tuple).
        stride: convolution stride. Defaults to 1; the previous default of
            ``None`` crashed inside ``nn.Conv3d`` whenever it was not
            explicitly overridden by the caller.
        padding: convolution padding.
        padding_mode: one of ``"zeros"``, ``"reflect"``, ``"replicate"``,
            ``"circular"`` (forwarded to ``nn.Conv3d``).
    """

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, padding_mode="zeros"):
        super().__init__()
        self.conv = nn.Sequential(
            # bias is disabled because the following BatchNorm3d supplies
            # its own learnable shift.
            nn.Conv3d(in_channels, out_channels, kernel_size, stride,
                      padding, padding_mode=padding_mode, bias=False),
            nn.BatchNorm3d(out_channels),
            nn.LeakyReLU(inplace=False),
        )

    def forward(self, x):
        """Apply conv -> batch-norm -> LeakyReLU to a 5-D tensor (N, C, D, H, W)."""
        return self.conv(x)


class Upsampling(nn.Module):
    """3-D upsampling block: ConvTranspose3d -> BatchNorm3d -> LeakyReLU.

    Per dimension the transposed convolution produces
    ``out = (in - 1) * stride - 2 * padding + kernel_size + output_padding``.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: transposed-convolution kernel size (int or 3-tuple).
        stride: transposed-convolution stride. Defaults to 1; the previous
            default of ``None`` crashed inside ``nn.ConvTranspose3d`` whenever
            it was not explicitly overridden by the caller.
        padding: transposed-convolution padding.
        padding_mode: accepted for signature symmetry with ``Down`` but NOT
            forwarded — ``nn.ConvTranspose3d`` only supports ``"zeros"``.
        output_padding: extra size added to one side of the output shape
            (resolves size-alignment ambiguity of transposed convolutions).
    """

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, padding_mode="zeros", output_padding=0):
        super().__init__()
        self.conv = nn.Sequential(
            nn.ConvTranspose3d(in_channels, out_channels, kernel_size,
                               stride=stride, padding=padding,
                               output_padding=output_padding),
            nn.BatchNorm3d(out_channels),
            nn.LeakyReLU(inplace=False),
        )

    def forward(self, x):
        """Apply transposed conv -> batch-norm -> LeakyReLU to (N, C, D, H, W)."""
        return self.conv(x)



class MyModel(nn.Module):
    """3-D conv encoder -> stacked LSTMs over the depth axis -> transposed-conv decoder.

    Expects input of shape ``(N, 1, D, 560, 480)``. The six stride-(1, 2, 2)
    encoder stages reduce 560x480 down to 9x8 while preserving the depth
    dimension ``D``, yielding features of shape ``(N, 18, D, 9, 8)``. The
    depth axis is then treated as the LSTM sequence dimension, and the
    decoder restores the original ``(N, 1, D, 560, 480)`` resolution, so the
    output shape equals the input shape.
    """

    def __init__(self):
        super().__init__()
        # Per-timestep feature size after the encoder: 18 channels * 9 * 8 spatial.
        lstm_input_size = 18 * 9 * 8

        # Encoder: each stage halves H and W (stride (1, 2, 2), padding 1)
        # and keeps the depth dimension unchanged.
        self.down_sequential = nn.Sequential(
            Down(1, 3, 3, stride=(1, 2, 2), padding=(3 - 1) // 2),
            Down(3, 6, 3, stride=(1, 2, 2), padding=(3 - 1) // 2),
            Down(6, 9, 3, stride=(1, 2, 2), padding=(3 - 1) // 2),
            Down(9, 12, 3, stride=(1, 2, 2), padding=(3 - 1) // 2),
            Down(12, 15, 3, stride=(1, 2, 2), padding=(3 - 1) // 2),
            Down(15, 18, 3, stride=(1, 2, 2), padding=(3 - 1) // 2),
        )

        # Temporal model: two 3-layer LSTMs with a residual connection
        # around the relu -> linear -> relu -> lstm2 sub-path.
        self.lstm1 = nn.LSTM(input_size=lstm_input_size, num_layers=3,
                             batch_first=True, dropout=0.1,
                             hidden_size=lstm_input_size)
        self.relu1 = nn.ReLU()
        self.linear1 = nn.Linear(lstm_input_size, lstm_input_size, bias=True)
        self.relu2 = nn.ReLU()
        self.lstm2 = nn.LSTM(input_size=lstm_input_size, num_layers=3,
                             batch_first=True, dropout=0.1,
                             hidden_size=lstm_input_size)
        self.linear2 = nn.Linear(lstm_input_size, lstm_input_size, bias=True)
        self.relu3 = nn.ReLU()

        # Decoder: inverts the encoder's spatial reduction.
        # Shapes shown for input (N, 18, D, 9, 8):
        self.upsampling_sequential = nn.Sequential(
            Upsampling(18, 15, (1, 2, 1), stride=(1, 1, 1), padding=(0, 0, 0)),  # (N, 15, D, 10, 8)
            Upsampling(15, 12, (1, 2, 2), stride=(1, 2, 2)),                     # (N, 12, D, 20, 16)
            Upsampling(12, 9, (1, 7, 15), stride=(1, 7, 15)),                    # (N, 9, D, 140, 240)
            Upsampling(9, 6, (1, 2, 2), stride=(1, 2, 2)),                       # (N, 6, D, 280, 480)
            Upsampling(6, 3, (1, 2, 1), stride=(1, 2, 1)),                       # (N, 3, D, 560, 480)
            Upsampling(3, 1, (1, 1, 1), stride=(1, 1, 1)),                       # (N, 1, D, 560, 480)
        )

    def forward(self, x):
        """Run encode -> LSTM over depth -> decode; output shape equals input shape."""
        # Encode: (N, 1, D, 560, 480) -> (N, 18, D, 9, 8).
        x = self.down_sequential(x)
        n, c, d, h, w = x.shape

        # Move depth to the sequence axis BEFORE flattening, so each LSTM
        # timestep sees the features of exactly one depth slice.
        # (The previous plain reshape interleaved channel and depth data,
        # feeding the LSTM scrambled "timesteps".)
        x = x.permute(0, 2, 1, 3, 4).reshape(n, d, c * h * w)

        x, _ = self.lstm1(x)
        residual = x  # keep the first LSTM's output for the residual connection
        x = self.relu1(x)
        x = self.linear1(x)
        x = self.relu2(x)
        x, _ = self.lstm2(x)
        x = x + residual  # residual connection
        x = self.linear2(x)
        x = self.relu3(x)

        # Restore the (N, C, D, H, W) layout expected by the decoder.
        x = x.reshape(n, d, c, h, w).permute(0, 2, 1, 3, 4)

        # Decode back to the input resolution:
        # output_size = (input_size - 1) * stride + kernel_size - 2 * padding
        x = self.upsampling_sequential(x)
        return x

if __name__ == '__main__':

    # Prefer the GPU when one is available, but fall back to CPU so the
    # demo also runs on CPU-only machines (the previous hard-coded 'cuda'
    # crashed when no GPU was present).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Input tensor dimensions.
    N = 10      # batch size
    C_in = 1    # input channels
    depth = 20  # depth dimension (used as the LSTM sequence length)
    H_in = 560  # input height (must be 560: the encoder reduces it to 9)
    W_in = 480  # input width (must be 480: the encoder reduces it to 8)

    # Random 5-D input tensor (N, C, D, H, W).
    input_tensor = torch.randn(N, C_in, depth, H_in, W_in, device=device)

    model = MyModel().to(device)
    summary(model, input_size=input_tensor.shape)
    output = model(input_tensor)

    print(output.shape)  # expected: torch.Size([10, 1, 20, 560, 480])

