import torch
import torch.nn as nn
from torchinfo import summary


class Down(nn.Module):
    """Downsampling stage: 3-D conv that halves H and W (depth is kept),
    followed by batch normalization and LeakyReLU."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # bias is omitted because the following BatchNorm supplies its own shift
        conv = nn.Conv3d(in_channels, out_channels, kernel_size=3,
                         stride=(1, 2, 2), padding=1, bias=False)
        norm = nn.BatchNorm3d(out_channels)
        act = nn.LeakyReLU(inplace=True)
        self.conv = nn.Sequential(conv, norm, act)

    def forward(self, x):
        out = self.conv(x)
        return out


class Up(nn.Module):
    """Upsampling stage: transposed 3-D conv that roughly doubles H and W
    (H -> 2*H - 1, W -> 2*W - 1) while keeping the depth dimension intact,
    followed by batch normalization and LeakyReLU.

    Bug fix: the original used ``padding=1`` on every dimension, which
    (with kernel size 1 along depth) shrank the depth by 2 per stage
    instead of preserving it.  Padding is now applied only to H and W.
    The transposed conv also drops its bias, consistent with ``Down``,
    since the BatchNorm immediately renormalizes.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.up = nn.Sequential(
            # D_out = D_in; H_out = 2*H_in - 1; W_out = 2*W_in - 1
            nn.ConvTranspose3d(in_channels, out_channels,
                               kernel_size=(1, 2, 2), stride=(1, 2, 2),
                               padding=(0, 1, 1), output_padding=(0, 1, 1),
                               bias=False),
            nn.BatchNorm3d(out_channels),
            nn.LeakyReLU(inplace=True)
        )

    def forward(self, x):
        return self.up(x)


class MyModel(nn.Module):
    """Encoder -> LSTM -> decoder over 5-D volumes shaped [N, 1, D, H, W].

    The encoder halves H and W six times (depth D is preserved), the LSTM
    treats the depth axis as the time dimension, and the decoder upsamples
    H and W five times.  The LSTM input size is hard-coded, so inputs must
    reduce to a 9x8 spatial map after the encoder (e.g. 560x480 inputs).
    NOTE(review): with 5 Up stages against 6 Down stages the output is not
    restored to the exact input resolution — confirm that is intended.
    """

    def __init__(self):
        super().__init__()

        # Encoder: each stage ceil-halves H and W, keeps depth.
        self.down = nn.Sequential(
            Down(1, 8),
            Down(8, 16),
            Down(16, 32),
            Down(32, 64),
            Down(64, 128),
            Down(128, 256)
        )

        # Temporal model over the depth axis: one flattened feature
        # vector (C * H * W) per depth slice.
        self.lstm = nn.LSTM(
            input_size=256 * 9 * 8,  # adjust if the encoder's output spatial size changes
            hidden_size=512,
            num_layers=1,
            batch_first=True
        )

        # Bug fix: the LSTM emits 512 features per step, which cannot be
        # reshaped back to [C, H, W] = [256, 9, 8] (512 != 18432).  Project
        # each step back to C*H*W before handing it to the decoder.
        self.proj = nn.Linear(512, 256 * 9 * 8)

        # Decoder
        self.up = nn.Sequential(
            Up(256, 128),
            Up(128, 64),
            Up(64, 32),
            Up(32, 16),
            Up(16, 8),
            nn.Conv3d(8, 1, kernel_size=1)
        )

        self._init_weights()

    def _init_weights(self):
        """Kaiming init for convolutions, Xavier/orthogonal for LSTM weights."""
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')
            elif isinstance(m, nn.LSTM):
                for name, param in m.named_parameters():
                    if 'weight_ih' in name:
                        nn.init.xavier_normal_(param)
                    elif 'weight_hh' in name:
                        nn.init.orthogonal_(param)

    def forward(self, x):
        # Encoder: [N, 1, D, H, W] -> [N, 256, D, h, w] (h, w expected 9, 8)
        x = self.down(x)

        # Flatten each depth slice and run the LSTM with depth as sequence.
        N, C, D, H, W = x.shape
        x = x.permute(0, 2, 1, 3, 4).reshape(N, D, -1)  # [N, D, C*H*W]
        x, _ = self.lstm(x)                             # [N, D, 512]
        x = self.proj(x)                                # [N, D, C*H*W]
        x = x.reshape(N, D, C, H, W).permute(0, 2, 1, 3, 4)

        # Decoder
        x = self.up(x)
        return x


if __name__ == '__main__':

    # Input tensor dimensions
    N = 10      # batch size
    C_in = 1    # input channels (single-channel volumes)
    depth = 20  # depth / sequence length
    H_in = 560  # input height
    W_in = 480  # input width

    # Bug fix: the original unconditionally used CUDA and crashed on
    # CPU-only machines; fall back to CPU when no GPU is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Random 5-D input tensor [N, C, D, H, W]
    input_tensor = torch.randn(N, C_in, depth, H_in, W_in, device=device)

    model = MyModel().to(device)
    summary(model, input_size=input_tensor.shape)
    output = model(input_tensor)

    print(output.shape)


