import torch
import torch.nn as nn
from torchinfo import summary

from models.conv_lstm.conv_lstm_cell import ConvLSTMCell


class EncoderDecoderConvLSTM(nn.Module):
    """Encoder-decoder video prediction model built around a single ConvLSTM cell.

    Pipeline: a 3D-conv encoder downsamples each frame spatially by 4x
    (stride 2 twice, time axis untouched), a ConvLSTMCell is unrolled over
    the time dimension, and a 3D transposed-conv decoder upsamples the last
    ``pred_frames`` hidden states back to the input resolution, with a
    sigmoid squashing outputs into (0, 1).
    """

    def __init__(self, input_shape=(480, 560)):
        """
        Args:
            input_shape: (H, W) of the expected input frames. Informational —
                the conv layers are shape-agnostic — but H and W must each be
                divisible by 4 for the decoder to restore the original size.
        """
        super().__init__()
        self.input_shape = input_shape
        # Single source of truth for the ConvLSTM hidden width; used by the
        # cell, the decoder's input channels, and the state initializer.
        self.hidden_dim = 256

        # Encoder: two strided 3D convs, each halving H and W
        # (B, 1, T, H, W) -> (B, 128, T, H/4, W/4).
        self.encoder = nn.Sequential(
            nn.Conv3d(1, 64, kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2)),
            nn.BatchNorm3d(64),
            nn.LeakyReLU(),
            nn.Conv3d(64, 128, kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2)),
            nn.BatchNorm3d(128),
            nn.LeakyReLU(),
        )

        # Recurrent core: one ConvLSTM cell applied per time step.
        self.convlstm = ConvLSTMCell(
            input_dim=128,
            hidden_dim=self.hidden_dim,
            kernel_size=(5, 5)
        )

        # Decoder: two transposed convs, each exactly doubling H and W
        # (out = (in-1)*2 - 2*2 + 6 = 2*in), then a 1x1 conv to a single
        # channel. A 1x1 head is used because a strided transposed conv
        # here could not land exactly on the original (H, W).
        self.decoder = nn.Sequential(
            nn.ConvTranspose3d(self.hidden_dim, 128, kernel_size=(3, 6, 6), stride=(1, 2, 2), padding=(1, 2, 2)),
            nn.BatchNorm3d(128),
            nn.LeakyReLU(),
            nn.ConvTranspose3d(128, 64, kernel_size=(3, 6, 6), stride=(1, 2, 2), padding=(1, 2, 2)),
            nn.BatchNorm3d(64),
            nn.LeakyReLU(),
            nn.ConvTranspose3d(64, 1, kernel_size=1, stride=1, padding=0),
            nn.Sigmoid()
        )

    def forward(self, x, pred_frames=20):
        """Run the full encode -> ConvLSTM -> decode pipeline.

        Args:
            x: input clip of shape (B, 1, T, H, W); H and W divisible by 4.
            pred_frames: number of output frames. Must be <= T — the decoder
                consumes the last ``pred_frames`` ConvLSTM hidden states.

        Returns:
            Tensor of shape (B, 1, pred_frames, H, W) with values in (0, 1).
        """
        batch_size, _, seq_len, h, w = x.shape

        # Spatial feature extraction: (B, 128, T, H/4, W/4).
        encoded = self.encoder(x)

        # Zero-initialized hidden/cell state at the encoded resolution.
        h_state, c_state = self._init_hidden(
            batch_size=batch_size,
            image_size=(encoded.shape[-2], encoded.shape[-1])
        )

        # Unroll the ConvLSTM over time, collecting every hidden state.
        convlstm_outputs = []
        for t in range(seq_len):
            h_state, c_state = self.convlstm(
                input_tensor=encoded[:, :, t],  # features at time step t
                cur_state=[h_state, c_state]
            )
            convlstm_outputs.append(h_state)

        # Decode the last `pred_frames` hidden states back to pixel space.
        # NOTE(review): this reuses encoder-aligned states rather than rolling
        # the cell forward autoregressively — "prediction" here means decoding
        # the tail of the observed sequence.
        decoder_input = torch.stack(convlstm_outputs[-pred_frames:], dim=2)
        outputs = self.decoder(decoder_input)  # (B, 1, pred_frames, H, W)

        return outputs

    def _init_hidden(self, batch_size, image_size):
        """Return zero (hidden, cell) states of shape (B, hidden_dim, H', W')."""
        # Allocate directly on the model's device instead of zeros().to(...),
        # avoiding a CPU allocation followed by a device copy.
        h = torch.zeros(batch_size, self.hidden_dim, image_size[0], image_size[1],
                        device=self.device)
        c = torch.zeros_like(h)
        return h, c

    @property
    def device(self):
        """Device of the model's parameters (assumes all share one device)."""
        return next(self.parameters()).device

if __name__ == "__main__":
    # Smoke test: print a layer summary for a 20-frame 480x560 batch of 4.
    # Pick the GPU when present so this also runs on CPU-only machines
    # instead of crashing on a hard-coded .cuda() call.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = EncoderDecoderConvLSTM().to(device)
    summary(model, input_size=(4, 1, 20, 480, 560))
    print("----------------------------------")