import torch
import torch.nn as nn
from torch.nn import functional as F


# Custom ConvLSTM2d module
class ConvLSTM2d(nn.Module):
    """Convolutional LSTM cell unrolled over a time axis.

    Replaces the matrix products of a standard LSTM with 2D convolutions
    so the hidden and cell states keep their spatial layout.

    Args:
        input_dim: channels of each input frame.
        hidden_dim: channels of the hidden/cell state.
        kernel_size: side of the square conv kernel; should be odd so the
            ``kernel_size // 2`` padding preserves H and W.
        bias: whether the gate convolution has a bias term.
    """

    def __init__(self, input_dim, hidden_dim, kernel_size, bias=True):
        super(ConvLSTM2d, self).__init__()
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size

        # A single convolution produces all four gates (i, f, g, o) at once:
        # the frame and previous hidden state are concatenated on the channel
        # axis and the output is later split into 4 * hidden_dim maps.
        self.conv = nn.Conv2d(
            in_channels=input_dim + hidden_dim,
            out_channels=4 * hidden_dim,
            kernel_size=kernel_size,
            padding=kernel_size // 2,  # "same" padding keeps H x W unchanged
            bias=bias,
        )

    def forward(self, x, hidden_state=None):
        """Run the cell over every time step of ``x``.

        Args:
            x: input of shape [B, T, C, H, W].
            hidden_state: optional (h, c) pair, each [B, hidden_dim, H, W];
                zero states are used when omitted.

        Returns:
            ``(outputs, (h, c))`` where outputs is [B, T, hidden_dim, H, W]
            and (h, c) are the states after the final step.
        """
        batch, steps = x.shape[0], x.shape[1]
        height, width = x.shape[3], x.shape[4]

        if hidden_state is not None:
            h, c = hidden_state
        else:
            # Fresh zero states, allocated directly on the input's device.
            state_shape = (batch, self.hidden_dim, height, width)
            h = torch.zeros(*state_shape, device=x.device)
            c = torch.zeros(*state_shape, device=x.device)

        per_step = []
        for step in range(steps):
            frame = x[:, step]
            stacked = torch.cat([frame, h], dim=1)  # concat on channel axis
            i_gate, f_gate, g_gate, o_gate = torch.split(
                self.conv(stacked), self.hidden_dim, dim=1
            )

            # Gate nonlinearities.
            i_gate = torch.sigmoid(i_gate)
            f_gate = torch.sigmoid(f_gate)
            g_gate = torch.tanh(g_gate)
            o_gate = torch.sigmoid(o_gate)

            # LSTM state update.
            c = f_gate * c + i_gate * g_gate
            h = o_gate * torch.tanh(c)
            per_step.append(h)

        return torch.stack(per_step, dim=1), (h, c)


# TCN temporal downsampling module
class TemporalDownsampler(nn.Module):
    """Causal 1D convolution that halves the temporal length.

    Bug fix: the original passed ``padding=(kernel_size - 1) * dilation``
    to ``nn.Conv1d``, which pads BOTH ends of the sequence. That is not
    causal and, with stride 2, maps T=14 to 9 steps instead of the
    intended 7. Padding is now applied explicitly on the left only, so
    each output step depends solely on current/past inputs and the
    output length is exactly ``ceil(T / 2)``.

    Args:
        in_channels: channels of the input sequence.
        out_channels: channels produced by the convolution.
        kernel_size: temporal kernel width.
        dilation: temporal dilation factor.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1):
        super().__init__()
        # Left-only padding needed to make the convolution causal.
        self.causal_padding = (kernel_size - 1) * dilation
        self.conv = nn.Conv1d(
            in_channels,
            out_channels,
            kernel_size,
            stride=2,    # halves the time axis (e.g. 14 -> 7)
            padding=0,   # padding is applied manually in forward()
            dilation=dilation
        )
        self.relu = nn.ReLU()

    def forward(self, x):
        """Apply causal padding, the strided convolution, and ReLU.

        Args:
            x: tensor of shape [B, in_channels, T].

        Returns:
            Tensor of shape [B, out_channels, ceil(T / 2)].
        """
        # Pad only the past (left) side of the time axis.
        x = F.pad(x, (self.causal_padding, 0))
        return self.relu(self.conv(x))


# Full TCN + ConvLSTM network
class TCN_ConvLSTM(nn.Module):
    """Temporal downsampling (TCN) followed by ConvLSTM spatio-temporal modeling.

    Bug fixes relative to the original:
      * The TCN was built with ``in_channels=H * W`` and 32 output channels,
        collapsing the entire flattened spatial grid into 32 channels; the
        subsequent ``view(B, 7, -1, H, W)`` could never succeed because the
        element counts do not match (the forward pass raised at runtime).
        The TCN now runs on each pixel's time series independently
        (``in_channels=1``), preserving spatial structure.
      * The number of downsampled steps is read from the TCN output instead
        of being hard-coded to 7.
      * The per-step channel dimension is squeezed, so the result really is
        [B, T_out, H, W] as documented (the original stacked [B, 1, H, W]
        maps and would have returned [B, T_out, 1, H, W]).

    Args:
        input_shape: (T, H, W) of the expected input sequence.
        tcn_channels: feature channels produced per pixel by the TCN.
        lstm_hidden: hidden channels of the ConvLSTM.
    """

    def __init__(self, input_shape=(14, 448, 304), tcn_channels=32, lstm_hidden=64):
        super().__init__()

        # Temporal downsampler applied to each pixel's scalar time series.
        self.tcn = TemporalDownsampler(
            in_channels=1,
            out_channels=tcn_channels,
            kernel_size=3,
            dilation=2
        )

        # ConvLSTM consumes the per-pixel TCN features as channel maps.
        self.conv_lstm = ConvLSTM2d(
            input_dim=tcn_channels,
            hidden_dim=lstm_hidden,
            kernel_size=3
        )

        # 1x1 convolution projects the hidden state to one output map.
        self.final_conv = nn.Conv2d(lstm_hidden, 1, kernel_size=1)

    def forward(self, x):
        """Map an input sequence to a shorter output sequence.

        Args:
            x: tensor of shape [B, T, H, W] (e.g. [B, 14, 448, 304]).

        Returns:
            Tensor of shape [B, T_out, H, W], where T_out is the temporal
            length produced by the TCN.
        """
        B, T, H, W = x.shape

        # Step 1: one scalar time series per pixel, fed through the TCN.
        # [B, T, H, W] -> [B*H*W, 1, T]
        series = x.permute(0, 2, 3, 1).reshape(B * H * W, 1, T)
        feats = self.tcn(series)  # [B*H*W, tcn_channels, T_out]
        t_out = feats.shape[-1]

        # Step 2: restore the spatial grid for the ConvLSTM.
        # [B*H*W, C, T_out] -> [B, T_out, C, H, W]
        feats = feats.reshape(B, H, W, -1, t_out).permute(0, 4, 3, 1, 2)

        # Step 3: spatio-temporal modelling.
        lstm_out, _ = self.conv_lstm(feats)  # [B, T_out, lstm_hidden, H, W]

        # Step 4: project each time step to a single-channel map and
        # drop the channel dimension.
        frames = [
            self.final_conv(lstm_out[:, t]).squeeze(1)  # [B, H, W]
            for t in range(t_out)
        ]
        return torch.stack(frames, dim=1)  # [B, T_out, H, W]


# Smoke test: build the model and push one random batch through it.
if __name__ == "__main__":
    model = TCN_ConvLSTM()
    # NOTE(review): the default (14, 448, 304) input is large — this run
    # needs substantial memory/time on CPU; shrink input_shape to iterate.
    dummy_input = torch.randn(2, 14, 448, 304)  # batch of 2 sequences
    output = model(dummy_input)
    print(output.shape)  # expected output: torch.Size([2, 7, 448, 304])