import torch
import torch.nn as nn
from typing import Tuple

class ConvLSTMCell(nn.Module):
    """A single ConvLSTM cell: an LSTM whose gate transforms are 2-D convolutions.

    The input and the previous hidden state are concatenated along the
    channel axis and passed through one convolution that emits all four
    gate pre-activations at once.
    """

    def __init__(self, input_dim, hidden_dim, kernel_size, bias=True):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim

        # "Same" padding so spatial dims are preserved (odd kernel assumed).
        self.conv = nn.Conv2d(
            in_channels=input_dim + hidden_dim,
            out_channels=4 * hidden_dim,
            kernel_size=kernel_size,
            padding=kernel_size // 2,
            bias=bias,
        )

    def forward(self, x, h_prev, c_prev):
        """One recurrence step.

        x:      [B, input_dim, H, W]
        h_prev: [B, hidden_dim, H, W]
        c_prev: [B, hidden_dim, H, W]
        Returns (h_next, c_next), each [B, hidden_dim, H, W].
        """
        gates = self.conv(torch.cat((x, h_prev), dim=1))
        in_gate, forget_gate, out_gate, cell_gate = gates.chunk(4, dim=1)

        i = torch.sigmoid(in_gate)
        f = torch.sigmoid(forget_gate)
        o = torch.sigmoid(out_gate)
        g = torch.tanh(cell_gate)

        c_next = f * c_prev + i * g
        h_next = o * torch.tanh(c_next)
        return h_next, c_next

class ConvLSTM(nn.Module):
    """Encoder-forecaster ConvLSTM.

    Encodes an input sequence [B, T_in, C, H, W] with a stack of
    ConvLSTMCells, then rolls the stack forward autoregressively for
    ``future_steps`` steps, emitting one frame per step.

    Args:
        input_dim:    channels of each input frame.
        output_dim:   channels of each predicted frame.
        hidden_dim:   channels of every cell's hidden state.
        kernel_size:  convolution kernel size of each cell.
        num_layers:   number of stacked ConvLSTM cells.
        future_steps: number of frames to forecast.

    Forward returns a tensor of shape [B, future_steps, output_dim, H, W].
    """

    def __init__(self,
                 input_dim: int,
                 output_dim: int,
                 hidden_dim: int,
                 kernel_size: int = 3,
                 num_layers: int = 1,
                 future_steps: int = 2):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.future_steps = future_steps
        self.kernel_size = kernel_size

        self.cells = nn.ModuleList()
        for i in range(num_layers):
            # First layer consumes the raw input; deeper layers consume the
            # hidden state of the layer below.
            curr_input_dim = input_dim if i == 0 else hidden_dim
            self.cells.append(ConvLSTMCell(curr_input_dim, hidden_dim, kernel_size))

        # 1x1 conv projecting the top hidden state back to input_dim channels,
        # so a prediction can be fed back into the first cell.
        self.middle_conv = nn.Conv2d(hidden_dim, input_dim, kernel_size=1)
        # 1x1 conv projecting the top hidden state to the output channels.
        self.output_conv = nn.Conv2d(hidden_dim, output_dim, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """x: [B, T_in, C, H, W] -> [B, future_steps, output_dim, H, W]."""
        B, T, C, H, W = x.size()

        # Zero-initialized hidden/cell states, one pair per layer.
        h = [torch.zeros(B, self.hidden_dim, H, W, device=x.device)
             for _ in range(self.num_layers)]
        c = [torch.zeros(B, self.hidden_dim, H, W, device=x.device)
             for _ in range(self.num_layers)]

        # Encoding phase: run the observed sequence through the stack.
        for t in range(T):
            x_in = x[:, t]
            for layer in range(self.num_layers):
                h[layer], c[layer] = self.cells[layer](x_in, h[layer], c[layer])
                x_in = h[layer]

        # Forecasting phase: autoregressive rollout.
        # BUG FIX: previously the input projection was computed only once
        # before this loop, so every future step consumed the same stale
        # frame. Now the top hidden state is re-projected to input_dim
        # channels after each step and fed into the next one.
        x_in = self.middle_conv(h[-1])
        outputs = []
        for _ in range(self.future_steps):
            x_pred = x_in
            for layer in range(self.num_layers):
                h[layer], c[layer] = self.cells[layer](x_pred, h[layer], c[layer])
                x_pred = h[layer]
            outputs.append(self.output_conv(h[-1]))
            # Feed the freshly updated hidden state back for the next step.
            x_in = self.middle_conv(h[-1])

        return torch.stack(outputs, dim=1)
