import torch
import torch.nn as nn
from torchvision import models
from ConvLSTM import ConvLSTM
import os


def build_resnet18(pretrain_path=None):
    """Construct a ResNet-18 and optionally load weights from a local file.

    Args:
        pretrain_path: Optional path to a saved state dict. When the file
            exists, the weights are loaded onto CPU; otherwise the model
            keeps torchvision's default initialization.

    Returns:
        The (possibly weight-loaded) ResNet-18 module.
    """
    model = models.resnet18(pretrained=False)
    have_local_weights = pretrain_path is not None and os.path.exists(pretrain_path)
    if not have_local_weights:
        print("使用默认随机初始化或自动下载权重")
        return model
    print(f"加载本地权重: {pretrain_path}")
    # NOTE(review): torch.load without weights_only unpickles arbitrary
    # objects — only load checkpoint files from trusted sources.
    model.load_state_dict(torch.load(pretrain_path, map_location='cpu'))
    return model


class ResNetConvLSTM(nn.Module):
    """ResNet-18 feature extractor followed by a ConvLSTM temporal head.

    Each frame of an input clip is encoded by the early stages of a
    ResNet-18 (through ``layer3``), reduced to 64 channels with a 1x1 conv,
    run through a ConvLSTM over the time axis, averaged across frames,
    spatially pooled, and classified with a linear layer.
    """

    def __init__(self, num_classes=2, pretrain_path=r".\checkpoint\resnet18.pth", lstm_hidden_channels=(64, 32)):
        """
        Args:
            num_classes: number of output classes for the final linear layer.
            pretrain_path: optional local path to a ResNet-18 state dict;
                forwarded to ``build_resnet18``.
            lstm_hidden_channels: hidden-channel sizes, one entry per
                ConvLSTM layer (default changed from a mutable list to an
                equivalent tuple to avoid the shared-default pitfall).
        """
        super().__init__()
        # Feature-extraction backbone: ResNet-18 truncated after layer3
        # (avgpool and fc are dropped on purpose).
        base_model = build_resnet18(pretrain_path=pretrain_path)
        self.backbone = nn.Sequential(
            base_model.conv1,    # [B, 64, H/2, W/2]
            base_model.bn1,
            base_model.relu,
            base_model.maxpool,  # [B, 64, H/4, W/4]
            base_model.layer1,   # [B, 64, H/4, W/4]
            base_model.layer2,   # [B, 128, H/8, W/8]
            base_model.layer3,   # [B, 256, H/16, W/16]
        )
        self.feature_channels = 256  # channels produced by layer3
        # 1x1 conv shrinks the per-frame feature map before the ConvLSTM.
        self.feature_reduction = nn.Conv2d(self.feature_channels, 64, kernel_size=1)
        self.convlstm = ConvLSTM(
            in_channels=64,
            out_channels=list(lstm_hidden_channels),
            kernel_size=(3, 3),
            num_layers=len(lstm_hidden_channels),
            batch_first=True
        )
        self.pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(lstm_hidden_channels[-1], num_classes)

    def forward(self, x):
        """
        Args:
            x: video clip tensor of shape [B, T, C, H, W].

        Returns:
            Class logits of shape [B, num_classes].
        """
        B, T, C, H, W = x.shape
        # Fold time into the batch dimension so the 2D CNN sees every frame.
        feat = self.backbone(x.view(B * T, C, H, W))  # [B*T, 256, H/16, W/16]
        feat = self.feature_reduction(feat)           # [B*T, 64, h, w]
        _, c_f, h_f, w_f = feat.shape
        cnn_feats = feat.view(B, T, c_f, h_f, w_f)

        # ConvLSTM over time. Index [-1] (was [0]) so we always read the
        # *last* layer's output sequence — with num_layers > 1 the final
        # hidden size must match self.fc, and [-1] is correct whether the
        # ConvLSTM returns all layers or only the final one.
        layer_output_list, _ = self.convlstm(cnn_feats)
        h_sequence = layer_output_list[-1]            # [B, T, C_h, h, w]
        h_avg_time = torch.mean(h_sequence, dim=1)    # average over frames

        pooled = self.pool(h_avg_time)                # [B, C_h, 1, 1]
        return self.fc(pooled.flatten(1))             # [B, num_classes]


if __name__ == "__main__":
    # Smoke test: push a random 15-frame clip through the model.
    # Fixes: the previous path used ".pt" while the checkpoint everywhere
    # else is ".pth" (so weights silently never loaded), and the
    # unconditional .cuda() crashed on CPU-only machines.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = ResNetConvLSTM(num_classes=2, pretrain_path=r".\checkpoint\resnet18.pth").to(device)
    x = torch.randn(2, 15, 3, 360, 640, device=device)
    out = model(x)
    print(out.shape)  # torch.Size([2, 2])
