import torch
import torch.nn as nn
from torchvision import models
from ConvLSTM import ConvLSTM
import os


def build_resnet18(pretrain_path=None):
    """Build a ResNet-18 backbone, optionally loading a local state dict.

    Args:
        pretrain_path: Optional path to a local ``state_dict`` checkpoint.
            If the file exists, its weights are loaded into the model;
            otherwise the model keeps its random initialization.

    Returns:
        A ``torchvision.models.ResNet`` (ResNet-18) instance.
    """
    base_model = models.resnet18(pretrained=False)
    if pretrain_path is not None and os.path.exists(pretrain_path):
        print(f"加载本地权重: {pretrain_path}")
        # weights_only=True restricts unpickling to tensors/containers,
        # preventing arbitrary code execution from an untrusted checkpoint;
        # a plain state dict loads fine under this restriction.
        state_dict = torch.load(pretrain_path, map_location='cpu', weights_only=True)
        base_model.load_state_dict(state_dict)
    else:
        # NOTE(review): nothing is auto-downloaded here (pretrained=False),
        # so this branch always yields random initialization despite the
        # message mentioning an automatic download.
        print("使用默认随机初始化或自动下载权重")
    return base_model


class ResNetConvLSTM(nn.Module):
    """ResNet-18 multi-scale feature extractor + ConvLSTM video classifier.

    For each frame, features from ResNet layer1/layer2/layer3 are upsampled
    to a common spatial size (that of layer1), concatenated (64+128+256=448
    channels), reduced to 64 channels by a 1x1 conv, and fed as a sequence to
    a ConvLSTM. The last ConvLSTM layer's hidden states are averaged over
    time, globally pooled, and classified.

    Args:
        num_classes: Number of output classes.
        pretrain_path: Optional local ResNet-18 state-dict path for the
            backbone (see ``build_resnet18``).
        lstm_hidden_channels: Hidden channel sizes, one entry per ConvLSTM
            layer. Defaults to ``[64, 32]``.
    """

    def __init__(self, num_classes=2, pretrain_path=r".\checkpoint\resnet18.pth", lstm_hidden_channels=None):
        super().__init__()
        # Sentinel-default instead of a mutable default argument ([64, 32]),
        # which would be shared across all instances.
        if lstm_hidden_channels is None:
            lstm_hidden_channels = [64, 32]

        # Truncated ResNet-18 backbone: keep the stem and layer1..layer3,
        # drop layer4 / avgpool / fc.
        base = build_resnet18(pretrain_path=pretrain_path)
        self.conv1 = base.conv1
        self.bn1 = base.bn1
        self.relu = base.relu
        self.maxpool = base.maxpool

        self.layer1 = base.layer1  # out: [B, 64,  H/4,  W/4]
        self.layer2 = base.layer2  # out: [B, 128, H/8,  W/8]
        self.layer3 = base.layer3  # out: [B, 256, H/16, W/16]

        # NOTE(review): the original defined fixed-scale nn.Upsample modules
        # (x2 / x4) here, but forward() correctly uses size-based
        # F.interpolate instead: with odd intermediate sizes (e.g. 45 -> 23
        # under stride 2), 23 * 4 != 90, so fixed scale factors would
        # misalign the feature maps. The unused modules were removed.

        # Fused channel count: 64 + 128 + 256 = 448, reduced to 64 before
        # the ConvLSTM.
        fusion_in = 64 + 128 + 256
        # NOTE(review): kept for backward compatibility, but this does NOT
        # match the fused feature output (64 channels after fusion_reduce);
        # confirm whether any caller still reads it.
        self.feature_channels = 256
        self.fusion_reduce = nn.Conv2d(fusion_in, 64, kernel_size=1)
        self.convlstm = ConvLSTM(
            in_channels=64,
            out_channels=lstm_hidden_channels,
            kernel_size=(3, 3),
            num_layers=len(lstm_hidden_channels),
            batch_first=True
        )
        self.pool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(0.5)
        self.fc = nn.Linear(lstm_hidden_channels[-1], num_classes)

    def forward(self, x):
        """Classify a video clip.

        Args:
            x: Tensor of shape [B, T, C, H, W].

        Returns:
            Logits of shape [B, num_classes].
        """
        B, T, C, H, W = x.shape
        # Fold time into the batch so the 2D backbone processes all frames
        # at once: [B*T, C, H, W].
        x = x.view(B * T, C, H, W)

        x = self.relu(self.bn1(self.conv1(x)))
        x = self.maxpool(x)

        f1 = self.layer1(x)   # [B*T, 64,  H/4,  W/4]
        f2 = self.layer2(f1)  # [B*T, 128, H/8,  W/8]
        f3 = self.layer3(f2)  # [B*T, 256, H/16, W/16]

        # Upsample deeper features to layer1's exact spatial size.
        # Size-based (not scale-factor) interpolation is required because
        # odd intermediate sizes round under strided convs.
        f2_up = nn.functional.interpolate(f2, size=f1.shape[2:], mode="bilinear", align_corners=False)
        f3_up = nn.functional.interpolate(f3, size=f1.shape[2:], mode="bilinear", align_corners=False)

        # Multi-scale fusion + channel reduction.
        fusion = torch.cat([f1, f2_up, f3_up], dim=1)  # [B*T, 448, H/4, W/4]
        fusion = self.fusion_reduce(fusion)            # [B*T, 64,  H/4, W/4]

        # Restore the time dimension for the ConvLSTM: [B, T, 64, H/4, W/4].
        _, C2, H2, W2 = fusion.shape
        fusion = fusion.view(B, T, C2, H2, W2)

        # Last ConvLSTM layer's full hidden-state sequence, averaged over
        # time (temporal mean pooling) rather than using only the final step.
        layer_output_list, _ = self.convlstm(fusion)
        h_sequence = layer_output_list[-1]
        h_avg_time = torch.mean(h_sequence, dim=1)

        # Global spatial pooling -> dropout -> linear classifier.
        pooled = self.pool(h_avg_time)                      # [B, C, 1, 1]
        logits = self.fc(self.dropout(pooled.flatten(1)))   # [B, num_classes]
        return logits


if __name__ == "__main__":
    model = ResNetConvLSTM(num_classes=2, pretrain_path=r".\checkpoint\resnet18.pt").cuda()
    x = torch.randn(2, 15, 3, 360, 640).cuda()
    out = model(x)
    print(out.shape)  # torch.Size([2, 2])
