import torchvision.models as models
from torchvision.models import ResNet152_Weights
import torch
import torch.nn as nn


class EncoderCNN(nn.Module):
    """Frozen pretrained ResNet-152 feature extractor with a trainable projection.

    Images are encoded by the (frozen) ResNet backbone, flattened, projected to
    ``embed_size`` by a trainable linear layer, and normalized by a frozen
    (non-affine, eval-mode) BatchNorm1d.
    """

    def __init__(self, embed_size):
        """Load the pretrained ResNet-152 and use it as a feature extractor.

        Args:
            embed_size: dimensionality of the output image embedding.
        """
        super(EncoderCNN, self).__init__()
        resnet = models.resnet152(weights=ResNet152_Weights.IMAGENET1K_V1)
        for param in resnet.parameters():
            param.requires_grad = False  # freeze backbone weights
        # Drop the final fully-connected classification layer; keep everything
        # up to (and including) the global average pool.
        modules = list(resnet.children())[:-1]
        self.resnet = nn.Sequential(*modules)
        # resnet.fc.in_features is the feature width the removed fc expected.
        self.linear = nn.Linear(resnet.fc.in_features, embed_size)
        # Frozen BN: affine=False removes learnable scale/shift, but running
        # statistics would still update in training mode — see train() below.
        self.bn = nn.BatchNorm1d(embed_size, momentum=0.01, affine=False)
        self.bn.eval()

    def train(self, mode=True):
        """Switch training mode, but keep the BN layer permanently in eval mode.

        BUGFIX: calling ``self.bn.eval()`` only in ``__init__`` is undone by any
        later ``model.train()`` call, which would silently resume updating the
        BN running statistics. Overriding ``train`` pins the layer frozen.
        """
        super(EncoderCNN, self).train(mode)
        self.bn.eval()
        return self

    def forward(self, images):
        """Extract feature vectors from input images.

        Args:
            images: batch of images accepted by ResNet-152
                (assumes shape (B, 3, H, W) — TODO confirm with caller).

        Returns:
            Tensor of shape (B, embed_size).
        """
        with torch.no_grad():  # backbone is frozen; skip gradient bookkeeping
            features = self.resnet(images)
        features = features.reshape(features.size(0), -1)  # flatten to (B, C)
        features = self.bn(self.linear(features))
        return features

class Encoder_features(nn.Module):
    """Project pre-extracted CNN feature maps to a fixed-size embedding.

    Unlike EncoderCNN, this module takes feature maps that were already
    produced by a backbone: it global-average-pools them to 1x1, flattens,
    and applies a linear projection.
    """

    def __init__(self, embed_size):
        """Build the pooling + projection head.

        Args:
            embed_size: dimensionality of the output embedding.
        """
        super(Encoder_features, self).__init__()
        self.adaptive_pool = nn.AdaptiveAvgPool2d((1, 1))
        # 2048 is the channel width of a ResNet-152 feature map.
        self.linear = nn.Linear(2048, embed_size)

    def forward(self, features):
        """Pool, flatten, and project a batch of feature maps.

        Args:
            features: tensor of shape (B, 2048, H, W).

        Returns:
            Tensor of shape (B, embed_size).
        """
        pooled = self.adaptive_pool(features)
        flattened = pooled.reshape(pooled.size(0), -1)
        return self.linear(flattened)

class SentenceEncoderRNN(nn.Module):
    """Encode padded caption batches into one hidden vector per caption."""

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1, dropout=0):
        """Build embedding + LSTM encoder.

        Args:
            embed_size: word-embedding dimensionality.
            hidden_size: LSTM hidden-state dimensionality.
            vocab_size: number of tokens in the vocabulary.
            num_layers: stacked LSTM layers.
            dropout: inter-layer LSTM dropout (only applies when num_layers > 1).
        """
        super(SentenceEncoderRNN, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True, dropout=dropout)
        self.hidden_size = hidden_size

    def forward(self, captions, lengths):
        """Return the top-layer final hidden state for each caption.

        Args:
            captions: LongTensor (B, T) of padded token ids.
            lengths: tensor of true lengths, shape (B,) or (B, 1).

        Returns:
            Tensor of shape (B, hidden_size), in the caller's batch order.
        """
        # Normalise lengths: a 1-D tensor stays as-is; a trailing singleton
        # dimension (B, 1) is squeezed away.
        flat_lengths = lengths if lengths.dim() == 1 else lengths.squeeze(1)
        # pack_padded_sequence requires sequences sorted by decreasing length.
        sorted_lengths, order = flat_lengths.sort(dim=0, descending=True)
        embedded = self.embedding(captions[order])
        packed = nn.utils.rnn.pack_padded_sequence(embedded, sorted_lengths.cpu(), batch_first=True)
        _, (final_hidden, _) = self.lstm(packed)
        # Invert the sort so hidden states line up with the original batch.
        _, inverse = order.sort(0)
        final_hidden = final_hidden[:, inverse]
        return final_hidden[-1]  # hidden state of the last LSTM layer

class RewardNetwork(nn.Module):
    """Joint image/sentence encoder producing comparable embeddings.

    Image features come from a frozen ResNet encoder and are mapped into the
    sentence encoder's hidden space so the two can be compared (e.g. by
    cosine similarity) to score caption quality.
    """

    def __init__(self, vocab_size, embed_size, cnn_embed_size, rnn_hidden_size):
        """Assemble the CNN branch, the RNN branch, and the projection.

        Args:
            vocab_size: caption vocabulary size.
            embed_size: word-embedding dimensionality for the RNN branch.
            cnn_embed_size: output size of the image encoder.
            rnn_hidden_size: hidden size of the sentence encoder (shared space).
        """
        super(RewardNetwork, self).__init__()
        self.encoder_cnn = EncoderCNN(cnn_embed_size)
        self.encoder_rnn = SentenceEncoderRNN(embed_size, rnn_hidden_size, vocab_size)
        # Bridge: map image embeddings into the sentence-embedding space.
        self.fc = nn.Linear(cnn_embed_size, rnn_hidden_size)

    def forward(self, images, captions, lengths):
        """Encode both modalities into the shared space.

        Returns:
            (image_features, sentence_features), each of shape
            (B, rnn_hidden_size).
        """
        visual = self.fc(self.encoder_cnn(images))
        textual = self.encoder_rnn(captions, lengths)
        return visual, textual
    
class RewardNetwork_features(nn.Module):
    """Variant of RewardNetwork that consumes pre-extracted feature maps.

    Identical pairing of an image branch and a sentence branch, except the
    image branch (Encoder_features) takes backbone feature maps rather than
    raw images.
    """

    def __init__(self, vocab_size, embed_size, cnn_embed_size, rnn_hidden_size):
        """Assemble the feature-map branch, the RNN branch, and the projection.

        Args:
            vocab_size: caption vocabulary size.
            embed_size: word-embedding dimensionality for the RNN branch.
            cnn_embed_size: output size of the feature-map encoder.
            rnn_hidden_size: hidden size of the sentence encoder (shared space).
        """
        super(RewardNetwork_features, self).__init__()
        self.encoder_cnn = Encoder_features(cnn_embed_size)
        self.encoder_rnn = SentenceEncoderRNN(embed_size, rnn_hidden_size, vocab_size)
        # Bridge: map image embeddings into the sentence-embedding space.
        self.fc = nn.Linear(cnn_embed_size, rnn_hidden_size)

    def forward(self, features, captions, lengths):
        """Encode both modalities into the shared space.

        Returns:
            (image_features, sentence_features), each of shape
            (B, rnn_hidden_size).
        """
        visual = self.fc(self.encoder_cnn(features))
        textual = self.encoder_rnn(captions, lengths)
        return visual, textual

def cosine_distance(x1, x2=None, eps=1e-8):
    """Pairwise cosine similarity matrix between the rows of x1 and x2.

    NOTE(review): despite the name, this returns cosine *similarity*
    (1.0 for identical directions), not a distance. The name is kept for
    existing callers.

    Args:
        x1: tensor of shape (N, D).
        x2: tensor of shape (M, D); defaults to x1 (self-similarity).
        eps: lower bound on the norm product, guarding against divide-by-zero.

    Returns:
        Tensor of shape (N, M) with entry (i, j) = cos(x1[i], x2[j]).
    """
    if x2 is None:
        x2 = x1
    norms1 = x1.norm(p=2, dim=1, keepdim=True)
    # Reuse norms when comparing a matrix against itself.
    norms2 = norms1 if x2 is x1 else x2.norm(p=2, dim=1, keepdim=True)
    denom = (norms1 * norms2.t()).clamp(min=eps)
    return torch.mm(x1, x2.t()) / denom

def GetRewards(images, captions, caplens, model):
    """Score each (image, caption) pair by visual-semantic similarity.

    Args:
        images: batch of images for the model's image branch.
        captions: padded caption token ids.
        caplens: caption lengths.
        model: callable returning (image_features, sentence_features),
            each of shape (B, D).

    Returns:
        Tensor of shape (B, 1): cosine similarity between each image
        embedding and its own caption embedding.
    """
    visual, textual = model(images, captions, caplens)
    # Full pairwise similarity matrix; the diagonal pairs each image with
    # its own caption (off-diagonal entries are discarded).
    similarity = cosine_distance(visual, textual)
    rewards = torch.diag(similarity)
    return rewards.unsqueeze(1)

def GetRewards_features(features, captions, caplens, model):
    """Score each (feature-map, caption) pair by visual-semantic similarity.

    Args:
        features: pre-extracted image feature maps for the model.
        captions: padded caption token ids.
        caplens: caption lengths.
        model: callable returning (image_features, sentence_features),
            each of shape (B, D).

    Returns:
        Tensor of shape (B, 1): row-wise cosine similarity between the two
        feature sets.
    """
    visual, textual = model(features, captions, caplens)
    # cosine_similarity compares matching rows directly (dim=1 default),
    # so no pairwise matrix is needed here.
    rewards = torch.cosine_similarity(visual, textual)
    return rewards.unsqueeze(1)