import torchvision.models as models
from torchvision.models import ResNet152_Weights
import torch
import torch.nn as nn

class CNNv(nn.Module):
    """Visual encoder: pretrained ResNet-152 backbone projected to ``embed_size``.

    The classification head of the ResNet is dropped; a fresh linear layer
    maps the 2048-dim pooled features to the embedding size, followed by
    1-D batch normalization.
    """

    def __init__(self, embed_size):
        """Load the pretrained ResNet-152 and replace the top fc layer."""
        super(CNNv, self).__init__()
        backbone = models.resnet152(weights=ResNet152_Weights.IMAGENET1K_V1)
        # Keep every layer except the final fully-connected classifier.
        self.resnet = nn.Sequential(*list(backbone.children())[:-1])
        self.linear = nn.Linear(backbone.fc.in_features, embed_size)
        self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)

    def forward(self, images):
        """Extract one feature vector per input image."""
        # BatchNorm1d cannot compute batch statistics from a single sample,
        # so use running statistics (eval mode) when the batch size is 1.
        self.bn.train(images.size(0) > 1)
        flat = self.resnet(images).reshape(images.size(0), -1)
        return self.bn(self.linear(flat))
    
class CNNv_features(nn.Module):
    """Embeds precomputed ResNet feature maps (channel dim 2048) into ``embed_size``."""

    def __init__(self, embed_size):
        """Set up global average pooling and the embedding projection."""
        super(CNNv_features, self).__init__()
        self.adaptive_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(2048, embed_size)

    def forward(self, features):
        """Pool an (N, 2048, H, W) feature map and project it to (N, embed_size)."""
        pooled = self.adaptive_pool(features)
        flat = pooled.reshape(pooled.size(0), -1)
        return self.linear(flat)

class ValueNetworkRNN(nn.Module):
    """Encodes padded caption batches with an LSTM and returns the final hidden state."""

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1, dropout=0):
        super(ValueNetworkRNN, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers,
                            batch_first=True, dropout=dropout)
        self.hidden_size = hidden_size

    def forward(self, captions, lengths):
        """Return the last LSTM layer's hidden state, one vector per caption.

        ``pack_padded_sequence`` requires lengths in descending order, so the
        batch is sorted first and the hidden states are restored to the
        caller's original order before returning.
        """
        sorted_lengths, order = lengths.view(-1).sort(dim=0, descending=True)
        embedded = self.embedding(captions[order])
        packed = nn.utils.rnn.pack_padded_sequence(
            embedded, sorted_lengths.cpu(), batch_first=True)
        _, (hidden, _) = self.lstm(packed)
        # Invert the sorting permutation so rows match the input batch order.
        _, inverse = order.sort(0)
        return hidden[:, inverse][-1]

class ValueNetwork(nn.Module):
    """Scores an (image, partial caption) pair with a single scalar value.

    Concatenates CNNv image features with ValueNetworkRNN caption features
    and passes the result through a three-layer linear head.
    """

    def __init__(self, vocab_size, embed_size, cnn_embed_size, rnn_hidden_size):
        super(ValueNetwork, self).__init__()
        self.valrnn = ValueNetworkRNN(embed_size, rnn_hidden_size, vocab_size)
        self.cnnv = CNNv(cnn_embed_size)
        # NOTE(review): no nonlinearity between these layers, so the head is
        # mathematically equivalent to a single linear map — confirm intended.
        self.linear1 = nn.Linear(cnn_embed_size + rnn_hidden_size, 1024)
        self.linear2 = nn.Linear(1024, 512)
        self.linear3 = nn.Linear(512, 1)

    def forward(self, images, captions, lengths):
        """Return a (batch, 1) value estimate for each image/caption pair."""
        visual = self.cnnv(images)
        semantic = self.valrnn(captions, lengths)
        # Fuse visual and semantic features along the feature dimension.
        state = torch.cat((visual, semantic), dim=1)
        return self.linear3(self.linear2(self.linear1(state)))

class ValueNetwork_features(nn.Module):
    """Value head operating on precomputed image feature maps.

    Same structure as ValueNetwork, but consumes CNNv_features output
    (pooled feature maps) instead of raw images.
    """

    def __init__(self, vocab_size, embed_size, cnn_embed_size, rnn_hidden_size):
        super(ValueNetwork_features, self).__init__()
        self.valrnn = ValueNetworkRNN(embed_size, rnn_hidden_size, vocab_size)
        self.cnnv = CNNv_features(cnn_embed_size)
        # NOTE(review): no nonlinearity between these layers, so the head is
        # mathematically equivalent to a single linear map — confirm intended.
        self.linear1 = nn.Linear(cnn_embed_size + rnn_hidden_size, 1024)
        self.linear2 = nn.Linear(1024, 512)
        self.linear3 = nn.Linear(512, 1)

    def forward(self, features, captions, lengths):
        """Return a (batch, 1) value estimate for each feature-map/caption pair."""
        visual = self.cnnv(features)
        semantic = self.valrnn(captions, lengths)
        # Fuse visual and semantic features along the feature dimension.
        state = torch.cat((visual, semantic), dim=1)
        return self.linear3(self.linear2(self.linear1(state)))