import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence
import torch.nn.functional as F


class EncoderCNN(nn.Module):
    """CNN encoder: maps a batch of images to fixed-size feature embeddings.

    A pretrained VGG-16 convolutional stack is used as a frozen feature
    extractor; only the final linear projection and batch norm are trained.
    """

    def __init__(self, embed_size):
        """
        Args:
            embed_size (int): dimensionality of the output image embedding.
        """
        super(EncoderCNN, self).__init__()

        # Keep all 31 modules of vgg16.features (the full convolutional
        # stack); the fully-connected classifier head is discarded.
        vgg16 = models.vgg16(pretrained=True)
        conv_blocks = list(vgg16.features.children())[:31]
        self.vgg = nn.Sequential(*conv_blocks)

        # 25088 = 512 channels * 7 * 7 feature map, which is what the VGG-16
        # conv stack produces for a 224x224 input.
        # NOTE(review): assumes 224x224 images — confirm against the data pipeline.
        self.linear = nn.Linear(25088, embed_size)
        self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)

    def forward(self, images):
        """Encode images into embeddings of shape (batch, embed_size).

        Args:
            images: input image batch, shape (batch, 3, H, W).

        Returns:
            Tensor of shape (batch, embed_size).
        """
        # The backbone is a fixed feature extractor, so no gradients are
        # computed for it; only self.linear and self.bn receive gradients.
        with torch.no_grad():
            features = self.vgg(images)

        # Flatten the conv feature map to (batch, 25088).
        features = features.view(features.size(0), -1)

        features = self.bn(self.linear(features))

        return features


class DecoderRNN(nn.Module):
    """LSTM decoder: produces caption token logits from an image embedding."""

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers):
        """
        Args:
            embed_size (int): word/image embedding dimensionality.
            hidden_size (int): LSTM hidden state size.
            vocab_size (int): number of tokens in the vocabulary.
            num_layers (int): number of stacked LSTM layers.
        """
        super(DecoderRNN, self).__init__()

        self.embed = nn.Embedding(vocab_size, embed_size)

        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)

        # Two-layer output head: hidden_size -> 2048 -> vocab_size logits.
        self.linear1 = nn.Linear(hidden_size, 2048)
        self.linear2 = nn.Linear(2048, vocab_size)

    def forward(self, features, captions, lengths):
        """Teacher-forced decoding for training.

        Args:
            features: image embeddings, shape (batch, embed_size).
            captions: ground-truth token ids, shape (batch, max_caption_len).
            lengths: valid sequence lengths (list or 1-D tensor), counting the
                prepended image-feature step.

        Returns:
            Logits of shape (sum(lengths), vocab_size) — packed over the
            valid time steps only, padding excluded.
        """
        embeddings = self.embed(captions)
        # Prepend the image feature as the first "token" of every sequence.
        embeddings = torch.cat((features.unsqueeze(1), embeddings), 1)
        # pack_padded_sequence requires lengths on the CPU (torch >= 1.7);
        # normalize so callers may pass a Python list or a (GPU) tensor.
        lengths = torch.as_tensor(lengths, dtype=torch.int64, device="cpu")
        packed = pack_padded_sequence(embeddings, lengths, batch_first=True,
                                      enforce_sorted=False)
        out, _ = self.lstm(packed)
        # out[0] is the packed data tensor: (sum(lengths), hidden_size).
        outputs = self.linear1(out[0])
        outputs = F.leaky_relu(outputs)
        outputs = self.linear2(outputs)
        return outputs  # (sum(lengths), vocab_size)

    def sample(self, features, max_len, states=None):
        """Greedy decoding at inference time.

        Args:
            features: image embeddings, shape (batch, embed_size).
            max_len (int): number of tokens to generate.
            states: optional initial LSTM (h, c) state; defaults to zeros.

        Returns:
            Generated token ids, shape (batch, max_len).
        """
        sampled_ids = []
        inputs = features.unsqueeze(1)  # (batch, 1, embed_size)
        for _ in range(max_len):
            hiddens, states = self.lstm(inputs, states)  # hiddens: (batch, 1, hidden_size)
            outputs = self.linear1(hiddens.squeeze(1))   # (batch, 2048)
            outputs = F.leaky_relu(outputs)
            outputs = self.linear2(outputs)              # (batch, vocab_size)
            _, predicted = outputs.max(1)                # greedy argmax: (batch,)
            sampled_ids.append(predicted)
            # Feed the predicted token back in as the next input.
            inputs = self.embed(predicted)               # (batch, embed_size)
            inputs = inputs.unsqueeze(1)                 # (batch, 1, embed_size)
        sampled_ids = torch.stack(sampled_ids, 1)        # (batch, max_len)
        return sampled_ids


