import torch
import torch.nn as nn
import torchvision.models as models


class Encoder(nn.Module):
    """CNN image encoder: ResNet-152 backbone followed by a learned projection.

    Produces a (batch, embedding_size) feature vector per image, batch-normalized
    so it lives in the same space as the decoder's token embeddings.
    """

    def __init__(self, embedding_size):
        super(Encoder, self).__init__()

        backbone = models.resnet152(pretrained=True)

        # Drop the final fully connected classifier; keep conv stages + global pooling.
        self.resnet = nn.Sequential(*list(backbone.children())[:-1])

        # Project the pooled CNN features down to the caption embedding size.
        self.linear = nn.Linear(
            in_features=backbone.fc.in_features, out_features=embedding_size)
        self.BatchNorm = nn.BatchNorm1d(
            num_features=embedding_size, momentum=0.01)

    def forward(self, images):
        """Encode a batch of images into (batch, embedding_size) features."""
        pooled = self.resnet(images)
        flat = pooled.view(pooled.size(0), -1)
        return self.BatchNorm(self.linear(flat))

class MyLSTMCell(nn.Module):
    """Thin wrapper around ``nn.LSTMCell`` that takes (h, c) as one state tuple."""

    def __init__(self, embedding_size, hidden_size):
        super(MyLSTMCell, self).__init__()
        self.lstm_cell = nn.LSTMCell(embedding_size, hidden_size, bias=True)

    def forward(self, state, current_embedding, image_feat=None):
        """Run a single LSTM step.

        Args:
            state: (h, c) tuple of previous hidden and cell states.
            current_embedding: (batch, embedding_size) input for this step.
            image_feat: unused; kept for interface compatibility with callers.

        Returns:
            (h, c) tuple of updated hidden and cell states.
        """
        return self.lstm_cell(current_embedding, state)

# https://github.com/njchoma/transformer_image_caption/blob/master/src/models/simple_model.py
class Decoder(nn.Module):
    """LSTM caption decoder.

    At every time step the image feature vector is concatenated with the
    current token embedding before entering the LSTM cell, so each generated
    token stays directly conditioned on the image.

    Args:
        embed_size: dimensionality of the token embeddings. Must also equal
            the encoder feature size, since feature and embedding are
            concatenated into a 2*embed_size LSTM input.
        hidden_size: LSTM hidden state size.
        vocab_size: number of tokens in the vocabulary.
        num_layers: kept for interface compatibility; a single LSTM cell is
            used, so this value is currently unused.
    """

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers):
        super(Decoder, self).__init__()

        self.embed_size = embed_size
        self.embed = nn.Embedding(
            num_embeddings=vocab_size, embedding_dim=embed_size)
        # Per-step input = image feature (embed_size) + token embedding (embed_size).
        self.lstm = MyLSTMCell(embedding_size=embed_size * 2, hidden_size=hidden_size)
        self.linear = nn.Linear(in_features=hidden_size,
                                out_features=vocab_size)
        self.hidden_size = hidden_size

    """
    features: batch * feature_size
    captions: batch * max_token_length
    length: batch size * 1, representing real token length
    """
    def forward(self, features, captions, length):
        """Teacher-forced decoding over a batch of captions.

        Args:
            features: (batch, embed_size) image features.
            captions: iterable of (1, token_length) LongTensors, one per image.
            length: batch * 1 real token lengths (currently unused here).

        Returns:
            (total_decoded_steps, vocab_size) logits over the vocabulary.
        """
        # Follow the device of the input tensors instead of hard-coding CUDA,
        # so the model also runs on CPU.
        device = features.device
        embeddings = [self.embed(caption) for caption in captions]
        features = features.unsqueeze(1)  # batch * 1 * feature_size
        for i in range(features.size(0)):
            # Prepend a zero "start" embedding. Previously a hard-coded
            # 256-dim zeros tensor, which broke any other embed_size.
            start = torch.zeros((1, 1, self.embed_size), device=device)
            embeddings[i] = torch.cat((start, embeddings[i]), 1).contiguous()
            f = features[i].unsqueeze(0)
            f = f.repeat((1, embeddings[i].size(1), 1))
            embeddings[i] = torch.cat((f, embeddings[i]), 2)
        # Because the image feature is concatenated at every position, each
        # subsequent token remains connected to the image feature map.
        hiddens = None
        for embedding in embeddings:
            h = torch.zeros((1, self.hidden_size), device=device)
            c = torch.zeros((1, self.hidden_size), device=device)
            # The step fed with the final token would predict past the end of
            # the caption; its output was discarded, so skip computing it.
            for i in range(embedding.size(1) - 1):
                h, c = self.lstm((h, c), embedding[:, i])
                hiddens = h if hiddens is None else torch.cat((hiddens, h), 0)
        output = self.linear(hiddens.squeeze(0).contiguous())  # (batch x tokens_length) * labels_count
        return output

    def sample(self, features, states=None, longest_sentence_length=100):
        """Greedily decode a caption from image features.

        Args:
            features: (1, embed_size) image features — batch size 1 is assumed
                (the hidden state is initialized with batch dimension 1).
            states: unused; kept for interface compatibility.
            longest_sentence_length: maximum number of tokens to generate.

        Returns:
            1-D LongTensor of predicted token ids.
        """
        device = features.device
        image_feat = features  # batch size * embed_size
        h = torch.zeros((1, self.hidden_size), device=device)
        c = torch.zeros((1, self.hidden_size), device=device)
        # Zero "start" embedding (previously a hard-coded 256-dim zeros).
        start = torch.zeros((1, self.embed_size), device=device)
        inputs = torch.cat((image_feat, start), 1)
        sampled_ids = []
        for _ in range(longest_sentence_length):
            h, c = self.lstm((h, c), inputs)
            output = self.linear(h.squeeze(1))  # squeeze(1) is a no-op for hidden_size > 1
            predicted = output.max(dim=1, keepdim=True)[1]
            sampled_ids.append(predicted)
            inputs = self.embed(predicted)
            inputs = inputs.view(-1, self.embed_size)
            inputs = torch.cat((image_feat, inputs), 1)

        sampled_ids = torch.cat(sampled_ids, 1)

        return sampled_ids.squeeze()
