import torch
import torch.nn as nn
import torchvision.models as models


class Encoder(nn.Module):
    """CNN encoder: maps input images to fixed-size embedding vectors.

    Uses a pretrained ResNet-152 trunk (classifier head removed), followed
    by a learned linear projection and 1-D batch normalization.
    """

    def __init__(self, embedding_size):
        super(Encoder, self).__init__()

        backbone = models.resnet152(pretrained=True)

        # Drop the final fully connected classifier — only the convolutional
        # trunk and average pooling are needed for feature extraction.
        self.resnet = nn.Sequential(*list(backbone.children())[:-1])

        # Project the pooled CNN features down to the requested embedding size.
        self.linear = nn.Linear(
            in_features=backbone.fc.in_features, out_features=embedding_size)
        self.BatchNorm = nn.BatchNorm1d(
            num_features=embedding_size, momentum=0.01)

    def forward(self, images):
        """Return a (batch, embedding_size) tensor of image embeddings."""
        pooled = self.resnet(images)
        flattened = pooled.view(pooled.size(0), -1)
        return self.BatchNorm(self.linear(flattened))


class MultiHeadAttention(nn.Module):
    """Scaled dot-product attention with multiple heads.

    Args:
        hidden_size: model dimension; must be divisible by head_nums.
        head_nums: number of attention heads.
        dropout: dropout probability applied to the attention weights.
    """

    def __init__(self, hidden_size, head_nums, dropout):
        super(MultiHeadAttention, self).__init__()
        if hidden_size % head_nums != 0:
            raise ValueError("hidden_size must be divisible by head_nums")
        self.hidden_size = hidden_size
        self.head_nums = head_nums
        self.head_dim = hidden_size // head_nums

        self.fc_q = nn.Linear(hidden_size, hidden_size)
        self.fc_k = nn.Linear(hidden_size, hidden_size)
        self.fc_v = nn.Linear(hidden_size, hidden_size)

        self.fc_o = nn.Linear(hidden_size, hidden_size)
        self.dropout = nn.Dropout(dropout)
        # Bug fix: scale attention scores by sqrt(head_dim) — the per-head
        # key dimension — not sqrt(head_nums) ("Attention Is All You Need",
        # eq. 1).  Stored as a plain float so the module no longer requires
        # CUDA (the old code called .cuda() here unconditionally).
        self.scale = self.head_dim ** 0.5

    def forward(self, query, key, value):
        """Apply multi-head attention.

        Args:
            query, key, value: (batch, seq_len, hidden_size) tensors.

        Returns:
            x: (batch, query_len, hidden_size) attended features.
            attention: (batch, head_nums, query_len, key_len) weights.
        """
        batch_size = query.size(0)

        Q = self.fc_q(query)
        K = self.fc_k(key)
        V = self.fc_v(value)

        # Split into heads: (batch, head_nums, seq_len, head_dim).
        Q = Q.view(batch_size, -1, self.head_nums, self.head_dim).permute(0, 2, 1, 3)
        K = K.view(batch_size, -1, self.head_nums, self.head_dim).permute(0, 2, 1, 3)
        V = V.view(batch_size, -1, self.head_nums, self.head_dim).permute(0, 2, 1, 3)

        # energy = [batch size, n heads, query len, key len]
        energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale

        attention = torch.softmax(energy, dim=-1)

        x = torch.matmul(self.dropout(attention), V)

        # x = [batch size, n heads, query len, head dim]

        x = x.permute(0, 2, 1, 3).contiguous()

        # x = [batch size, query len, n heads, head dim]

        x = x.view(batch_size, -1, self.hidden_size)

        # x = [batch size, query len, hid dim]

        x = self.fc_o(x)
        # x = [batch size, query len, hid dim]

        return x, attention

class MyLSTMCell(nn.Module):
    """Thin wrapper around nn.LSTMCell that fuses image and token features.

    NOTE(review): `image_feat=None` is accepted but the forward pass always
    concatenates it, so passing None raises — confirm callers always
    provide the image features.
    """

    def __init__(self, embedding_size, hidden_size):
        super(MyLSTMCell, self).__init__()
        self.lstm_cell = nn.LSTMCell(embedding_size, hidden_size, bias=True)

    def forward(self, state, current_embedding, image_feat=None):
        """Advance the LSTM one step from `state` = (h, c).

        The cell input is the concatenation [image_feat ; current_embedding],
        combining visual and textual information at every step.
        """
        prev_h, prev_c = state
        fused = torch.cat((image_feat, current_embedding), dim=1)
        next_h, next_c = self.lstm_cell(fused, (prev_h, prev_c))
        return next_h, next_c

# https://github.com/njchoma/transformer_image_caption/blob/master/src/models/simple_model.py
# https://github.com/njchoma/transformer_image_caption/blob/master/src/models/simple_model.py
class Decoder(nn.Module):
    """LSTM caption decoder with multi-head self-attention over hidden states.

    Each caption is decoded one token at a time by an LSTM cell whose input
    is [image_feat ; token_embedding]; the per-step hidden states are then
    refined with self-attention before the vocabulary projection.

    NOTE(review): the LSTM input size is ``embed_size * 2``, so the image
    features passed to ``forward``/``sample`` are assumed to be
    ``embed_size``-dimensional — confirm against the encoder.
    """

    def __init__(self, embed_size, hidden_size, vocab_size, device):
        super(Decoder, self).__init__()
        self.device = device
        self.embed_size = embed_size
        self.embed = nn.Embedding(
            num_embeddings=vocab_size, embedding_dim=embed_size)
        # Input is the concatenation of image and token features -> * 2.
        self.lstm = MyLSTMCell(embedding_size=embed_size * 2, hidden_size=hidden_size)
        self.linear = nn.Linear(in_features=hidden_size,
                                out_features=vocab_size)
        self.attention = MultiHeadAttention(hidden_size, 4, 0)
        self.hidden_size = hidden_size

    def forward(self, features, captions, length):
        """Teacher-forced decoding over a batch of captions.

        Args:
            features: (batch, feature_size) image features.
            captions: iterable of (1, T) token-id tensors, one per sample.
            length: real token lengths (unused here; kept for API compat).

        Returns:
            (total_tokens, vocab_size) logits over the vocabulary.
        """
        embeddings = [self.embed(caption) for caption in captions]
        features = features.unsqueeze(1)  # batch_size * 1 * feature_size
        # Prepend a zero embedding as the start-of-sequence input.
        # Bug fix: allocate on self.device (was hard-coded .cuda()) and use
        # embed_size (was hard-coded 256).
        for i in range(features.size(0)):
            start = torch.zeros((1, 1, self.embed_size), device=self.device)
            embeddings[i] = torch.cat((start, embeddings[i]), 1).contiguous()

        hiddens = []
        for j, embedding in enumerate(embeddings):
            h = torch.zeros((1, self.hidden_size), device=self.device)
            c = torch.zeros((1, self.hidden_size), device=self.device)
            hidden = None
            for i in range(embedding.size(1)):
                h, c = self.lstm((h, c), embedding[:, i], features[j])
                if i == embedding.size(1) - 1:
                    # The state produced after the final token predicts
                    # nothing, so it is not collected.
                    continue
                hidden = h if hidden is None else torch.cat((hidden, h), 0)
            hiddens.append(hidden)

        # Self-attention over each sample's hidden-state sequence.
        att_feat = None
        for hidden in hiddens:
            x, att = self.attention(hidden, hidden, hidden)
            x = x.squeeze(1)
            att_feat = x if att_feat is None else torch.cat((att_feat, x), 0)
        output = self.linear(att_feat.squeeze(0).contiguous()) # (batch x tokens_length) * labels_count
        return output

    def sample(self, features, longest_sentence_length=100):
        """Greedily decode a caption: emit the argmax token at each step.

        Args:
            features: (1, feature_size) image features for a single image.
            longest_sentence_length: maximum number of decoding steps.

        Returns:
            1-D tensor of sampled token ids.
        """
        sampled_ids = []
        image_feat = features  # batch size * 256
        # Bug fix: allocate everything on self.device — the old code mixed
        # .to(self.device) and hard-coded .cuda(), breaking CPU runs.
        h = torch.zeros((1, self.hidden_size), device=self.device)
        c = torch.zeros((1, self.hidden_size), device=self.device)
        embedding = torch.zeros((1, self.embed_size), device=self.device)
        for _ in range(longest_sentence_length):
            h, c = self.lstm((h, c), embedding, image_feat)
            # (h.squeeze(1) was a no-op on the 2-D hidden state; removed.)
            output = self.linear(h)
            predicted = output.max(dim=1, keepdim=True)[1]
            sampled_ids.append(predicted)
            inputs = self.embed(predicted)
            embedding = inputs.view(-1, self.embed_size)

        sampled_ids = torch.cat(sampled_ids, 1)

        return sampled_ids.squeeze()

if __name__ == "__main__":
    # Smoke test for the decoder. Pick a device instead of assuming CUDA.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Bug fix: Decoder requires a `device` argument; it was missing here.
    decoder = Decoder(256, 512, 19, device).to(device)
    captions = []
    caption = torch.tensor([0,  4,  5,  6,  7,  8,  7,  8,  7,  8,  9, 10,  5, 11,  5, 12,  7, 13,
          7, 14,  9,  9, 10,  5, 15,  5, 12,  7, 13,  7, 14,  9, 15,  5, 12,  7,
         13,  7, 14,  9,  9, 10,  5, 15,  5, 12,  7, 13,  7, 16,  9, 15,  5, 12,
          7, 13,  7, 14,  9,  9,  1])
    # Bug fix: the decoder expects each caption as (1, T); add the batch dim.
    captions.append(caption.unsqueeze(0).to(device))
    image_feat = torch.rand(1,  256).to(device)
    output = decoder(image_feat, captions, None)
    print(output.size())

