import torch
import torch.nn as nn
import torchvision.models as models
from .model_utils import *
import math


class LstmEncoder(nn.Module):
    """LSTM over zero-padded, variable-length sequences.

    Wraps ``nn.LSTM`` with pack/pad handling so padding positions do not
    pollute the hidden states.
    """

    def __init__(self, input_size, hidden_size, dropout, bias, num_layers, device):
        super(LstmEncoder, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.device = device
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size, bias=bias, dropout=dropout,
                            num_layers=num_layers, batch_first=True).to(device)

    def forward(self, X, length):
        """Run the LSTM over a padded batch.

        Args:
            X: (batch, max_len, input_size) zero-padded inputs.
            length: (batch,) true sequence lengths.

        Returns:
            output: (batch, max_len, hidden_size), zero-padded past each length.
            state: (h, c) final states, in the ORIGINAL batch order.
        """
        # BUG FIX: the previous manual sort/unsort restored `output` to the
        # original batch order but returned `state` still in length-sorted
        # order. `enforce_sorted=False` lets PyTorch sort internally and
        # unsort BOTH output and state. Lengths must be on CPU per the
        # pack_padded_sequence contract.
        pack = torch.nn.utils.rnn.pack_padded_sequence(
            X, length.cpu(), batch_first=True, enforce_sorted=False)
        state = self.init_state(X.size(0))
        output, state = self.lstm(pack, state)
        output, _ = torch.nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
        return output, state

    def init_state(self, batch_size):
        """Return an all-zero (h, c) pair on the configured device."""
        return (torch.zeros((self.num_layers, batch_size, self.hidden_size)).to(self.device),
                torch.zeros((self.num_layers, batch_size, self.hidden_size)).to(self.device))

    def sample(self, X, state):
        """Single decoding step: feed X (already embedded) with an explicit state."""
        output, state = self.lstm(X, state)
        return output, state

class ImageEncoder(nn.Module):
    """ResNet-50 backbone producing a per-channel spatial feature embedding.

    For a 224x224 input: resnet trunk -> (B, 2048, 7, 7); 1x1 conv squeezes
    channels to 256; the 7x7 map is flattened to 49 and projected to 256,
    giving (B, 256, 256).
    """

    def __init__(self, device):
        super(ImageEncoder, self).__init__()

        # NOTE(review): `pretrained=True` is deprecated in newer torchvision
        # (use `weights=...`); kept for compatibility with the file's style.
        resnet = models.resnet50(pretrained=True)

        # Remove the fully connected layers, since we don't need the original resnet classes anymore
        modules = list(resnet.children())[:-2]
        self.resnet = nn.Sequential(*modules).to(device)
        self.squeeze = nn.Conv2d(in_channels=2048, out_channels=256, kernel_size=1).to(device)
        # BUG FIX: `dense` was the only layer not moved to `device`, causing a
        # device-mismatch crash whenever `device` is a GPU.
        self.dense = nn.Linear(49, 256).to(device)

    def forward(self, X):
        """Encode images X (B, 3, H, W) into (B, 256, 256) features."""
        x = self.resnet(X)
        output = self.squeeze(x)
        # Flatten the spatial grid: (B, 256, 7, 7) -> (B, 256, 49)
        output = output.reshape((output.size(0), output.size(1), -1))
        output = self.dense(output)
        return output


class AdditiveAttention(nn.Module):
    """Additive (Bahdanau) attention: score(q, k) = w_v . tanh(W_q q + W_k k)."""

    def __init__(self, query_size, key_size, num_hiddens, dropout, **kwargs):
        super(AdditiveAttention, self).__init__(**kwargs)
        self.W_k = nn.Linear(key_size, num_hiddens, bias=False)
        self.W_q = nn.Linear(query_size, num_hiddens, bias=False)
        self.w_v = nn.Linear(num_hiddens, 1, bias=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, queries, keys, values, valid_lens=None):
        """Attend over `values` using additive scores between queries and keys.

        queries: (batch, n_q, query_size); keys: (batch, n_kv, key_size);
        values: (batch, n_kv, value_dim). Returns (batch, n_q, value_dim).
        """
        # Project both sides into the shared num_hiddens space.
        q_proj = self.W_q(queries)   # (batch, n_q, num_hiddens)
        k_proj = self.W_k(keys)      # (batch, n_kv, num_hiddens)
        # Broadcast-sum every query against every key:
        # (batch, n_q, 1, h) + (batch, 1, n_kv, h) -> (batch, n_q, n_kv, h)
        combined = torch.tanh(q_proj.unsqueeze(2) + k_proj.unsqueeze(1))
        # w_v maps each h-vector to a scalar; drop that singleton dim to get
        # scores of shape (batch, n_q, n_kv).
        scores = self.w_v(combined).squeeze(-1)
        # masked_softmax (from model_utils) ignores positions past valid_lens.
        self.attention_weights = masked_softmax(scores, valid_lens)
        # Weighted sum of values, with dropout applied to the weights.
        return torch.bmm(self.dropout(self.attention_weights), values)


class LstmDecoder(nn.Module):
    """LSTM followed by a linear projection to per-step vocabulary logits."""

    def __init__(self, input_size, hidden_size, vocab_size, dropout, num_layers, device):
        super(LstmDecoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.dropout = dropout
        self.num_layers = num_layers
        self.device = device

        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size, dropout=dropout, num_layers=num_layers,
                            batch_first=True).to(device)
        self.dense = nn.Linear(hidden_size, vocab_size).to(device)

    def init_state(self, batch_size):
        """Build a fresh all-zero (h, c) pair on the configured device."""
        shape = (self.num_layers, batch_size, self.hidden_size)
        h0 = torch.zeros(shape).to(device=self.device)
        c0 = torch.zeros(shape).to(device=self.device)
        return h0, c0

    def forward(self, X, state):
        """Run the LSTM over X (batch, T, input_size) and project to logits.

        Returns ((batch, T, vocab_size) logits, updated (h, c) state).
        """
        hidden_seq, new_state = self.lstm(X, state)
        logits = self.dense(hidden_seq)
        return logits, new_state

class Pipeline(nn.Module):
    """End-to-end captioning model.

    Images are encoded by ``ImageEncoder``; token embeddings (with a zero
    <start> step prepended) run through ``LstmEncoder``; additive attention
    mixes image features per time step; the concatenation feeds
    ``LstmDecoder`` which emits vocabulary logits.
    """

    def __init__(self, vocab_size, embedding_size,
                 queries_size, keys_size, num_hiddens, dropout,  ## ImageAttention
                 lstm_intput_size, lstm_num_hiddens, lstm_dropout, bias, num_layers,  ## LstmEncoder
                 decoder_input_size, decoder_num_hiddens, decoder_dropout, decoder_num_layers,
                 device):
        super(Pipeline, self).__init__()
        self.embedding_size = embedding_size
        self.device = device

        self.embed = nn.Embedding(
            num_embeddings=vocab_size,
            embedding_dim=embedding_size
        ).to(device)

        self.imageEncoder = ImageEncoder(device)
        self.imageAttention = AdditiveAttention(queries_size, keys_size, num_hiddens, dropout)

        self.lstmEncoder = LstmEncoder(lstm_intput_size, lstm_num_hiddens, lstm_dropout, bias, num_layers, device)

        self.lstmDecoder = LstmDecoder(decoder_input_size, decoder_num_hiddens, vocab_size,
                                       decoder_dropout, decoder_num_layers, device)

    def forward(self, X, contexts, valid_lens):
        """Teacher-forced training pass.

        Args:
            X: (batch, 3, H, W) images.
            contexts: (batch, T) token ids.
            valid_lens: (batch,) true caption lengths.

        Returns:
            (batch, T, vocab_size) logits aligned with `contexts`.
        """
        V = self.imageEncoder(X)  # batch_size * channel * 256
        embeddings = self.embed(contexts)  # batch_size * time_step * embedding_size
        # Prepend an all-zero <start> embedding to every sequence.
        # BUG FIX: the start-step width was hard-coded to 256, breaking any
        # embedding_size != 256. Also replaces the per-sample Python loop with
        # one batched concatenation (identical result, no copy loop).
        start = torch.zeros((X.size(0), 1, self.embedding_size)).to(self.device)
        embeddings = torch.cat((start, embeddings), dim=1)

        H, _ = self.lstmEncoder(embeddings, valid_lens + 1)  # batch_size * time_step * hidden_size
        P = self.imageAttention(H, V, V, valid_lens + 1)  # batch_size * time_step * feature_size
        R = torch.cat((P, H), dim=2)
        state = self.lstmDecoder.init_state(X.size(0))
        output, _ = self.lstmDecoder(R, state)
        # Drop the final step so logits align with the unshifted targets.
        return output[:, :-1, :]

    def sample(self, X, end_vocab, max_len=100):
        """Greedy decoding for a SINGLE image (batch size 1).

        Feeds a zero <start> embedding first, then the argmax token of each
        step back in, stopping at `end_vocab` or after `max_len` steps.
        Returns a 1-D tensor of predicted token ids.
        """
        V = self.imageEncoder(X)

        encodeState = self.lstmEncoder.init_state(X.size(0))
        state = self.lstmDecoder.init_state(X.size(0))
        ret = []
        context = None
        for i in range(max_len):
            if i == 0:
                embedding = torch.zeros((1, 1, self.embedding_size)).to(self.device)
            else:
                embedding = self.embed(context)
            H, encodeState = self.lstmEncoder.sample(embedding, encodeState)
            P = self.imageAttention(H, V, V)
            R = torch.cat((P, H), dim=2)
            output, state = self.lstmDecoder(R, state)
            predicted = output.argmax(dim=2, keepdim=False)  # (1, 1) token id
            ret.append(predicted)
            context = predicted
            if predicted.item() == end_vocab:
                break

        ret = torch.cat(ret, 1)
        return ret.squeeze()


if __name__ == '__main__':

    # Manual smoke tests for each module, kept commented out; uncomment a
    # section and run the file directly to exercise it. Note some snippets
    # predate the `device` constructor argument and would need it added.

    '''
    LSTM Encoder Test
     '''
    # a = torch.rand((1, 10, 100))
    # b = torch.rand((1, 10, 100))
    # X = torch.cat((a, b), dim=0)
    # length = torch.tensor([5, 10], dtype=torch.int64)
    # print(length.shape)
    # print(X.shape)
    # lstmEncoder = LstmEncoder(100, 256, 0.1, True, 3)
    # output, state = lstmEncoder(X, length)
    # print(output.shape)
    # print('*************************')
    # X = torch.rand((1, 1, 100))
    # state = lstmEncoder.init_state(1)
    # output = lstmEncoder.sample(X, state)
    # print(output.size())

    '''
    Image Encoder Test
    '''
    # X = torch.rand((1, 3, 224, 224))
    # imageEncoder = ImageEncoder("cpu")
    # output = imageEncoder(X)
    # print(output.size())

    '''
    Additive Attention Test
    def __init__(self, query_size, key_size, num_hiddens, dropout, **kwargs):
    '''
    # a = torch.rand((1, 5, 256))
    # b = torch.rand((1, 5, 256))
    # queries = torch.cat((a, b), dim=0)
    # length = torch.tensor([3, 5])
    # key = torch.rand((2, 256, 49))
    # attention = AdditiveAttention(256, 49, 256, 0.1)
    # output = attention(queries, key, key, valid_lens=length)
    # print(output.size())

    '''
    Lstm Decoder Test
    def __init__(self, input_size, hidden_size, vocab_size, dropout, num_layers):
    '''
    # X = torch.rand((2, 61, 256))
    # decoder = LstmDecoder(256, 512, 17, 0.1, 1)
    # state = decoder.init_state(2)
    # output, state = decoder(X, state)
    # print(output.size())
    # print(state[0].size())
    # print(state[1].size())

    '''
    Pipeline Test
     def __init__(self, vocab_size, embedding_size,
                 queries_size, keys_size, num_hiddens, dropout, ## ImageAttention
                 lstm_intput_size, lstm_num_hiddens, lstm_dropout, bias, num_layers, ## LstmEncoder
                 decoder_input_size, decoder_num_hiddens, decoder_dropout, decoder_num_layers,
                 device):
    '''
    # X = torch.rand((2, 3, 224, 224))
    # context = torch.LongTensor(torch.randint(17, (2, 77)))
    # length = torch.tensor([61, 77])
    # pipeline = Pipeline(vocab_size=17, embedding_size=256,
    #                     queries_size=256, keys_size=49, num_hiddens=256, dropout=0.1,
    #                     lstm_intput_size=256, lstm_num_hiddens=512, lstm_dropout=0.1, bias=True, num_layers=2,
    #                     decoder_input_size=512 + 49, decoder_num_hiddens=256, decoder_dropout=0.1, decoder_num_layers=1,
    #                     device='cpu'
    #                     )
    # print(pipeline.state_dict())
    # output = pipeline(X, context, length)
    # print(output.size())

    '''
    Sample Test
    def sample(self, X, context, end_vocab, max_len=100):
    '''
