import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

class Learner(nn.Module):
    """Differentiable multi-hop reasoner over a knowledge-graph database.

    An LSTM controller reads the (embedded) query step by step; at each step
    it emits soft attention over relation operators (applied as sparse matrix
    products) and over previously computed memory states, composing a chain of
    relations from a tail entity towards the predicted head entity.
    """

    def __init__(self, opt, device):
        super(Learner, self).__init__()

        self.device = device

        self.num_step = opt.num_step
        self.rnn_state_size = opt.rnn_state_size

        self.norm = not opt.no_norm
        # Numeric floor used inside log() so zero scores never produce -inf.
        self.thr = torch.Tensor([opt.thr]).to(self.device)
        self.dropout = opt.dropout
        self.accuracy = opt.accuracy  # if True, top_k must be 1
        self.top_k = opt.top_k

        self.num_entity = opt.num_entity
        self.num_operator = opt.num_operator  # stored relations + their inverses
        self.query_is_language = opt.query_is_language

        if not opt.query_is_language:
            self.num_query = opt.num_query
            self.query_embed_size = opt.query_embed_size
        else:
            self.vocab_embed_size = opt.vocab_embed_size
            self.query_embed_size = self.vocab_embed_size
            self.num_vocab = opt.num_vocab
            self.num_word = opt.num_word

        # BUG FIX: hidden_size was query_embed_size, which disagreed with both
        # init_hidden() and self.linear (sized rnn_state_size) and crashed
        # whenever the two sizes differ. The two original branches were
        # identical, so a single cell replaces them.
        self.rnn = nn.LSTMCell(input_size=self.query_embed_size,
                               hidden_size=self.rnn_state_size)

        # Maps the controller state to attention logits over the operators.
        self.linear = nn.Linear(self.rnn_state_size, self.num_operator)

        self.do_dropout = nn.Dropout(p=self.dropout)

        self.init_weights()

    def init_weights(self):
        """ Initialize and prunes weights if needed. """
        # Initialize weights
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """ Initialize the weights of one submodule (used via self.apply). """
        if isinstance(module, nn.Linear):
            module.weight.data.normal_()
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
        # BUG FIX: the model uses nn.LSTMCell; the old isinstance(nn.LSTM)
        # check never matched, so the recurrent weights kept PyTorch defaults.
        if isinstance(module, (nn.LSTM, nn.LSTMCell)):
            for name, param in module.named_parameters():
                if "weight" in name:
                    torch.nn.init.xavier_normal_(param)
                if "bias" in name:
                    torch.nn.init.zeros_(param)

    def _random_uniform_unit(self, r, c):
        """ Initialize random and unit row norm matrix of size (r, c). """
        bound = 6. / np.sqrt(c)
        init_matrix = np.random.uniform(-bound, bound, (r, c))
        # Vectorized row normalization (the old per-row Python map was slow).
        init_matrix /= np.linalg.norm(init_matrix, axis=1, keepdims=True)
        return init_matrix

    def _build_onehot(self, indices):
        """ One-hot encode entity indices: (b,) -> (b, num_entity). """
        onehotvec = F.one_hot(indices, num_classes=self.num_entity)
        return onehotvec

    def _build_input(self, queries):
        """ Embed the query tensor into a list of per-step RNN inputs.

        :param queries: (b, num_step) relation ids, or (b, num_step, num_word)
                        word ids when the query is natural language
        :return: list of num_step tensors, each of size (b, query_embed_size)
        """
        # NOTE(review): the embedding table is re-sampled from numpy on every
        # forward pass and is not an nn.Parameter, so it is neither trainable
        # nor stable across calls — presumably it should be built once in
        # __init__; confirm against the reference implementation.
        if not self.query_is_language:  # query size = (b, num_step)
            query_embedding_params = torch.from_numpy(self._random_uniform_unit(
                self.num_query + 1,  # +1 for the <END> token
                self.query_embed_size).astype(float)).to(self.device)  # (num_query+1, query_embed_size)
            # BUG FIX: dropped the .squeeze() on queries[:, t]; it collapsed
            # the batch dimension for b == 1 and broke index_select.
            rnn_inputs = [torch.index_select(query_embedding_params, 0, queries[:, t])
                          for t in range(self.num_step)]
        else:  # query size = (b, num_step, num_word)
            vocab_embedding_params = torch.from_numpy(self._random_uniform_unit(
                self.num_vocab + 1,  # +1 for the <END> token
                self.vocab_embed_size).astype(float)).to(self.device)
            # BUG FIX: torch.from_numpy() takes no dtype/device kwargs, so the
            # old call raised TypeError. Embed each word, sum over the word
            # axis, and unbind into a per-step list to match the other branch.
            embedded_query = F.embedding(queries, vocab_embedding_params)  # (b, num_step, num_word, E)
            rnn_inputs = list(embedded_query.sum(dim=2).unbind(dim=1))  # num_step x (b, E)

        return rnn_inputs

    def init_hidden(self, bsz):
        """ Zero (h, c) states for the LSTM cell, on the model's device/dtype. """
        weight = next(self.parameters())
        return (weight.new_zeros(bsz, self.rnn_state_size),
                weight.new_zeros(bsz, self.rnn_state_size))

    def forward(self, qq, tails, heads, mdb):
        '''
        Run num_step reasoning steps and score candidate head entities.

        :param qq: list (length b) of query relation ids, or of word-id lists
                   when the query is language
        :param tails: list/array of tail entity ids, length b
        :param heads: list/array of gold head entity ids, length b
        :param mdb: dict {r: (indices, values, dense_shape)} describing one
                    sparse (num_entity, num_entity) adjacency per relation
        :return: (batch_loss, final_loss, in_top, predictions) — a scalar,
                 a (b,) loss vector, a (b,) bool hit@top_k vector, and the
                 (b, num_entity) score matrix
        '''

        ''' Prepare batch data. '''
        heads = torch.from_numpy(np.array(heads)).to(self.device)
        tails = torch.from_numpy(np.array(tails)).to(self.device)
        # Repeat the query for every step; the final step gets an <END> token.
        if not self.query_is_language:
            queries = [[q] * (self.num_step - 1) + [self.num_query] for q in qq]
        else:
            queries = [[q] * (self.num_step - 1) + [[self.num_vocab] * self.num_word] for q in qq]
        queries = torch.from_numpy(np.array(queries)).to(self.device)

        # Only the first half of the operators is stored; the second half is
        # derived on the fly as the transpose (inverse relation).
        database = {}
        for r in range(int(self.num_operator / 2)):
            indices, values, dense_shape = mdb[r]
            indices = torch.from_numpy(np.array(indices)).to(self.device)
            values = torch.from_numpy(np.array(values)).float().to(self.device)
            # BUG FIX: torch.sparse_coo_tensor replaces the deprecated
            # torch.sparse.FloatTensor constructor, which also rejected the
            # float64 values produced by np.array of Python floats.
            database[r] = torch.sparse_coo_tensor(indices.t(), values, torch.Size(dense_shape))

        targets = self._build_onehot(heads).float()  # (b, num_entity)

        ''' Controller: one soft attention over operators per step. '''
        rnn_inputs = self._build_input(queries)  # num_step x (b, query_embed_size)
        rnn_inputs = torch.stack(rnn_inputs, dim=0).float()  # (num_step, b, query_embed_size)

        rnn_outputs = []
        attention_operators = []
        ht, ct = self.init_hidden(targets.size(0))
        for rnn_input in rnn_inputs:  # (b, query_embed_size)
            ht, ct = self.rnn(rnn_input, (ht, ct))
            rnn_outputs.append(ht)

            a_t = F.softmax(self.linear(ht), dim=1)  # (b, num_operator)
            a_t = a_t.unsqueeze(1)  # (b, 1, num_operator)
            # One (b, 1) attention weight per operator.
            attention_operators.append(list(a_t.unbind(dim=2)))

        rnn_outputs = torch.stack(rnn_outputs, dim=0)  # (num_step, b, rnn_state_size)

        ''' Memory: differentiable multi-hop graph walk. '''
        memories = self._build_onehot(tails).unsqueeze(1).float()  # (b, grows to t+1, num_entity)
        attention_memories = []
        for t in range(self.num_step):
            # Attention over all previous controller states (keys = h_0..h_t).
            attention_memories.append(F.softmax(
                torch.matmul(
                    rnn_outputs[t].unsqueeze(1),
                    rnn_outputs[:t + 1].permute(1, 2, 0)),
                dim=-1).squeeze(1))  # (b, t+1)

            memory_read = torch.matmul(
                attention_memories[t].unsqueeze(1),  # (b, 1, t+1)
                memories  # (b, t+1, num_entity)
            ).squeeze(1)  # (b, num_entity)

            if t < self.num_step - 1:
                database_results = []
                memory_read = memory_read.t()  # (num_entity, b)
                for r in range(int(self.num_operator / 2)):
                    # Pair each stored operator with its transpose (inverse
                    # relation) and the matching attention weights.
                    for op_matrix, op_attn in zip(
                            [database[r],
                             database[r].to_dense().t().to_sparse()],  # sparse (num_entity, num_entity)
                            [attention_operators[t][r],
                             attention_operators[t][int(r + self.num_operator / 2)]]):  # (b, 1)
                        product = torch.sparse.mm(op_matrix.float(), memory_read)  # (num_entity, b)
                        database_results.append(product.t() * op_attn)  # (b, num_entity)

                added_database_results = sum(database_results)

                # NOTE(review): the self.norm renormalization was disabled in
                # the original; re-enable here if scores blow up:
                # if self.norm:
                #     added_database_results /= torch.max(
                #         self.thr, added_database_results.sum(dim=1, keepdim=True))

                if self.dropout > 0.:
                    added_database_results = self.do_dropout(added_database_results)

                memories = torch.cat(
                    [memories,
                     added_database_results.unsqueeze(1)], dim=1)  # (b, t+2, num_entity)
            else:
                predictions = memory_read  # (b, num_entity)

        # Clamp by thr so log() never sees a zero score.
        final_loss = - torch.sum(targets * torch.log(torch.max(self.thr, predictions)), dim=1)
        batch_loss = torch.mean(final_loss)

        # Hit@top_k evaluation.
        _, inds = torch.topk(predictions, k=self.top_k)  # (b, top_k)
        if self.accuracy:  # requires top_k == 1
            # BUG FIX: squeeze(-1) instead of squeeze() so a batch of one does
            # not also lose its batch dimension.
            in_top = torch.eq(inds.squeeze(-1), heads)  # (b)
        else:
            tmp = torch.eq(inds, heads.unsqueeze(-1).expand(-1, inds.size(-1)))  # (b, top_k)
            tmp = torch.sum(tmp, dim=1)  # (b)
            in_top = torch.gt(tmp, 0)  # (b)

        return batch_loss, final_loss, in_top, predictions
