import logging
from typing import Any, Dict, List

import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.functional import nll_loss

from allennlp.common import Params, squad_eval
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.tri_linear_attention import TriLinearAttention
from allennlp.modules.variational_dropout import VariationalDropout
from allennlp.nn import InitializerApplicator, util
from allennlp.training.metrics import Average, BooleanAccuracy, CategoricalAccuracy
from allennlp.modules.variational_dropout import VariationalDropout
from torch.nn.functional import softmax, log_softmax, cross_entropy
import numpy as np
import importlib

logger = logging.getLogger('brc')  # pylint: disable=invalid-name

def build(self, cfg):
    """Instantiate an object described by a config dict.

    ``cfg`` must contain a dotted class path under ``'type'`` (e.g.
    ``'pkg.module.ClassName'``) and a keyword-argument dict under
    ``'kwargs'``; returns ``ClassName(**kwargs)``.
    """
    assert 'type' in cfg and 'kwargs' in cfg, cfg.keys()
    mod_path, class_name = cfg['type'].rsplit('.', 1)
    target_cls = getattr(importlib.import_module(mod_path), class_name)
    return target_cls(**cfg['kwargs'])

class AttnPool(torch.nn.Module):
    """Attention pooling over a sequence.

    Scores every timestep with a small two-layer MLP (Linear -> tanh ->
    Linear), normalizes the scores with a masked softmax, and returns the
    attention-weighted sum of the (un-dropped) memory.
    """

    def __init__(self, inplanes, hidden_dim, dropout_p=0.5):
        super(AttnPool, self).__init__()
        self.inplanes = inplanes
        self.hidden_dim = hidden_dim
        self.fc_in = torch.nn.Linear(inplanes, hidden_dim)
        self.fc_out = torch.nn.Linear(hidden_dim, 1, bias=False)
        self.dropout = VariationalDropout(dropout_p)

    def forward(self, memory, mask):
        '''
            memory: [batch, seq_len, inplanes]
            mask: [batch, seq_len]

            Returns: [batch, inplanes] — attention-pooled memory.
        '''
        # Dropout is applied only to the scoring branch; the weighted sum
        # below still uses the original `memory`.
        d_memory = self.dropout(memory)
        s0 = torch.tanh(self.fc_in(d_memory))  # F.tanh is deprecated
        # Squeeze only the scoring dim: a bare .squeeze() also removed the
        # batch dim when batch == 1, breaking the mask broadcast below.
        s = self.fc_out(s0).squeeze(-1)
        # Padding positions get a huge negative score so softmax ~ 0 there.
        masked_s = util.replace_masked_values(s, mask, -1e7)
        a = F.softmax(masked_s, dim=-1).unsqueeze(2)
        a_memory = memory * a
        return a_memory.sum(dim=1)

@Model.register("bidaf-rank-self-atten")
class DuReaderRankBidafSelfAttn(Model):
    """
    BiDAF-style paragraph ranker for DuReader.

    Encodes a question and its candidate passages, applies bi-directional
    (passage<->question) attention followed by a residual self-attention
    layer, attention-pools each passage into one vector, and produces one
    ranking score per passage.  Training supervises the score against
    ``input['most_related_para']`` with either softmax cross-entropy or
    per-passage sigmoid BCE, depending on ``activation``.

    NOTE(review): the passage batch is assumed to contain the same number of
    candidate passages per question, stacked so that
    ``passage_batch == question_batch * max_p_num`` — confirm against the
    dataset reader.
    """

    def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 phrase_layer: Seq2SeqEncoder,
                 residual_encoder: Seq2SeqEncoder,
                 initializer: InitializerApplicator,
                 dropout: float = 0.2,
                 activation: str = 'softmax',
                 mask_lstms: bool = True) -> None:
        super(DuReaderRankBidafSelfAttn, self).__init__(vocab)

        self._text_field_embedder = text_field_embedder
        self._phrase_layer = phrase_layer
        # Hard-coded 200-dim attention: assumes phrase_layer outputs
        # 200-dimensional encodings — TODO confirm from the model config.
        self._matrix_attention = TriLinearAttention(200)
        # 'softmax' -> cross-entropy over passages; anything else ->
        # independent sigmoid + BCE per passage (see forward()).
        self.activation = activation

        # Projects the 4-way BiDAF concatenation (4 * 200) back to 200.
        self._merge_atten = TimeDistributed(torch.nn.Linear(200 * 4, 200))

        self._residual_encoder = residual_encoder
        self._self_atten = TriLinearAttention(200)
        # Projects the 3-way self-attention concatenation back to 200.
        self._merge_self_atten = TimeDistributed(torch.nn.Linear(200 * 3, 200))
        # Pools each passage's token representations into a single vector.
        self.attn_pool = AttnPool(200, 200, dropout_p=dropout)
        # Maps the pooled passage vector to one scalar ranking score.
        self.fc = torch.nn.Linear(200, 1)

        initializer(self)

        if dropout > 0:
            # self._dropout = torch.nn.Dropout(p=dropout)
            self._dropout = VariationalDropout(p=dropout)
        else:
            # dropout <= 0 is rejected outright; the identity fallback
            # below was deliberately disabled.
            raise ValueError()
            # self._dropout = lambda x: x
        self._mask_lstms = mask_lstms

    def forward(self,  # type: ignore
                input,
                metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
        # pylint: disable=arguments-differ
        """
        Score each candidate passage for its question.

        Parameters
        ----------
        input : Dict[str, Any]
            Must contain ``'question'`` and ``'passage'`` text-field tensor
            dicts.  The passage batch is ``real_batch_size * max_p_num``
            rows (every question's candidates stacked).  During training it
            must also contain ``'most_related_para'``: the gold passage
            index per question.
        metadata : ``List[Dict[str, Any]]``, optional
            Accepted for API compatibility; not used in this method.

        Returns
        -------
        An output dictionary consisting of:
        score : torch.FloatTensor
            Shape ``(real_batch_size, max_p_num)`` — per-passage scores,
            softmax- or sigmoid-normalised depending on ``self.activation``.
        loss : torch.FloatTensor, optional
            Training only: cross-entropy or BCE ranking loss.
        acc : torch.FloatTensor, optional
            Training only: precision@1 over passages, in percent.
        """
        question = input['question']
        passage = input['passage']

        embedded_question = self._dropout(self._text_field_embedder(question))
        embedded_passage = self._dropout(self._text_field_embedder(passage))
        real_batch_size = embedded_question.size(0)
        batch_size = embedded_passage.size(0)
        # Number of candidate passages per question.
        max_p_num = batch_size // real_batch_size
        assert batch_size % real_batch_size == 0, 'fake:{}, real:{}'.format(batch_size, real_batch_size)
        q_shape = embedded_question.shape
        # Tile the question so there is one copy aligned with each passage.
        embedded_question = embedded_question.unsqueeze(dim=1).expand(
            q_shape[0], max_p_num, q_shape[1], q_shape[2]).contiguous().view(-1, q_shape[1], q_shape[2])
        assert embedded_question.shape[0] == batch_size
        passage_length = embedded_passage.size(1)
        question_mask = util.get_text_field_mask(question).float()
        # Tile the question mask the same way as the question embeddings.
        question_mask = question_mask.unsqueeze(dim=1).expand(q_shape[0], max_p_num, q_shape[1]).contiguous().view(-1, q_shape[1])
        passage_mask = util.get_text_field_mask(passage).float()
        question_lstm_mask = question_mask if self._mask_lstms else None
        passage_lstm_mask = passage_mask if self._mask_lstms else None

        encoded_question = self._dropout(self._phrase_layer(embedded_question, question_lstm_mask))
        encoded_passage = self._dropout(self._phrase_layer(embedded_passage, passage_lstm_mask))
        encoding_dim = encoded_question.size(-1)

        # Shape: (batch_size, passage_length, question_length)
        passage_question_similarity = self._matrix_attention(encoded_passage, encoded_question)
        # Shape: (batch_size, passage_length, question_length)
        passage_question_attention = util.last_dim_softmax(passage_question_similarity, question_mask)
        # Shape: (batch_size, passage_length, encoding_dim)
        passage_question_vectors = util.weighted_sum(encoded_question, passage_question_attention)

        # We replace masked values with something really negative here, so they don't affect the
        # max below.
        masked_similarity = util.replace_masked_values(passage_question_similarity,
                                                       question_mask.unsqueeze(1),
                                                       -1e7)
        # Shape: (batch_size, passage_length)
        question_passage_similarity = masked_similarity.max(dim=-1)[0].squeeze(-1)
        # Shape: (batch_size, passage_length)
        question_passage_attention = util.masked_softmax(question_passage_similarity, passage_mask)
        # Shape: (batch_size, encoding_dim)
        question_passage_vector = util.weighted_sum(encoded_passage, question_passage_attention)
        # Shape: (batch_size, passage_length, encoding_dim)
        tiled_question_passage_vector = question_passage_vector.unsqueeze(1).expand(batch_size,
                                                                                    passage_length,
                                                                                    encoding_dim)

        # Standard BiDAF merge of the four attention views.
        # Shape: (batch_size, passage_length, encoding_dim * 4)
        final_merged_passage = torch.cat([encoded_passage,
                                          passage_question_vectors,
                                          encoded_passage * passage_question_vectors,
                                          encoded_passage * tiled_question_passage_vector],
                                         dim=-1)

        final_merged_passage = F.relu(self._merge_atten(final_merged_passage))

        residual_layer = self._dropout(self._residual_encoder(self._dropout(final_merged_passage), passage_mask))
        self_atten_matrix = self._self_atten(residual_layer, residual_layer)

        # Pairwise token mask for self-attention.
        # NOTE(review): .resize() is deprecated in modern PyTorch; this
        # relies on the old view-like semantics — verify on upgrade.
        mask = passage_mask.resize(batch_size, passage_length, 1) * passage_mask.resize(batch_size, 1, passage_length)

        # torch.eye does not have a gpu implementation, so we are forced to use the cpu one and .cuda()
        # Not sure if this matters for performance
        self_mask = Variable(torch.eye(passage_length, passage_length).cuda()).resize(1, passage_length, passage_length)
        # Zero the diagonal so a token never attends to itself.
        mask = mask * (1 - self_mask)

        self_atten_probs = util.last_dim_softmax(self_atten_matrix, mask)

        # Batch matrix multiplication:
        # (batch, passage_len, passage_len) * (batch, passage_len, dim) -> (batch, passage_len, dim)
        self_atten_vecs = torch.matmul(self_atten_probs, residual_layer)

        residual_layer = F.relu(self._merge_self_atten(torch.cat(
            [self_atten_vecs, residual_layer, residual_layer * self_atten_vecs], dim=-1)))

        # Residual connection around the self-attention block.
        final_merged_passage += residual_layer

        final_merged_passage = self._dropout(final_merged_passage)
        # NOTE(review): if mask_lstms is False, passage_lstm_mask is None and
        # AttnPool's masked softmax will fail — confirm this path is unused.
        rep_passage = self.attn_pool(final_merged_passage, passage_lstm_mask)
        # One scalar score per passage, regrouped per question.
        passage_score = self.fc(rep_passage).view(real_batch_size, -1)
        # A passage whose mask sums to zero is pure padding.
        valid_passage = (passage_mask.sum(dim=1) > 0).float().view(real_batch_size, -1)
        #logger.info(f'passage_score:{passage_score.shape}') 
        #logger.info(f'valid_passage:{valid_passage.shape}')
        # Push padding passages' scores to -inf so they never win.
        passage_score += (1-valid_passage) * (-1e7)
        output = {}
        if self.training:
            label = input['most_related_para']
            if self.activation == 'softmax':
                loss = F.cross_entropy(passage_score, label)
                score = F.softmax(passage_score, dim=1)
            else:
                # BCE path: build a one-hot target on the CPU, then move it
                # to the GPU (assumes CUDA is available).
                label_np = label.data.cpu().numpy()
                B, C = passage_score.shape
                new_label = np.zeros((B, C))
                new_label[range(B), label_np] = 1
                new_label = torch.autograd.Variable(torch.from_numpy(new_label)).cuda().float()
                loss = F.binary_cross_entropy_with_logits(passage_score, new_label)
                score = F.sigmoid(passage_score)
            # Precision@1 from the module-level accuracy() helper.
            acc = accuracy(score, label)[0]
            output['acc'] = acc
            output['loss'] = loss
        if self.activation == 'softmax':
            score = F.softmax(passage_score, dim=1)
        else:
            score = F.sigmoid(passage_score)
        output['score'] = score
        return output

    @classmethod
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'DuReaderRankBidafSelfAttn':
        """Build the model from an AllenNLP ``Params`` config block."""
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
        residual_encoder = Seq2SeqEncoder.from_params(params.pop("residual_encoder"))
        initializer = InitializerApplicator.from_params(params.pop("initializer", []))
        dropout = params.pop('dropout', 0.2)

        # TODO: Remove the following when fully deprecated
        evaluation_json_file = params.pop('evaluation_json_file', None)
        if evaluation_json_file is not None:
            logger.warning("the 'evaluation_json_file' model parameter is deprecated, please remove")

        activation = params.pop('activation', 'softmax')
        mask_lstms = params.pop('mask_lstms', True)
        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   phrase_layer=phrase_layer,
                   residual_encoder=residual_encoder,
                   initializer=initializer,
                   dropout=dropout,
                   activation=activation,
                   mask_lstms=mask_lstms)
def accuracy(output, target, topk=(1, ), ignore_index=-1):
    """Computes the precision@k for the specified values of k.

    Parameters
    ----------
    output : ``torch.Tensor`` of shape ``(batch, num_classes)``
        Class scores / logits.
    target : ``torch.LongTensor`` of shape ``(batch,)``
        Gold class indices; entries equal to ``ignore_index`` are excluded.
    topk : tuple of int
        The k values to report precision@k for.
    ignore_index : int
        Target value marking examples to skip.

    Returns
    -------
    List of one-element float tensors, precision@k in percent, one per
    entry of ``topk``.
    """
    # view(-1) instead of a bare squeeze(): squeeze() collapsed the [1, 1]
    # nonzero result to a 0-dim tensor when exactly one example survived
    # the filter, tripping the dim check below.
    keep = torch.nonzero(target != ignore_index).view(-1)
    #logger.info('target.shape:{0}, keep.shape:{1}'.format(target.shape, keep.shape))
    assert (keep.dim() == 1)
    target = target[keep]
    output = output[keep]
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # contiguous() keeps view(-1) valid regardless of the slice layout.
        correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
