# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
from collections import OrderedDict
import random

import torch
import torch.nn as nn
import torch.nn.functional as F

from fairseq import options, utils
from fairseq.models import (
    FairseqIncrementalDecoder,
    FairseqLanguageModel,
    register_model,
    register_model_architecture,
)

@register_model('transformer_lm_bpe_coherence_disa')
class BPECDATransformerLanguageModel(FairseqLanguageModel):
    """BPE language model wrapper whose decoder trains only a coherence
    discriminator (see :class:`TransformerDecoder` in this file)."""

    def __init__(self, decoder):
        super().__init__(decoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--activation-fn',
                            choices=utils.get_available_activation_fns(),
                            help='activation function to use')
        parser.add_argument('--dropout', default=0.1, type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--attention-dropout', default=0., type=float, metavar='D',
                            help='dropout probability for attention weights')
        parser.add_argument('--relu-dropout', default=0., type=float, metavar='D',
                            help='dropout probability after ReLU in FFN')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-output-dim', type=int, metavar='N',
                            help='decoder output dimension')
        parser.add_argument('--decoder-input-dim', type=int, metavar='N',
                            help='decoder input dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N',
                            help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
                            help='num decoder attention heads')
        parser.add_argument('--decoder-normalize-before', default=False, action='store_true',
                            help='apply layernorm before each decoder block')
        parser.add_argument('--decoder-final-norm', default=False, action='store_true',
                            help='apply layernorm to the output of the final decoder layer')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive softmax cutoff points. '
                                 'Must be used with adaptive_loss criterion')
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
                            help='sets adaptive softmax dropout for the tail projections')
        parser.add_argument('--adaptive-softmax-factor', type=float, metavar='N',
                            help='adaptive softmax factor')
        parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
                            help='if set, disables positional embeddings (outside self attention)')
        parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true',
                            help='share decoder input and output embeddings')
        parser.add_argument('--character-embeddings', default=False, action='store_true',
                            help='if set, uses character embedding convolutions to produce token embeddings')
        parser.add_argument('--character-filters', type=str, metavar='LIST',
                            default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]',
                            help='list of (kernel size, num filters) pairs for the character CNN')
        parser.add_argument('--character-embedding-dim', type=int, metavar='N', default=4,
                            help='size of character embeddings')
        parser.add_argument('--char-embedder-highway-layers', type=int, metavar='N', default=2,
                            help='number of highway layers for character token embedder')
        parser.add_argument('--adaptive-input', action='store_true',
                            help='if set, uses adaptive input')
        parser.add_argument('--adaptive-input-factor', type=float, metavar='N',
                            help='adaptive input factor')
        parser.add_argument('--adaptive-input-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive input cutoff points.')
        parser.add_argument('--tie-adaptive-weights', action='store_true',
                            help='if set, ties the weights of adaptive softmax and adaptive input')
        parser.add_argument('--tie-adaptive-proj', action='store_true',
                            help='if set, ties the projection weights of adaptive softmax and adaptive input')
        parser.add_argument('--decoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the decoder')
        # NOTE(review): no `action` is given, so any value passed on the
        # command line arrives as a (truthy) string; architectures override
        # the attribute programmatically instead -- confirm before exposing
        # this flag to users.
        parser.add_argument('--decoder-embed-scale', default=True,
                            help='scale token embeddings by sqrt(embed dim)')
        parser.add_argument('--pretrained-checkpoint', metavar='DIR',
                            help='path to load checkpoint from pretrained model')
        parser.add_argument('--pretrained', default=False, action='store_true',
                            help='use pretrained model')
        # fmt: on

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance.

        Args:
            args (argparse.Namespace): parsed command-line arguments
            task: fairseq task providing ``dictionary`` / ``output_dictionary``

        Returns:
            a model instance of ``cls`` wrapping a :class:`TransformerDecoder`
        """

        # make sure all arguments are present in older models
        base_lm_architecture_bpe(args)

        if not hasattr(args, 'max_source_positions'):
            args.max_source_positions = args.tokens_per_sample
        if not hasattr(args, 'max_target_positions'):
            args.max_target_positions = args.tokens_per_sample

        # only treat pad as a dedicated embedding row when pad and eos are
        # distinct symbols in the dictionary
        padding_idx = task.dictionary.pad() if task.dictionary.pad() != task.dictionary.eos() else None
        embed_tokens = Embedding(len(task.dictionary), args.decoder_input_dim, padding_idx)

        decoder = TransformerDecoder(
            args, task.output_dictionary, embed_tokens,
        )
        # use `cls` (not the hard-coded class name) so subclasses registered
        # on top of this model build instances of themselves
        return cls(decoder)

class CNN(nn.Module):
    """Multi-kernel convolutional feature extractor over a sequence of vectors.

    Three Conv2d layers with kernel heights 2/3/4 (kernel width equal to the
    embedding size, so each convolution collapses the embedding axis and
    slides only over the sequence axis) are applied to the input; each feature
    map is passed through tanh and max-pooled over time, and the three pooled
    vectors are concatenated.

    Arguments
    ---------
    embedding_length : embedding dimension of each input vector
    out_channels : number of output channels per convolution; the forward
        output therefore has ``3 * out_channels`` features
    keep_prob : probability passed to ``nn.Dropout`` -- despite the name this
        is the probability of *dropping* an activation, not of keeping it
    """

    def __init__(self, embedding_length, out_channels, keep_prob):
        super(CNN, self).__init__()

        self.in_channels = 1  # input is reshaped to (batch, 1, num_seq, emb)
        self.out_channels = out_channels
        self.kernel_heights = [2, 3, 4]
        self.embedding_length = embedding_length

        self.right_conv1 = nn.Conv2d(self.in_channels, self.out_channels, (self.kernel_heights[0], embedding_length))
        self.right_conv2 = nn.Conv2d(self.in_channels, self.out_channels, (self.kernel_heights[1], embedding_length))
        self.right_conv3 = nn.Conv2d(self.in_channels, self.out_channels, (self.kernel_heights[2], embedding_length))
        self.dropout = nn.Dropout(keep_prob)

    def conv_block(self, input, conv_layer):
        """Convolve, apply tanh, then max-pool over the time dimension.

        Returns a tensor of shape ``(batch_size, out_channels)``.
        """
        conv_out = conv_layer(input)  # conv_out.size() = (batch_size, out_channels, dim, 1)
        # F.tanh is deprecated; torch.tanh is the supported equivalent
        activation = torch.tanh(conv_out.squeeze(3))  # activation.size() = (batch_size, out_channels, dim1)
        max_out = F.max_pool1d(activation, activation.size()[2]).squeeze(2)  # maxpool_out.size() = (batch_size, out_channels)

        return max_out

    def forward(self, input_sentences):
        """Encode ``(batch, num_seq, embedding_length)`` input into a
        ``(batch, 3 * out_channels)`` feature vector.

        We convolve the embedding matrix with kernels of varying height but
        constant width equal to ``embedding_length``, apply tanh, max-pool
        each feature map over the sequence dimension, concatenate the pooled
        results and apply dropout.  ``num_seq`` must be at least 4 (the
        largest kernel height) for all three convolutions to be valid.
        """
        # input.size() = (batch_size, num_seq, embedding_length)
        input_sentences = input_sentences.unsqueeze(1)
        # input.size() = (batch_size, 1, num_seq, embedding_length)
        max_out1 = self.conv_block(input_sentences, self.right_conv1)
        max_out2 = self.conv_block(input_sentences, self.right_conv2)
        max_out3 = self.conv_block(input_sentences, self.right_conv3)
        # all_out.size() = (batch_size, num_kernels*out_channels)
        all_out = torch.cat((max_out1, max_out2, max_out3), 1)
        return self.dropout(all_out)

class MLP(nn.Module):
    """Single-hidden-layer scorer: Linear -> Tanh -> Dropout -> Linear -> Sigmoid.

    Maps ``(..., input_dim)`` features to scores in ``(0, 1)`` of shape
    ``(..., 1)``.
    """

    def __init__(self, input_dim, hidden_dim, dropout):
        super(MLP, self).__init__()

        # submodule names ('fc', 'tanh', 'drop', 'out', 'sigmoid') are kept
        # stable so checkpoint state-dict keys do not change
        self.model = nn.Sequential(OrderedDict([
            ('fc', nn.Linear(input_dim, hidden_dim)),
            ('tanh', nn.Tanh()),
            ('drop', nn.Dropout(dropout)),
            ('out', nn.Linear(hidden_dim, 1)),
            ('sigmoid', nn.Sigmoid()),
        ]))

    def forward(self, input):
        """Return the sigmoid score for ``input``."""
        return self.model(input)

class MLP_Discriminator(nn.Module):
    """Coherence discriminator: CNN sentence encoder followed by an MLP scorer.

    Args:
        embed_dim: feature size of each input sentence state
        output_dim: number of CNN output channels per kernel height (also the
            MLP hidden size)
    """

    def __init__(self, embed_dim, output_dim):
        super(MLP_Discriminator, self).__init__()
        self.embed_dim = embed_dim
        self.hidden_dropout = 0.1

        self.cnn = CNN(embed_dim, output_dim, 0.3)
        # The CNN concatenates three pooled feature maps of `output_dim`
        # channels each, so the MLP input must be output_dim * 3.  The
        # previous `embed_dim * 3` only matched when embed_dim == output_dim
        # and crashed with a shape mismatch otherwise.
        self.mlp = MLP(output_dim * 3, output_dim, self.hidden_dropout)

    def forward(self, s1):
        """Score a batch of articles.

        Args:
            s1: sentence states of shape ``(batch, num_sentences, embed_dim)``

        Returns:
            coherence scores of shape ``(batch, 1)``, each in ``(0, 1)``
        """

        scores = self.mlp(self.cnn(s1))
        return scores

class TransformerDecoder(FairseqIncrementalDecoder):
    """
    Coherence discriminator built on top of token embeddings.

    Despite the class name, no transformer decoder layers are instantiated
    here: every parameter created before the discriminator (the token
    embeddings and the optional input projection) is frozen, and only the
    CNN+MLP :class:`MLP_Discriminator` remains trainable.  ``forward`` pools
    the embedded tokens into per-sentence states and scores the original
    ("positive") sentence ordering against two corrupted negatives.

    Args:
        args (argparse.Namespace): parsed command-line arguments
        dictionary (~fairseq.data.Dictionary): decoding dictionary
        embed_tokens (torch.nn.Embedding): output embedding
    """

    def __init__(self, args, dictionary, embed_tokens):
        super().__init__(dictionary)
        self.dropout = args.dropout
        self.share_input_output_embed = args.share_decoder_input_output_embed

        input_embed_dim = embed_tokens.embedding_dim
        embed_dim = args.decoder_embed_dim
        self.output_embed_dim = args.decoder_output_dim

        # NOTE(review): the conditional is redundant (both branches yield
        # embed_tokens.padding_idx) and `padding_idx` is never used below.
        padding_idx = embed_tokens.padding_idx if embed_tokens.padding_idx is not None else None
        self.max_target_positions = args.max_target_positions

        # token id used to detect sentence boundaries; assumes
        # `dictionary.enc` is a BPE encoder whose encoding of ' .' starts
        # with the sentence-final period token -- TODO confirm for the
        # dictionary actually used
        self.sen_end_index = dictionary.enc.encode(' .')[0]
        self.pad_index = dictionary.pad()

        self.embed_tokens = embed_tokens
        # optional sqrt(d) embedding scaling, as in standard transformers
        self.embed_scale = math.sqrt(embed_dim)  if args.decoder_embed_scale else None

        # project token embeddings to the model dim when the dims differ
        self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None

        # only train discriminator layer
        # (everything registered so far is frozen; the discriminator added
        # below keeps requires_grad=True)
        for p in self.parameters():
            p.requires_grad = False
        self.discriminator = MLP_Discriminator(input_embed_dim, self.output_embed_dim)

    def forward(self, prev_output_tokens, **unused):
        """
        Score the coherence of a batch of token sequences.

        Args:
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing

        Returns:
            dict: the score dictionary produced by :func:`extract_features`
            (positive/negative coherence scores, masks and debug statistics)
        """
        pos_result = self.extract_features(prev_output_tokens, **unused)
        return pos_result

    def extract_features(self, prev_output_tokens, **unused):
        """
        Embed the tokens, pool them into sentence states, and score the
        article's coherence against two corrupted ("negative") versions.

        Returns:
            dict with keys:
                - 'pos_scores' (batch,): discriminator score of the original article
                - 'neg_scores' (batch,): mean score of the two negatives
                - 'score_pad_mask' (batch, num_sentences): True where a
                  sentence state is non-empty
                - 'word_scores' (batch, tgt_len): the positive score broadcast
                  to every token position (sentence-level word reward)
                - 'article_pair_acc': fraction of articles where the positive
                  outscored the negative
                - 'neg_1', 'neg_2': mean negative scores (for debugging)
        """
        # embed tokens and positions
        x =  self.embed_tokens(prev_output_tokens)
        if self.embed_scale is not None:
            x *= self.embed_scale

        if self.project_in_dim is not None:
            x = self.project_in_dim(x)

        # extract sentence state
        bsz, time = prev_output_tokens.size()
        # True at sentence-final '.' tokens / at padding tokens respectively
        sen_sympol_mask = (prev_output_tokens == self.sen_end_index)
        prev_mask = (prev_output_tokens == self.pad_index)

        # up to 100 mean-pooled sentence vectors per example, plus a label
        # mapping every token position to its sentence index
        sentence_state = x.new_full((bsz, 100, self.output_embed_dim), 0, dtype=torch.float)
        word_sen_label = x.new_full((bsz, time), 0, dtype=torch.long)

        max_sen_len = 0
        # for each story, for each sentence, get sentence state and give sentence label
        for i in range(bsz):
            # story
            if (sen_sympol_mask[i]).any() and (sen_sympol_mask[i]).nonzero().size(0) > 3:
                # enough '.' boundaries (> 3): pool tokens between periods
                sens = (sen_sympol_mask[i]).nonzero() + 1
                sen_len = min(100, sens.size(0))
                max_sen_len = sen_len if sen_len > max_sen_len else max_sen_len
                for j in range(sen_len):
                    # NOTE(review): the first sentence starts at index 1, so
                    # token 0 is never pooled -- presumably a BOS/EOS slot;
                    # confirm this is intentional
                    last = 1 if j == 0 else sens[j-1]
                    sen = x[i, last: sens[j]].mean(dim=0)
                    sentence_state[i, j] = sen
                    word_sen_label[i, last: sens[j]] = j
                # tokens after the final '.' belong to the last sentence
                word_sen_label[i, sens[-1]:] = sen_len - 1
            else:
                # too few boundaries: fall back to fixed 15-token segments
                segments_length = 15
                segments_x = torch.split(x[i], segments_length, dim=0)
                for ii, seg in enumerate(segments_x):
                    cur_word_mask = prev_mask[i, ii * segments_length: (ii + 1) * segments_length]
                    if cur_word_mask.any():
                        if cur_word_mask.all():
                            # all-padding segment: fold its tokens into the
                            # previous segment's label
                            word_sen_label[i, ii * segments_length: (ii + 1) * segments_length] = ii - 1
                        else:
                            # partially padded: mean over non-pad tokens only
                            temp_mask = (~cur_word_mask).float()
                            sen = (seg * temp_mask.unsqueeze(dim=1)).sum(dim=0) / temp_mask.sum(dim=0, keepdim=True)
                            # sen = seg.mean(dim=0)
                            sentence_state[i, ii] = sen
                            word_sen_label[i, ii * segments_length: (ii + 1) * segments_length] = ii
                    else:
                        # no padding: plain mean over the whole segment
                        sen = seg.mean(dim=0)
                        sentence_state[i, ii] = sen
                        word_sen_label[i, ii * segments_length: (ii + 1) * segments_length] = ii
        # NOTE(review): word_sen_label is computed but never used or returned
        # below -- dead state unless consumed by a caller via side effects.

        # concat pad state, sentence state (bx1,bxn -> bx(n+1))
        # NOTE(review): max_sen_len is only updated in the '.'-boundary
        # branch; if every example in the batch takes the segment fallback,
        # this slices to zero sentences and the discriminator below would
        # fail -- verify this cannot happen with real batches.
        sentence_state = sentence_state[:, :max_sen_len, :]

        # positive sentence pair, sentence state: b x s x d, score: b x s x 1
        pos_scores = self.discriminator(sentence_state)
        pos_scores = pos_scores.squeeze(dim=-1)

        # get mask (b x (s-1), b)
        # score_pad_mask = torch.arange(max_sen_len, device=x.device).expand(sentence_state.size(0), max_sen_len) < sen_lens.unsqueeze(1)
        # a sentence slot counts as "real" iff its pooled state is non-zero
        score_pad_mask = (sentence_state.sum(dim=-1) != 0)
        pos_article_lens = score_pad_mask.sum(dim=-1)
        min_article_len = pos_article_lens.min().item()

        # 1. negative article, intra article
        # rotate up to 3 randomly chosen sentences within each article
        # (sentence 0 is never moved: indices are drawn from [1, len))
        neg_sent2_shuffle = sentence_state.clone()
        for i in range(sentence_state.size(0)):
            # NOTE(review): torch.randperm(pos_article_lens[i]-1) errors if an
            # article has 0 real sentences -- relies on the same "at least one
            # sentence" invariant as min_article_len below.
            shuffle_order = torch.randperm(pos_article_lens[i]-1) + 1
            shuffle_order = shuffle_order[:3]
            temp_state = sentence_state[i, shuffle_order]
            temp_state = torch.roll(temp_state, 1, dims=0)
            neg_sent2_shuffle[i, shuffle_order] = temp_state

        neg_scores_shuffle = self.discriminator(neg_sent2_shuffle)
        # neg_scores_shuffle = neg_scores_shuffle.squeeze(dim=2)

        # 2. negative article, inter article
        # swap one sentence position across articles (roll along batch dim)
        neg_sent2_shift_right = sentence_state.clone()
        # NOTE(review): random.sample(range(1, min_article_len), k=1) raises
        # ValueError when min_article_len <= 1 -- confirm batches always
        # contain articles with at least 2 sentences.
        shift_index = random.sample(range(1, min_article_len), k=1)[0]
        temp_state = sentence_state[:, shift_index, :]
        temp_state = torch.roll(temp_state, 1, dims=0)
        neg_sent2_shift_right[:, shift_index] = temp_state

        neg_scores_shift_right = self.discriminator(neg_sent2_shift_right)
        # neg_scores_shift_right = neg_scores_shift_right.squeeze(dim=2)

        # weight sum
        # average the two negative scores into one per-article score
        neg_scores = torch.cat([neg_scores_shuffle, neg_scores_shift_right], dim=-1)
        # neg_weight = F.softmax(neg_scores * 2, dim=-1)
        # neg_scores = (neg_scores * neg_weight).sum(dim=-1)
        neg_scores = neg_scores.mean(dim=-1)
        # neg_scores = torch.zeros_like(pos_scores)
        # neg_scores = neg_scores_shift_right.squeeze(dim=2)

        # fraction of articles where the positive ordering wins
        article_pair_acc = (pos_scores > neg_scores).float().mean()

        # sentence level word reward
        # broadcast the article-level score to every token position
        return_word_scores = pos_scores
        word_scores = return_word_scores.unsqueeze(dim=1).expand(-1, time)

        # for debug
        neg_1 = neg_scores_shuffle.mean()
        neg_2 = neg_scores_shift_right.mean()


        return_scores = {'pos_scores': pos_scores, 'neg_scores': neg_scores, 'score_pad_mask': score_pad_mask,
                         'word_scores': word_scores,
                         'article_pair_acc': article_pair_acc,
                         'neg_1': neg_1, 'neg_2': neg_2,
                         }
        return return_scores


    def max_positions(self):
        """Maximum output length supported by the decoder."""
        return self.max_target_positions


def Embedding(num_embeddings, embedding_dim, padding_idx):
    """Build an ``nn.Embedding`` with N(0, embedding_dim^-0.5) weights and,
    when ``padding_idx`` is given, a zeroed padding row."""
    table = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    with torch.no_grad():
        table.weight.normal_(mean=0, std=embedding_dim ** -0.5)
        if padding_idx is not None:
            table.weight[padding_idx].fill_(0)
    return table


def Linear(in_features, out_features, bias=True):
    """Build an ``nn.Linear`` with Xavier-uniform weights and zero bias."""
    layer = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(layer.weight)
    if bias:
        nn.init.zeros_(layer.bias)
    return layer

@register_model_architecture('transformer_lm_bpe_coherence_disa', 'transformer_lm_bpe_coherence_disa')
def base_lm_architecture_bpe(args):
    """Fill in a default for every hyper-parameter the user did not set."""
    for name, default in [
        ('decoder_embed_dim', 512),
        ('decoder_ffn_embed_dim', 2048),
        ('decoder_layers', 6),
        ('decoder_attention_heads', 8),
        ('adaptive_softmax_cutoff', None),
        ('adaptive_softmax_dropout', 0),
        ('adaptive_softmax_factor', 4),
        ('decoder_learned_pos', False),
        ('activation_fn', 'relu'),
        ('add_bos_token', False),
        ('character_embeddings', False),
        ('adaptive_input', False),
        ('adaptive_input_factor', 4),
        ('adaptive_input_cutoff', None),
        ('tie_adaptive_weights', False),
        ('tie_adaptive_proj', False),
    ]:
        if not hasattr(args, name):
            setattr(args, name, default)

    # input/output dims default to the (now guaranteed) embedding dim
    if not hasattr(args, 'decoder_output_dim'):
        args.decoder_output_dim = args.decoder_embed_dim
    if not hasattr(args, 'decoder_input_dim'):
        args.decoder_input_dim = args.decoder_embed_dim

    # The model training is not stable without this
    args.decoder_normalize_before = True

@register_model_architecture('transformer_lm_bpe_coherence_disa', 'transformer_lm_bpe_coherence_disa_gpt_format')
def transformer_lm_gpt_bpe_format(args):
    """GPT-style variant (768d, 12 layers, 12 heads) of the base architecture."""
    gpt_defaults = {
        'decoder_embed_dim': 768,
        'decoder_ffn_embed_dim': 3072,
        'decoder_layers': 12,
        'decoder_attention_heads': 12,
        'dropout': 0.1,
        'attention_dropout': 0.1,
        'activation_fn': 'gelu_accurate',
    }
    for name, default in gpt_defaults.items():
        if not hasattr(args, name):
            setattr(args, name, default)

    # forced regardless of the command line
    args.share_decoder_input_output_embed = True
    args.decoder_normalize_before = True
    args.decoder_final_norm = True
    args.decoder_learned_pos = True
    args.decoder_embed_scale = False

    # remaining defaults come from the base architecture
    base_lm_architecture_bpe(args)
