# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import itertools
import os
import sys
import torch

from fairseq import tokenizer
from fairseq import options, utils
from fairseq.tokenizer import tokenize_line
from fairseq.data import (
    ConcatDataset,
    data_utils,
    indexed_dataset,
    Dictionary,
)
from coherence_story.data import HierEventStoryDataset
from coherence_story.data import BPEDictionary
from fairseq.tasks import FairseqTask, register_task

def encode_event(line, dictory=None, add_if_not_exist=True, consumer=None, append_eos=True, reverse_order=False):
    """Encode one line of events into a padded 2-D IntTensor batch.

    The line is split into individual events: on ' <A0> ' when the line
    carries a leading '<A0> ' marker (which is stripped first), otherwise
    on ' <SEP> '. Each event is tokenized and mapped through *dictory*
    (adding unknown symbols when *add_if_not_exist* is set), an EOS id is
    appended per event when *append_eos* is set, and all event tensors
    are padded to a common length with ``collate_tokens``.
    """
    if line.startswith('<A0> '):
        events = line[5:].strip('\n').split(' <A0> ')
    else:
        events = line.strip('\n').split(' <SEP> ')

    encoded = []
    for event in events:
        tokens = tokenize_line(event)
        if reverse_order:
            tokens = tokens[::-1]
        length = len(tokens)
        ids = torch.IntTensor(length + 1 if append_eos else length)
        for pos, token in enumerate(tokens):
            if add_if_not_exist:
                index = dictory.add_symbol(token)
            else:
                index = dictory.index(token)
            if consumer is not None:
                consumer(token, index)
            ids[pos] = index
        if append_eos:
            ids[length] = dictory.eos()
        encoded.append(ids)

    return data_utils.collate_tokens(encoded, dictory.pad(), left_pad=False, move_eos_to_beginning=False)

def encode_entity(line, dictory=None, add_if_not_exist=True, consumer=None, append_eos=True, reverse_order=False):
    """Encode a comma-separated line of entity mentions into a padded batch.

    Each comma-separated field is truncated at its first '@' marker (only
    the text before it is kept), tokenized, and mapped through *dictory*.
    No EOS id is ever appended here, regardless of *append_eos*; the
    per-mention tensors are padded to a common length with
    ``collate_tokens``.
    """
    encoded = []
    for mention in line.strip('\n').split(','):
        tokens = tokenize_line(mention.split('@')[0])
        if reverse_order:
            tokens = tokens[::-1]
        ids = torch.IntTensor(len(tokens))
        for pos, token in enumerate(tokens):
            index = dictory.add_symbol(token) if add_if_not_exist else dictory.index(token)
            if consumer is not None:
                consumer(token, index)
            ids[pos] = index
        encoded.append(ids)

    return data_utils.collate_tokens(encoded, dictory.pad(), left_pad=False, move_eos_to_beginning=False)

def encode_line_of_event(line, dictory=None, add_if_not_exist=True, consumer=None, append_eos=True, reverse_order=False):
    """Parse a space-separated line of integer event labels into an IntTensor.

    The tokens are already numeric label ids, so no dictionary lookup is
    done; *dictory*, *add_if_not_exist* and *consumer* are accepted only
    for interface compatibility with the other encode_* functions. When
    *append_eos* is set, the final slot repeats the last label on the
    line (there is no dictionary EOS id to use for label sequences).
    """
    tokens = line.strip('\n').split(' ')
    if reverse_order:
        tokens = tokens[::-1]
    count = len(tokens)
    ids = torch.IntTensor(count + 1 if append_eos else count)
    for pos, token in enumerate(tokens):
        ids[pos] = int(token)
    if append_eos:
        ids[count] = ids[count - 1]
    return ids

def encode_line_of_entity(line, dictory=None, add_if_not_exist=True, consumer=None, append_eos=True, reverse_order=False):
    """Parse a space-separated line of integer entity labels into an IntTensor.

    Like :func:`encode_line_of_event`, the tokens are numeric label ids
    and *dictory*/*add_if_not_exist*/*consumer* are unused interface
    placeholders. When *append_eos* is set, a 0 label fills the final
    (EOS) slot.
    """
    tokens = line.strip('\n').split(' ')
    if reverse_order:
        tokens = tokens[::-1]
    count = len(tokens)
    ids = torch.IntTensor(count + 1 if append_eos else count)
    for pos, token in enumerate(tokens):
        ids[pos] = int(token)
    if append_eos:
        ids[count] = 0
    return ids

def load_eventstory_dataset(
    data_path, split,
    src, src_dict,
    tgt, tgt_dict,
    combine, dataset_impl, upsample_primary,
    left_pad_source, left_pad_target, max_source_positions, max_target_positions,
    use_context, flat_event, generated_event_path, generated_tgt_path
):
    """Assemble a :class:`HierEventStoryDataset` for one data split.

    When *generated_event_path* is given, source events/entities and the
    target are read from pre-generated raw files (no label datasets are
    available in that case); otherwise everything is loaded from the
    indexed datasets under *data_path* for the src-tgt pair, including
    the target-side event/entity label datasets.
    """
    def split_exists(split, src, tgt, lang, data_path):
        # Checks for e.g. '<data_path>/<split>.<src>-<tgt>.<lang>'.
        filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
        return indexed_dataset.dataset_exists(filename, impl=dataset_impl)

    # infer langcode
    if not split_exists(split, src, tgt, src, data_path):
        raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
    prefix = os.path.join(data_path, '{}.{}-{}.'.format(split, src, tgt))

    if generated_event_path is not None:
        print('| loading generated source')
        src_dataset = indexed_dataset.make_dataset(
            generated_event_path + '.formatevent', impl='raw',
            fix_lua_indexing=True, dictionary=src_dict, encode_line=encode_event)
        entity_dataset = indexed_dataset.make_dataset(
            generated_event_path + '.coref', impl='raw',
            fix_lua_indexing=True, dictionary=src_dict, encode_line=encode_entity)
        tgt_dataset = indexed_dataset.make_dataset(
            generated_event_path + '.tgt', impl='raw',
            fix_lua_indexing=True, dictionary=tgt_dict)
        # Generated events carry no gold labels.
        tgt_event_label_dataset = None
        tgt_entity_label_dataset = None
        assert len(src_dataset) == len(tgt_dataset) == len(entity_dataset)
    else:
        src_dataset = indexed_dataset.make_dataset(
            prefix + src, impl=dataset_impl,
            fix_lua_indexing=True, dictionary=src_dict, encode_line=encode_event)
        entity_dataset = indexed_dataset.make_dataset(
            prefix + src + '.coref', impl=dataset_impl,
            fix_lua_indexing=True, dictionary=src_dict, encode_line=encode_entity)
        tgt_dataset = indexed_dataset.make_dataset(
            prefix + tgt, impl=dataset_impl,
            fix_lua_indexing=True, dictionary=tgt_dict)
        tgt_event_label_dataset = indexed_dataset.make_dataset(
            prefix + tgt + '.event.label.bpe', impl=dataset_impl,
            fix_lua_indexing=True, dictionary=tgt_dict, encode_line=encode_line_of_event)
        tgt_entity_label_dataset = indexed_dataset.make_dataset(
            prefix + tgt + '.coref.label.bpe', impl=dataset_impl,
            fix_lua_indexing=True, dictionary=tgt_dict, encode_line=encode_line_of_entity)

        print('| {} {} {}-{} {} examples'.format(data_path, split, src, tgt, len(src_dataset)))

        assert len(src_dataset) == len(tgt_dataset) == len(entity_dataset) \
            == len(tgt_event_label_dataset) == len(tgt_entity_label_dataset)

    return HierEventStoryDataset(
        src_dataset, src_dataset.sizes, src_dict,
        tgt_dataset, tgt_dataset.sizes, tgt_dict,
        entity_dataset, tgt_event_label_dataset, tgt_entity_label_dataset,
        left_pad_source=left_pad_source,
        left_pad_target=left_pad_target,
        max_source_positions=max_source_positions,
        max_target_positions=max_target_positions,
        use_context=use_context,
        flat_event=flat_event
    )


@register_task('hierstory_bpe_gan')
class BPEHierGanStoryTask(FairseqTask):
    """
    Generate a (target) story from (source) event/entity sequences, with
    adversarial training support via :meth:`adv_train_step`.

    Args:
        src_dict (~fairseq.data.Dictionary): dictionary for the source language
        tgt_dict (~fairseq.data.Dictionary): dictionary for the target language

    .. note::

        The translation task is compatible with :mod:`fairseq-train`,
        :mod:`fairseq-generate` and :mod:`fairseq-interactive`.

    The translation task provides the following additional command-line
    arguments:

    .. argparse::
        :ref: fairseq.tasks.translation_parser
        :prog:
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('data', help='colon separated path to data directories list, \
                            will be iterated upon during epochs in round-robin manner')
        parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
                            help='source language')
        parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
                            help='target language')
        parser.add_argument('--lazy-load', action='store_true',
                            help='load the dataset lazily')
        parser.add_argument('--raw-text', default=False, action='store_true',
                            help='load raw text dataset')
        parser.add_argument('--left-pad-source', default='False', type=str, metavar='BOOL',
                            help='pad the source on the left')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
                            help='pad the target on the left')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the target sequence')
        parser.add_argument('--upsample-primary', default=1, type=int,
                            help='amount to upsample primary dataset')
        # NOTE: takes a string value on the command line; it is normalized to
        # a real bool in setup_task() (like --left-pad-*), since a raw 'False'
        # string would otherwise be truthy.
        parser.add_argument('--generated-event', default=False, metavar='BOOL',
                            help='if set, uses generated event')
        parser.add_argument('--generated-event-path', default='path', metavar='DIR',
                            help='path to save checkpoints')
        parser.add_argument('--generated-tgt-path', default='path', metavar='DIR',
                            help='path to save checkpoints')
        # fmt: on

    def __init__(self, args, src_dict, tgt_dict):
        """Store the parsed args and the source/target dictionaries."""
        super().__init__(args)
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict

    @classmethod
    def build_dictionary(cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8, src=False, tgt=False):
        """Build the dictionary

        Args:
            filenames (list): list of filenames
            workers (int): number of concurrent workers
            threshold (int): defines the minimum word count
            nwords (int): defines the total number of words in the final dictionary,
                including special symbols
            padding_factor (int): can be used to pad the dictionary size to be a
                multiple of 8, which is important on some hardware (e.g., Nvidia
                Tensor Cores).
            src (bool): if True, pre-seed the dictionary with the '<ent>0'
                entity placeholder at maximum count so it survives finalize()
            tgt (bool): accepted for symmetry with *src*; currently unused
        """
        d = Dictionary()
        if src:
            d.add_symbol('<ent>0', sys.maxsize)
        for filename in filenames:
            Dictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers)
        d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
        return d

    @classmethod
    def load_dictionary(cls, filename, lang='source'):
        """Load the dictionary from the filename

        Args:
            filename (str): the filename (for 'target' this is the data
                directory consumed by BPEDictionary rather than a dict file)
            lang (str): 'source' loads a plain fairseq Dictionary;
                anything else loads a BPEDictionary
        """
        if lang == 'source':
            return Dictionary.load(filename)
        else:
            return BPEDictionary(filename)

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task (e.g., load dictionaries).

        Args:
            args (argparse.Namespace): parsed command-line arguments
        """
        args.left_pad_source = options.eval_bool(args.left_pad_source)
        args.left_pad_target = options.eval_bool(args.left_pad_target)
        # --generated-event has no type/action, so a command-line value such
        # as 'False' arrives as a (truthy) string; normalize string values to
        # a real bool so `if args.generated_event` in load_dataset() works.
        if isinstance(getattr(args, 'generated_event', False), str):
            args.generated_event = options.eval_bool(args.generated_event)
        if getattr(args, 'raw_text', False):
            utils.deprecation_warning('--raw-text is deprecated, please use --dataset-impl=raw')
            args.dataset_impl = 'raw'
        elif getattr(args, 'lazy_load', False):
            utils.deprecation_warning('--lazy-load is deprecated, please use --dataset-impl=lazy')
            args.dataset_impl = 'lazy'

        paths = args.data.split(':')
        assert len(paths) > 0
        # find language pair automatically
        if args.source_lang is None or args.target_lang is None:
            args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])
        if args.source_lang is None or args.target_lang is None:
            raise Exception('Could not infer language pair, please provide it explicitly')

        # load dictionaries (target uses a BPEDictionary built from the data dir)
        src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang)), 'source')
        tgt_dict = cls.load_dictionary(paths[0], 'target')
        # assert src_dict.pad() == tgt_dict.pad()
        # assert src_dict.eos() == tgt_dict.eos()
        # assert src_dict.unk() == tgt_dict.unk()
        print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
        print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))

        return cls(args, src_dict, tgt_dict)

    def load_dataset(self, split, epoch=0, combine=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        paths = self.args.data.split(':')
        assert len(paths) > 0
        # round-robin over data directories across epochs
        data_path = paths[epoch % len(paths)]

        # infer langcode
        src, tgt = self.args.source_lang, self.args.target_lang

        self.datasets[split] = load_eventstory_dataset(
            data_path, split, src, self.src_dict, tgt, self.tgt_dict,
            combine=combine, dataset_impl=self.args.dataset_impl,
            upsample_primary=self.args.upsample_primary,
            left_pad_source=self.args.left_pad_source,
            left_pad_target=self.args.left_pad_target,
            max_source_positions=self.args.max_source_positions,
            max_target_positions=self.args.max_target_positions,
            use_context=getattr(self.args, 'use_context', False),
            flat_event=getattr(self.args, 'flat_event', False),
            generated_event_path=self.args.generated_event_path if self.args.generated_event else None,
            generated_tgt_path=self.args.generated_tgt_path if self.args.generated_event else None
        )

    def build_dataset_for_inference(self, src_tokens, src_lengths):
        # NOTE(review): this passes only 3 positional args while
        # load_eventstory_dataset constructs HierEventStoryDataset with 9 —
        # verify HierEventStoryDataset's signature accepts this short form.
        return HierEventStoryDataset(src_tokens, src_lengths, self.source_dictionary)

    def max_positions(self):
        """Return the max sentence length allowed by the task."""
        return (self.args.max_source_positions, self.args.max_target_positions)

    @property
    def source_dictionary(self):
        """Return the source :class:`~fairseq.data.Dictionary`."""
        return self.src_dict

    @property
    def target_dictionary(self):
        """Return the target :class:`~fairseq.data.Dictionary`."""
        return self.tgt_dict

    @property
    def dictionary(self):
        """Return the target :class:`~fairseq.data.Dictionary`."""
        return self.tgt_dict

    @property
    def output_dictionary(self):
        """Return the target :class:`~fairseq.data.Dictionary`."""
        return self.tgt_dict

    def adv_train_step(self, sample, model, scorer, criterion, optimizer, ignore_grad=False):
        """
        Do forward and backward, and return the loss as computed by *criterion*
        for the given *model* and *sample*.

        Args:
            sample (dict): the mini-batch. The format is defined by the
                :class:`~fairseq.data.FairseqDataset`.
            model (~fairseq.models.BaseFairseqModel): the model
            scorer: adversarial scorer passed through to the criterion
            criterion (~fairseq.criterions.FairseqCriterion): the criterion
            optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
            ignore_grad (bool): multiply loss by 0 if this is set to True

        Returns:
            tuple:
                - the loss
                - the sample size, which is used as the denominator for the
                  gradient
                - logging outputs to display while training
        """
        model.train()
        loss, sample_size, logging_output = criterion(model, sample, scorer)
        if ignore_grad:
            # zero the loss so backward() contributes no gradient
            loss *= 0
        # with autograd.detect_anomaly():
        optimizer.backward(loss)
        return loss, sample_size, logging_output

