#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Train a new model on one or across multiple GPUs.
"""

import collections
import math
import os
import random

import torch

from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils
from fairseq.data import iterators
from fairseq.trainer import Trainer
from fairseq.trainer_gan import TrainerGan
from fairseq.meters import AverageMeter, StopwatchMeter


def main(args, init_distributed=False):
    """Set up the task, GAN model and trainers, then run the training phases.

    Args:
        args: parsed fairseq argument namespace.
        init_distributed: when True, initialize the distributed backend and
            record the resulting rank in ``args.distributed_rank`` (used by
            spawned worker processes).
    """
    utils.import_user_module(args)

    assert args.max_tokens is not None or args.max_sentences is not None, \
        'Must specify batch size either with --max-tokens or --max-sentences'

    # Initialize CUDA and distributed training
    if torch.cuda.is_available() and not args.cpu:
        torch.cuda.set_device(args.device_id)
    torch.manual_seed(args.seed)
    if init_distributed:
        args.distributed_rank = distributed_utils.distributed_init(args)

    # Print args
    print(args)

    # Setup task, e.g., translation, language modeling, etc.
    task = tasks.setup_task(args)

    # Load valid dataset (we load training data below, based on the latest checkpoint)
    for valid_sub_split in args.valid_subset.split(','):
        task.load_dataset(valid_sub_split, combine=True, epoch=0)

    # Build model and criterion
    model = task.build_model(args)
    # ``--pretrained`` may arrive as the bool True or the string 'True'
    # depending on how the flag was parsed, so accept both spellings.
    if getattr(args, 'pretrained', False) in (True, 'True'):
        if hasattr(args, 'pretrained_checkpoint'):
            print('| loading generator model weight from {}'.format(args.pretrained_checkpoint))
            generator = checkpoint_utils.load_pretrained_weight(model.generator, args.pretrained_checkpoint)
            model.update_generator(generator)

        if hasattr(args, 'pretrained_checkpoint_dis'):
            print('| loading discriminator model weight from {}'.format(args.pretrained_checkpoint_dis))
            discriminator = checkpoint_utils.load_pretrained_weight(model.discriminator, args.pretrained_checkpoint_dis)
            model.update_discriminator(discriminator)

    criterion = task.build_criterion(args)
    print(model)
    print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    print('| num. model params: {} (num. trained: {})'.format(
        sum(p.numel() for p in model.parameters()),
        sum(p.numel() for p in model.parameters() if p.requires_grad),
    ))

    # Build one trainer per sub-network: the generator uses the GAN-aware
    # trainer, the discriminator a regular fairseq trainer.
    trainer_generator = TrainerGan(args, task, model.generator, criterion.generator, save_dir='adv_gen_xxx')
    trainer_discriminator = Trainer(args, task, model.discriminator, criterion.discriminator, save_dir='dis_sig')
    print('| training on {} GPUs'.format(args.distributed_world_size))
    print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(
        args.max_tokens,
        args.max_sentences,
    ))

    # Load the latest checkpoint if one is available and restore the
    # corresponding train iterator (checkpointing follows the discriminator).
    extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer_discriminator)

    # Hard-coded schedule for discriminator pre-training: stop after this many
    # optimizer updates, and cap the number of validation batches per run.
    pre_dis_update = 50000
    dis_valid_update = 500

    # =====PRE-TRAINING=====
    # NOTE(review): generator pre-training and the adversarial phase were
    # commented out here; only discriminator pre-training currently runs.
    print('| pre training discriminator')
    train_process('pre_dis', args, trainer_generator, trainer_discriminator, task, epoch_itr, 5, pre_dis_update, dis_valid_update)

def train_process(mode, args, _trainer_generator, _trainer_discriminator, task, epoch_itr, epoch, update, dis_valid_update):
    """Run the outer epoch loop for one training phase.

    Args:
        mode: 'pre_gen', 'pre_dis' or 'adv' — selects which trainer is
            optimized and whether fake samples are drawn from the generator.
        epoch: maximum number of epochs (falsy means unlimited).
        update: maximum number of optimizer updates (falsy means unlimited).
        dis_valid_update: cap on validation batches, forwarded to validate().
    """
    # Pick the trainer to optimize; every mode except pure generator
    # pre-training also needs the generator to synthesize fake samples.
    trainer = _trainer_discriminator if mode == 'pre_dis' else _trainer_generator
    fake_generator = _trainer_generator if mode != 'pre_gen' else None
    data_loader = _trainer_generator

    max_epoch = epoch if epoch else math.inf
    max_update = update if update else math.inf

    lr = trainer.get_lr()
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_losses = [None]
    valid_subsets = args.valid_subset.split(',')

    # Train until the learning rate gets too small or a limit is reached.
    while lr > args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update:
        # train for one epoch
        if mode == 'adv':
            adv_train(args, _trainer_generator, _trainer_discriminator, task, epoch_itr, dis_valid_update)
            # the validation cap only applies to the first adversarial epoch
            dis_valid_update = None
        else:
            train(mode, args, trainer, fake_generator, task, epoch_itr, max_update, dis_valid_update)

        should_validate = (not args.disable_validation
                           and epoch_itr.epoch % args.validate_interval == 0)
        if should_validate:
            valid_losses = validate(mode, args, trainer, fake_generator, task, epoch_itr, valid_subsets, dis_valid_update)
        else:
            valid_losses = [None]

        # only the first validation loss drives the learning-rate schedule
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])

        # periodically persist a checkpoint
        if epoch_itr.epoch % args.save_interval == 0:
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])

        if ':' in getattr(args, 'data', ''):
            # sharded data: fetch the train iterator for the next epoch
            epoch_itr = data_loader.get_train_iterator(epoch_itr.epoch)

    train_meter.stop()
    print('| done training in {:.1f} seconds'.format(train_meter.sum))

def train(mode, args, trainer, fake_generator, task, epoch_itr, update, valid_update):
    """Train the model for one epoch.

    When ``mode == 'pre_dis'``, every batch is first replaced with samples
    produced by ``fake_generator.sample`` before being fed to the trainer.
    ``update`` caps the total number of optimizer updates; ``valid_update``
    is forwarded to validate() to limit validation batches.
    """
    # Update parameters every N batches (per-epoch schedule, last entry
    # repeats for all later epochs).
    update_freq = args.update_freq[epoch_itr.epoch - 1] \
        if epoch_itr.epoch <= len(args.update_freq) else args.update_freq[-1]

    # Initialize data iterator
    itr = epoch_itr.next_epoch_itr(
        fix_batches_to_gpus=args.fix_batches_to_gpus,
        shuffle=(epoch_itr.epoch >= args.curriculum),
    )
    itr = iterators.GroupedIterator(itr, update_freq)
    progress = progress_bar.build_progress_bar(
        args, itr, epoch_itr.epoch, no_progress_bar='simple',
    )

    extra_meters = collections.defaultdict(lambda: AverageMeter())
    valid_subsets = args.valid_subset.split(',')
    max_update = update or math.inf
    # Resume counting from wherever the epoch iterator left off.
    for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):

        # Discriminator pre-training consumes generator-produced samples.
        if mode == 'pre_dis':
            samples = [fake_generator.sample(per_batch) for per_batch in samples]
        log_output = trainer.train_step(samples)

        # train_step returns None when the step was skipped (e.g. OOM).
        if log_output is None:
            continue

        # log mid-epoch stats
        stats = get_training_stats(trainer)
        for k, v in log_output.items():
            if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                # also expose the instantaneous (per-step) loss values
                if k in ['loss', 'nll_loss']:
                    stats[k + '_now'] = v
                continue  # these are already logged above
            if 'loss' in k:
                # loss-like keys are averaged weighted by sample size
                extra_meters[k].update(v, log_output['sample_size'])
            else:
                extra_meters[k].update(v)
            stats[k] = extra_meters[k].avg
            stats[k + '_now'] = v
        progress.log(stats, tag=mode + '.train', step=stats['num_updates'])

        # ignore the first mini-batch in words-per-second calculation
        if i == 0:
            trainer.get_meter('wps').reset()

        # mid-epoch validation + checkpoint every --save-interval-updates
        num_updates = trainer.get_num_updates()
        if (
            not args.disable_validation
            and args.save_interval_updates > 0
            and num_updates % args.save_interval_updates == 0
            and num_updates > 0
        ):
            valid_losses = validate(mode, args, trainer, fake_generator, task, epoch_itr, valid_subsets, valid_update)
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])

        if num_updates >= max_update:
            break

    # log end-of-epoch stats
    stats = get_training_stats(trainer)
    for k, meter in extra_meters.items():
        stats[k] = meter.avg
    progress.print(stats, tag=mode + '.train', step=stats['num_updates'])

    # reset training meters so the next epoch starts from a clean slate
    for k in [
        'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',
    ]:
        meter = trainer.get_meter(k)
        if meter is not None:
            meter.reset()


def get_training_stats(trainer):
    """Collect the standard training statistics from the trainer's meters."""
    stats = collections.OrderedDict()
    train_loss = trainer.get_meter('train_loss')
    stats['loss'] = train_loss
    # Prefer the dedicated NLL meter for perplexity; fall back to the
    # overall training loss when no NLL values were recorded.
    nll_meter = trainer.get_meter('train_nll_loss')
    if nll_meter.count > 0:
        stats['nll_loss'] = nll_meter
    else:
        nll_meter = train_loss
    stats['ppl'] = utils.get_perplexity(nll_meter.avg)
    for name in ('wps', 'ups', 'wpb', 'bsz'):
        stats[name] = trainer.get_meter(name)
    stats['num_updates'] = trainer.get_num_updates()
    stats['lr'] = trainer.get_lr()
    for name in ('gnorm', 'clip', 'oom'):
        stats[name] = trainer.get_meter(name)
    loss_scale = trainer.get_meter('loss_scale')
    if loss_scale is not None:
        stats['loss_scale'] = loss_scale
    stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
    stats['train_wall'] = trainer.get_meter('train_wall')
    return stats


def adv_train(args, trainer_generator, trainer_discriminator, task, epoch_itr, valid_update):
    """Run one epoch of adversarial training.

    Per batch: draw fake samples from the generator, then take an
    adversarial generator step against the discriminator's model.

    NOTE(review): this function looks like it is in a partially-disabled
    experimental state — the discriminator train step is commented out
    (``dis_log_output = None``) and unconditional ``break`` statements cut
    both trainer loops short after the generator (ii == 0) pass, so the
    discriminator branch never executes. Confirm before relying on it.
    """
    # Update parameters every N batches (per-epoch schedule, last entry
    # repeats for all later epochs).
    update_freq = args.update_freq[epoch_itr.epoch - 1] \
        if epoch_itr.epoch <= len(args.update_freq) else args.update_freq[-1]

    # Initialize data iterator
    itr = epoch_itr.next_epoch_itr(
        fix_batches_to_gpus=args.fix_batches_to_gpus,
        shuffle=(epoch_itr.epoch >= args.curriculum),
    )
    itr = iterators.GroupedIterator(itr, update_freq)
    progress = progress_bar.build_progress_bar(
        args, itr, epoch_itr.epoch, no_progress_bar='simple',
    )

    extra_meters = collections.defaultdict(lambda: AverageMeter())
    valid_subsets = args.valid_subset.split(',')
    max_update = args.max_update or math.inf
    train_mode = ['adv_gen', 'adv_dis']
    for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):

        # mle loss
        # log_output = trainer_generator.train_step(samples)

        # fake examples
        samples = [trainer_generator.sample(per_batch) for per_batch in samples]

        # adv discriminator loss
        # dis_log_output = trainer_discriminator.train_step(samples)
        # NOTE(review): discriminator step disabled; kept as placeholder.
        dis_log_output = None

        # adv generation loss
        gen_log_output = trainer_generator.adv_train_step(trainer_discriminator.model, samples)

        log_output = [gen_log_output, dis_log_output]

        # NOTE(review): log_output is always a list here, so this check
        # can never trigger; presumably a leftover from the single-trainer
        # version of this loop.
        if log_output is None:
            continue

        all_stats = collections.OrderedDict()
        for ii, trainer_logger in enumerate([trainer_generator, trainer_discriminator]):
            # log mid-epoch stats
            stats = get_training_stats(trainer_logger)
            for k, v in log_output[ii].items():
                if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                    # also expose the instantaneous (per-step) loss values
                    if k in ['loss', 'nll_loss']:
                        stats[k + '_now'] = v
                    continue  # these are already logged above
                # NOTE(review): 'lossxx' never matches real keys, so the
                # weighted-update branch is effectively disabled; also
                # log_output['sample_size'] would fail on a list — this
                # branch appears deliberately unreachable.
                if 'lossxx' in k:
                    extra_meters[k].update(v, log_output['sample_size'])
                else:
                    extra_meters[k].update(v)
                stats[k] = extra_meters[k].avg
                stats[k + '_now'] = v
            if ii == 1:
                # prefix discriminator stats so they don't collide with
                # the generator's keys in the merged dict
                stats = map(lambda x: ('dis.' + x[0], x[1]), stats.items())
                stats = collections.OrderedDict(stats)
            all_stats.update(stats)

            # ignore the first mini-batch in words-per-second calculation
            if i == 0:
                trainer_logger.get_meter('wps').reset()

            num_updates = trainer_logger.get_num_updates()
            if (
                not args.disable_validation
                and args.save_interval_updates > 0
                and num_updates % args.save_interval_updates == 0
                and num_updates > 0
            ):
                # generator validation is uncapped; discriminator keeps the cap
                valid_update = None if ii == 0 else valid_update
                valid_losses = validate(train_mode[ii], args, trainer_logger, trainer_generator, task, epoch_itr, valid_subsets, valid_update)
                checkpoint_utils.save_checkpoint(args, trainer_logger, epoch_itr, valid_losses[0])

            if num_updates >= max_update:
                break
            # NOTE(review): unconditional break — only the generator
            # (ii == 0) iteration of this loop ever runs.
            break
        progress.log(all_stats, tag='adv.train')

    all_stats = collections.OrderedDict()
    for ii, trainer_logger in enumerate([trainer_generator, trainer_discriminator]):
        # log end-of-epoch stats
        stats = get_training_stats(trainer_logger)
        for k, meter in extra_meters.items():
            stats[k] = meter.avg
        if ii == 1:
            stats = map(lambda x: ('dis.' + x[0], x[1]), stats.items())
            stats = collections.OrderedDict(stats)
        all_stats.update(stats)

        # reset training meters
        for k in [
            'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',
        ]:
            meter = trainer_logger.get_meter(k)
            if meter is not None:
                meter.reset()
        # NOTE(review): unconditional break — discriminator meters are
        # never logged/reset here either.
        break
    progress.print(all_stats, tag='adv.train')


def validate(mode, args, trainer, fake_generator, task, epoch_itr, subsets, valid_update=None):
    """Evaluate the model on the validation set(s) and return the losses.

    In discriminator modes ('pre_dis' / 'adv_dis') each validation sample is
    first replaced by generator output via ``fake_generator.sample``.
    ``valid_update`` optionally caps the number of batches per subset.
    """
    losses_per_subset = []

    # Resolve position limits once; broadcast a scalar model limit to the
    # (source, target) tuple form used by sequence-to-sequence tasks.
    task_max_pos = task.max_positions()
    model_max_pos = trainer.get_model().max_positions()
    if isinstance(model_max_pos, int) and isinstance(task_max_pos, tuple):
        model_max_pos = (model_max_pos, model_max_pos)

    for subset in subsets:
        # Build a fresh, unshuffled iterator over this validation subset.
        subset_itr = task.get_batch_iterator(
            dataset=task.dataset(subset),
            max_tokens=args.max_tokens,
            max_sentences=args.max_sentences_valid,
            max_positions=utils.resolve_max_positions(task_max_pos, model_max_pos),
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=args.required_batch_size_multiple,
            seed=args.seed,
            num_shards=args.distributed_world_size,
            shard_id=args.distributed_rank,
            num_workers=args.num_workers,
        ).next_epoch_itr(shuffle=False)
        progress = progress_bar.build_progress_bar(
            args, subset_itr, epoch_itr.epoch,
            prefix='valid on \'{}\' subset'.format(subset),
            no_progress_bar='simple'
        )

        # Start every subset from freshly reset validation loss meters.
        for meter_name in ['valid_loss', 'valid_nll_loss']:
            meter = trainer.get_meter(meter_name)
            if meter is not None:
                meter.reset()
        extra_meters = collections.defaultdict(lambda: AverageMeter())

        seen = 0
        for sample in progress:
            # Discriminator modes validate on generated (fake) samples.
            if mode in ('pre_dis', 'adv_dis'):
                sample = fake_generator.sample(sample)
            log_output = trainer.valid_step(sample)

            for key, value in log_output.items():
                if key in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                    continue
                extra_meters[key].update(value)

            seen += 1
            if valid_update is not None and seen > valid_update:
                break

        # log validation stats for this subset
        stats = get_valid_stats(trainer)
        for key, meter in extra_meters.items():
            stats[key] = meter.avg
        progress.print(stats, tag=mode + subset, step=trainer.get_num_updates())

        losses_per_subset.append(stats['loss'].avg)
    return losses_per_subset


def get_valid_stats(trainer):
    """Collect validation statistics (loss, ppl, best loss) from the trainer."""
    stats = collections.OrderedDict()
    valid_loss = trainer.get_meter('valid_loss')
    stats['loss'] = valid_loss
    # Use the NLL meter for perplexity when populated, else the plain loss.
    nll_meter = trainer.get_meter('valid_nll_loss')
    if nll_meter.count > 0:
        stats['nll_loss'] = nll_meter
    else:
        nll_meter = valid_loss
    stats['ppl'] = utils.get_perplexity(nll_meter.avg)
    stats['num_updates'] = trainer.get_num_updates()
    # 'best' is attached by checkpoint loading when a previous best exists.
    if hasattr(trainer, 'best'):
        stats['best_loss'] = min(trainer.best, valid_loss.avg)
    return stats


def distributed_main(i, args, start_rank=0):
    """Per-process entry point used by torch.multiprocessing.spawn."""
    args.device_id = i
    if args.distributed_rank is None:
        # spawned worker: derive the global rank from the process index
        args.distributed_rank = start_rank + i
    main(args, init_distributed=True)


def cli_main():
    """Parse command-line arguments and dispatch to the right launch mode."""
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser)

    if args.distributed_init_method is None:
        distributed_utils.infer_init_method(args)

    if args.distributed_init_method is not None:
        # Explicit distributed setup (possibly multi-node).
        if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:
            base_rank = args.distributed_rank
            args.distributed_rank = None  # assign automatically
            torch.multiprocessing.spawn(
                fn=distributed_main,
                args=(args, base_rank),
                nprocs=torch.cuda.device_count(),
            )
        else:
            distributed_main(args.device_id, args)
        return

    if args.distributed_world_size > 1:
        # Single node, multiple GPUs: pick a random port and spawn workers.
        assert args.distributed_world_size <= torch.cuda.device_count()
        port = random.randint(10000, 20000)
        args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
        args.distributed_rank = None  # set based on device id
        if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':
            print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')
        torch.multiprocessing.spawn(
            fn=distributed_main,
            args=(args, ),
            nprocs=args.distributed_world_size,
        )
        return

    # single GPU training
    main(args)


# Standard script entry point: only run training when executed directly.
if __name__ == '__main__':
    cli_main()
