# -*- coding: utf-8 -*-
from __future__ import print_function

import torchvision
import torchvision.transforms as transforms
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
import torch.optim as optim

from ddl_platform.ddlib import job
import horovod.torch as hvd
#from ddl_platform.ddlib import dopt as hvd


# for bert
from transformers import (
    MODEL_FOR_QUESTION_ANSWERING_MAPPING,
    WEIGHTS_NAME,
    AdamW,
    AutoConfig,
    AutoModelForQuestionAnswering,
    AutoTokenizer,
    get_linear_schedule_with_warmup,
    squad_convert_examples_to_features,
)
from transformers.data.metrics.squad_metrics import (
    compute_predictions_log_probs,
    compute_predictions_logits,
    squad_evaluate,
)
from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor

import os
#import models
import torchvision.models as models

class BERTJob(job.Job):
    """SQuAD question-answering fine-tuning job for BERT-style models.

    Wraps dataset/feature caching, tokenizer/model construction, and the
    per-batch training step behind the ddl_platform ``job.Job`` interface,
    using Horovod for distributed coordination.
    """

    def __init__(self, conf_yaml):
        """Initialize from the YAML job configuration and ensure Horovod is up.

        Args:
            conf_yaml: configuration handed through to ``job.Job``.
        """
        super(BERTJob, self).__init__(conf_yaml)
        # Lazily-built tokenizer cache; see tokenizer()/build_tokenizer().
        self._tokenizer = None

        # hvd.size() raises ValueError when Horovod has not been initialized
        # yet. A bare ``except:`` here would also swallow KeyboardInterrupt
        # and SystemExit, so catch only the expected failure.
        try:
            hvd.size()
        except ValueError:
            hvd.init()

    def load_and_cache_examples(self, tokenizer, evaluate=False, output_examples=False):
        """Load SQuAD examples/features, using an on-disk cache when possible.

        For the training split in distributed runs, non-zero ranks wait on a
        Horovod barrier while rank 0 loads or builds the features; rank 0
        joins the barrier afterwards to release them, so the other ranks then
        find and read the freshly written cache file.

        Args:
            tokenizer: HuggingFace tokenizer used to featurize the examples.
            evaluate: build the dev split when True, the train split otherwise.
            output_examples: when True, also return the raw examples and
                features (needed later for SQuAD evaluation).

        Returns:
            The feature ``TensorDataset``, or the tuple
            ``(dataset, examples, features)`` when ``output_examples`` is True.
        """
        config = self.config()['dataset']
        if hvd.size() > 1 and hvd.rank() > 0 and not evaluate:
            # Make sure only the first process in distributed training process the dataset, and the others will use the cache
            hvd.barrier()

        # Load data features from cache or dataset file
        input_dir = config['data_dir'] if config['data_dir'] else "."
        cached_features_file = os.path.join(
            input_dir,
            "cached_{}_{}_{}".format(
                "dev" if evaluate else "train",
                # Cache key uses the last path component of the model name.
                list(filter(None, config['model_name_or_path'].split("/"))).pop(),
                str(config['max_seq_length']),
            ),
        )
        # Init features and dataset from cache if it exists
        if os.path.exists(cached_features_file) and not config['overwrite_cache']:
            if hvd.size() > 1 and hvd.rank() == 0 and not evaluate:
                # Make sure only the first process in distributed training process the dataset, and the others will use the cache
                hvd.barrier()
            print("Loading features from cached file %s" % cached_features_file)
            features_and_dataset = torch.load(cached_features_file)
            features, dataset, examples = (
                features_and_dataset["features"],
                features_and_dataset["dataset"],
                features_and_dataset["examples"],
            )
        else:
            print("Creating features from dataset file at %s" %  input_dir)

            # No local data files configured for the requested split: fall
            # back to pulling SQuAD via tensorflow_datasets.
            if not config['data_dir'] and ((evaluate and not config['predict_file'] or (not evaluate and not config['train_file']))):
                try:
                    import tensorflow_datasets as tfds
                except ImportError:
                    raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.")

                #if args.version_2_with_negative:
                #    logger.warn("tensorflow_datasets does not handle version 2 of SQuAD.")

                tfds_examples = tfds.load("squad")
                examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)
            else:
                # SQuAD v1 only here (no version_2_with_negative support).
                processor = SquadV1Processor() # SquadV2Processor if args.version_2_with_negative else SquadV1Processor()
                if evaluate:
                    examples = processor.get_dev_examples(config['data_dir'], filename=config['predict_file'])
                else:
                    examples = processor.get_train_examples(config['data_dir'], filename=config['train_file'])

            features, dataset = squad_convert_examples_to_features(
                examples=examples,
                tokenizer=tokenizer,
                max_seq_length=config['max_seq_length'],
                doc_stride=config['doc_stride'],
                max_query_length=config['max_query_length'],
                is_training=not evaluate,
                return_dataset="pt",
                threads=config['num_cpu_threads'],
            )

            # Only rank 0 writes the cache, avoiding concurrent writers.
            if hvd.rank() == 0:
                print("Saving features into cached file %s" % cached_features_file)
                torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file)

            if hvd.size() > 1 and hvd.rank() == 0 and not evaluate:
                # Make sure only the first process in distributed training process the dataset, and the others will use the cache
                hvd.barrier()

        if output_examples:
            return dataset, examples, features

        return dataset

    def tokenizer(self):
        if self._tokenizer is None:
            self._tokenizer = self.build_tokenizer()

        return self._tokenizer

    def build_dataset(self):
        config = self.config()['dataset']
        data_dir = config['data_dir']

        ## Data loading code
        trainset = self.load_and_cache_examples(self.tokenizer(),  evaluate=False, output_examples=False)
        testset = self.load_and_cache_examples(self.tokenizer(), evaluate=True, output_examples=True)

        return trainset, testset

    def build_tokenizer(self):
        """Construct the HuggingFace tokenizer named in the dataset config.

        In distributed runs, non-zero ranks wait on a barrier so that rank 0
        downloads/populates the pretrained-tokenizer cache first; rank 0 then
        joins the barrier to release them.

        Returns:
            A slow (``use_fast=False``) tokenizer that lower-cases its input.
        """
        if hvd.size() > 1 and hvd.rank() > 0:
            hvd.barrier()

        # (Removed a block of commented-out tokenizer/config experiments that
        # had accumulated here.)
        tokenizer = AutoTokenizer.from_pretrained(
            self.config()['dataset']['tokenizer'],
            do_lower_case=True,
            cache_dir=None,
            use_fast=False,
        )

        if hvd.size() > 1 and hvd.rank() == 0:
            hvd.barrier()

        return tokenizer

    def build_model(self):
        """Load the pretrained question-answering model named in the config.

        In distributed runs, non-zero ranks block on a barrier so that only
        rank 0 downloads the pretrained weights into the shared cache; rank 0
        joins the barrier once the model is loaded.

        Returns:
            An ``AutoModelForQuestionAnswering`` instance.
        """
        config = self.config()['dataset']
        if hvd.size() > 1 and hvd.rank() > 0:
            hvd.barrier()

        model_config = AutoConfig.from_pretrained(
            config['model_name_or_path'],
            cache_dir=None,
        )
        model = AutoModelForQuestionAnswering.from_pretrained(
            config['model_name_or_path'],
            # Convert from a TensorFlow checkpoint when one is given.
            from_tf=bool(".ckpt" in config['model_name_or_path']),
            config=model_config,
            cache_dir=None,
        )
        # Removed leftover debug statement: print(type(model)).
        if hvd.size() > 1 and hvd.rank() == 0:
            hvd.barrier()

        return model

    def build_criterion(self):

        return CrossEntropyLoss()

    def build_optimizer(self):
        """Create an AdamW optimizer with weight decay disabled for bias/LayerNorm.

        Hyper-parameters (``lr``, ``epsilon``, ``weight_decay``) are read from
        the ``optimizer`` section of the job config.
        """
        model = self.model()
        opt_conf = self.config()['optimizer']

        # Parameters whose name contains any of these fragments are exempt
        # from weight decay (standard BERT fine-tuning recipe).
        no_decay = ("bias", "LayerNorm.weight")

        decayed, undecayed = [], []
        for name, param in model.named_parameters():
            if any(fragment in name for fragment in no_decay):
                undecayed.append(param)
            else:
                decayed.append(param)

        grouped_parameters = [
            {"params": decayed, "weight_decay": opt_conf['weight_decay']},
            {"params": undecayed, "weight_decay": 0.0},
        ]
        return AdamW(grouped_parameters, lr=opt_conf['lr'], eps=opt_conf['epsilon'])

    def cal_eval_performance(self, batch_outputs, batch_inputs):
        """Convert QA model outputs for one eval batch into SQuAD metrics.

        NOTE(review): this method appears to have been pasted from the
        transformers ``run_squad.py`` evaluation loop and still references
        names that do not exist in this scope (``features``, ``examples``,
        ``args.output_dir``, ``prefix``). Those must be wired to job state —
        ``features``/``examples`` should be the triple returned by
        ``load_and_cache_examples(..., evaluate=True, output_examples=True)``
        — before this can run; they are flagged with TODOs below.

        Args:
            batch_outputs: tuple of logit tensors returned by the model.
            batch_inputs: the matching eval batch; index 3 carries the
                feature index of each row.

        Returns:
            dict: exact-match / F1 results from ``squad_evaluate``.
        """

        def to_list(tensor):
            # run_squad.py helper: detach a tensor to a plain Python list.
            return tensor.detach().cpu().tolist()

        batch = tuple(t.cuda() for t in batch_inputs)

        # (The original rebuilt a model-input dict under torch.no_grad() here,
        # referencing undefined ``args``/``model``; it was never used — the
        # forward pass already happened and its outputs are in batch_outputs —
        # so that dead code was removed.)
        feature_indices = batch[3]

        outputs = batch_outputs
        all_results = []
        for i, feature_index in enumerate(feature_indices):
            # TODO(review): ``features`` is undefined in this scope.
            eval_feature = features[feature_index.item()]
            unique_id = int(eval_feature.unique_id)

            # Per-example logits as plain lists (renamed from the original,
            # which shadowed ``output`` as its own loop variable).
            output = [to_list(tensor_out[i]) for tensor_out in outputs]

            # Some models (XLNet, XLM) use 5 arguments for their predictions,
            # while the other "simpler" models only use two.
            if len(output) >= 5:
                start_logits = output[0]
                start_top_index = output[1]
                end_logits = output[2]
                end_top_index = output[3]
                cls_logits = output[4]

                result = SquadResult(
                    unique_id,
                    start_logits,
                    end_logits,
                    start_top_index=start_top_index,
                    end_top_index=end_top_index,
                    cls_logits=cls_logits,
                )
            else:
                start_logits, end_logits = output
                result = SquadResult(unique_id, start_logits, end_logits)

            all_results.append(result)

        config = self.config()['evaluation']

        # Compute predictions.
        # TODO(review): ``args.output_dir`` and ``prefix`` are undefined here;
        # the output locations should come from the evaluation config.
        output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix))
        output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix))

        # SQuAD v1 (no unanswerable questions), so no null-odds file.
        output_null_log_odds_file = None

        predictions = compute_predictions_logits(
            examples,  # TODO(review): ``examples`` is undefined in this scope.
            features,
            all_results,
            config['n_best_size'],
            config['max_answer_length'],
            config['do_lower_case'],
            output_prediction_file,
            output_nbest_file,
            output_null_log_odds_file,
            True,   # verbose logging
            False,  # version_2_with_negative — v1 here, matching SquadV1Processor
            # BUG FIX: the original indexed the local ``output`` list with a
            # string key; the threshold lives in the evaluation config.
            config['null_score_diff_threshold'],
            # BUG FIX: ``tokenizer`` was undefined; use the job's tokenizer.
            self.tokenizer(),
        )

        # Compute the F1 and exact scores.
        results = squad_evaluate(examples, predictions)
        return results

    def training_step(self, batch, model):

        #batch = tuple(t.cuda() for t in batch)

        inputs = {
            "input_ids": batch[0],
            "attention_mask": batch[1],
            "token_type_ids": batch[2],
            "start_positions": batch[3],
            "end_positions": batch[4],
        }

        outputs = model(**inputs)
        loss = outputs[0]
        
        return loss, outputs[1:]

       # # special for step()
       # torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
       # optimizer.step()


#def train():
#    conf_yaml = 'bert.yaml'
#    job = BERTJob(conf_yaml)
#    t = trainer.Trainer(job)
#    t.fit()
#
#if __name__ == '__main__':
#    train()
