# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""


import argparse
import copy
import glob
import json
import logging
import os
import random
import time

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, SubsetRandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange

import warnings



warnings.filterwarnings("ignore")

import sys
sys.path.append("./")

from src.darts_based.visualization import compose_head_weights_gif, compose_layer_weights_gif, draw_head_weights_best, \
    draw_layer_weights_best

from src.darts_based.sub_network import SubClassifierNetwork

from src.transformers.modeling_bert import BertForSequenceClassification1, BertForSequenceClassification2
from src.transformers import (
    WEIGHTS_NAME,
    AdamW,
    AlbertConfig,
    AlbertForSequenceClassification,
    AlbertTokenizer,
    BertConfig,
    BertForSequenceClassification,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForSequenceClassification,
    DistilBertTokenizer,
    FlaubertConfig,
    FlaubertForSequenceClassification,
    FlaubertTokenizer,
    RobertaConfig,
    RobertaForSequenceClassification,
    RobertaTokenizer,
    XLMConfig,
    XLMForSequenceClassification,
    XLMRobertaConfig,
    XLMRobertaForSequenceClassification,
    XLMRobertaTokenizer,
    XLMTokenizer,
    XLNetConfig,
    XLNetForSequenceClassification,
    XLNetTokenizer,
    get_linear_schedule_with_warmup,
)
from src.transformers import glue_compute_metrics as compute_metrics
from src.transformers import glue_convert_examples_to_features as convert_examples_to_features
from src.transformers import glue_output_modes as output_modes
from src.transformers import glue_processors as processors

try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    from tensorboardX import SummaryWriter

logger = logging.getLogger(__name__)

# Union of all pretrained checkpoint shortcut names known to the supported
# config classes; used only to build the --model_name_or_path help text.
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            BertConfig,
            XLNetConfig,
            XLMConfig,
            RobertaConfig,
            DistilBertConfig,
            AlbertConfig,
            XLMRobertaConfig,
            FlaubertConfig,
        )
    ),
    (),
)

# Maps --model_type to (config class, model class, tokenizer class).
# NOTE(review): "bert" is deliberately wired to the project variant
# BertForSequenceClassification1 (not the stock class) — the commented
# alternatives below show the other candidates that were tried.
MODEL_CLASSES = {
    # "bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
    "bert": (BertConfig, BertForSequenceClassification1, BertTokenizer),
    # "bert": (BertConfig, BertForSequenceClassification2, BertTokenizer),
    "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
}


def set_seed(args):
    """Seed all RNGs (python, numpy, torch, and CUDA when GPUs are used).

    Args:
        args: parsed CLI namespace; reads ``args.seed`` and ``args.n_gpu``.
    """
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        # seed every visible CUDA device, not just the current one
        torch.cuda.manual_seed_all(seed)


def load_and_cache_examples(args, task, tokenizer, mode="dev"):
    """Return a TensorDataset of features for `task`, using an on-disk cache.

    Args:
        args: parsed CLI namespace (reads output_dir, data_dir, model
            settings, overwrite_cache).
        task: GLUE task name (key into the ``processors`` registry).
        tokenizer: tokenizer used to convert examples to features.
        mode: which split to load — "train", "dev", or "test".

    Returns:
        TensorDataset of (input_ids, attention_mask, token_type_ids, labels).
    """
    processor = processors[task]()
    output_mode = output_modes[task]

    # NOTE(review): the feature cache lives under output_dir, not data_dir.
    model_tag = list(filter(None, args.model_name_or_path.split("/"))).pop()
    cached_features_file = os.path.join(
        args.output_dir,
        "cached_{}_{}_{}_{}".format(mode, model_tag, str(args.max_seq_length), str(task)),
    )

    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]

        if mode == "train":
            examples = processor.get_train_examples(args.data_dir)
        elif mode == "dev":
            examples = processor.get_dev_examples(args.data_dir)
        else:
            examples = processor.get_test_examples(args.data_dir)

        is_xlnet = args.model_type in ["xlnet"]
        features = convert_examples_to_features(
            examples,
            tokenizer,
            label_list=label_list,
            max_length=args.max_seq_length,
            output_mode=output_mode,
            pad_on_left=bool(is_xlnet),  # pad on the left for xlnet
            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
            pad_token_segment_id=4 if is_xlnet else 0,
        )

        logger.info("Saving features into cached file %s", cached_features_file)
        torch.save(features, cached_features_file)

    # Convert the feature lists into tensors.
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    if output_mode == "classification":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    elif output_mode == "regression":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

    return TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)


def train_search(
        args=None,
        train_dataset=None,
        model=None,
        tokenizer=None,
        processor=None
):
    """Train the model weights and search the architecture weights jointly.

    At the start of every epoch the training indices are reshuffled and split:
    the first ``args.train_portion`` fraction updates the model weights, the
    remainder updates the architecture ("arch") parameters once
    ``epoch_idx >= args.start_search_epoch``. Evaluates on dev every
    ``args.save_steps`` steps, keeps the best checkpoint, and early-stops after
    ``args.max_patience`` evaluations without improvement.

    Returns:
        (best_score, best_results, best_model): mean dev score of the best
        checkpoint, its metric dicts, and a deep copy of the best model.
    """

    tb_writer = SummaryWriter()

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    # NOTE(review): this counts training *examples* per epoch, not optimizer
    # steps (no division by batch size), so the LR schedule decays far more
    # slowly than the true step count — confirm whether this is intended.
    t_total = int(np.floor(args.train_portion * len(train_dataset))) * args.num_train_epochs

    # Weight-optimizer groups: architecture params ("arch_*") are excluded;
    # biases and LayerNorm weights get no weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [
                p for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)
                   and p.requires_grad
                   and "arch_" not in n
            ],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [
                p for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)
                   and p.requires_grad
                   and "arch_" not in n
            ],
            "weight_decay": 0.0
        },
    ]

    optimizer = AdamW(
        optimizer_grouped_parameters,
        lr=args.learning_rate,
        eps=args.adam_epsilon
    )
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=args.warmup_steps,
        num_training_steps=t_total
    )

    # Separate optimizer for the architecture parameters only.
    search_optimizer = AdamW(
        [p for n, p in model.named_parameters() if 'arch' in n],
        lr=args.search_learning_rate,
        eps=args.adam_epsilon
    )

    # multi-gpu training (should be after apex fp16 initialization).
    # Keep a handle on the unwrapped model: custom attributes such as
    # head_weights / layer_weights / arch_parameters() are NOT forwarded by
    # torch.nn.DataParallel, so the original code crashed when n_gpu > 1.
    model_without_dp = model
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    global_step = 0

    best_model = None
    best_score = -100.0
    best_results = None
    patience = args.max_patience
    stop_training = False  # set once patience is exhausted; also ends the epoch loop

    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        0, int(args.num_train_epochs), desc="Epoch", disable=False,
    )
    set_seed(args)  # Added here for reproducibility
    for epoch_idx, _ in enumerate(train_iterator):

        # split part of the train data for arch search, at each epoch start
        num_train = len(train_dataset)
        indices = list(range(num_train))
        random.shuffle(indices)
        split = int(np.floor(args.train_portion * num_train))

        train_sampler = SubsetRandomSampler(indices[:split])
        train_dataloader = DataLoader(
            train_dataset,
            sampler=train_sampler,
            batch_size=args.train_batch_size,
            pin_memory=True, num_workers=0
        )

        search_sampler = SubsetRandomSampler(indices[split:num_train])
        search_dataloader = DataLoader(
            train_dataset,
            sampler=search_sampler,
            batch_size=args.train_batch_size,
            pin_memory=True, num_workers=0
        )
        # Persistent iterator over the search split. The original
        # `next(iter(search_dataloader))` rebuilt the loader iterator on every
        # step, paying the setup cost each time and never advancing a pass.
        search_iter = iter(search_dataloader)

        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=False)

        for step, batch in enumerate(epoch_iterator):

            model.train()
            optimizer.zero_grad()

            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "token_type_ids": batch[2],
                "labels": batch[3]}
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)

            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training

            loss.backward()
            tr_loss += loss.item()
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            optimizer.step()
            scheduler.step()  # Update learning rate schedule
            global_step += 1

            # search step: update the architecture weights on a held-out batch
            if epoch_idx >= args.start_search_epoch:
                search_optimizer.zero_grad()

                try:
                    search_batch = next(search_iter)
                except StopIteration:
                    # search split exhausted — start a new pass over it
                    search_iter = iter(search_dataloader)
                    search_batch = next(search_iter)
                search_batch = tuple(t.to(args.device) for t in search_batch)
                search_inputs = {
                    "input_ids": search_batch[0],
                    "attention_mask": search_batch[1],
                    "token_type_ids": search_batch[2],
                    "labels": search_batch[3]
                }
                search_outputs = model(**search_inputs)

                search_loss = search_outputs[0]
                if args.n_gpu > 1:
                    search_loss = search_loss.mean()  # mean() to average on multi-gpu parallel training

                search_loss.backward()
                torch.nn.utils.clip_grad_norm_(model_without_dp.arch_parameters(), args.max_grad_norm)
                search_optimizer.step()

            # logging step: show the loss on train
            if global_step > 0 and global_step % args.logging_steps == 0:
                logs = {}

                loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                learning_rate_scalar = scheduler.get_lr()[0]
                logs["learning_rate"] = learning_rate_scalar
                logs["loss"] = loss_scalar
                logging_loss = tr_loss

                for key, value in logs.items():
                    tb_writer.add_scalar(key, value, global_step)
                print(json.dumps({**logs, **{"step": global_step}}))

                # snapshot the architecture weights for later visualization
                torch.save(model_without_dp.head_weights, os.path.join(args.output_dir, "arch_weights/",  "head_weights_step_%d.bin" % global_step))
                torch.save(model_without_dp.layer_weights, os.path.join(args.output_dir, "arch_weights/", "layer_weights_step_%d.bin" % global_step))

            # do eval when global_step % save_steps == 0
            if global_step > 0 and global_step % args.save_steps == 0:

                logs = {}

                results = evaluate(args, model, tokenizer=tokenizer, processor=processor, mode="dev")
                print("results: ", results)

                for res in results:
                    for key, value in res.items():
                        eval_key = "eval_{}".format(key)
                        logs[eval_key] = value

                loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                learning_rate_scalar = scheduler.get_lr()[0]
                logs["learning_rate"] = learning_rate_scalar
                logs["loss"] = loss_scalar
                logging_loss = tr_loss

                for key, value in logs.items():
                    tb_writer.add_scalar(key, value, global_step)
                # print once, outside the loop (the original printed the full
                # JSON once per logged key)
                print(json.dumps({**logs, **{"step": global_step}}))

                # update best score and best model (mean score over tasks)
                score_ = sum([res["score"] for res in results]) / len(results)

                if score_ > best_score:
                    patience = args.max_patience
                    best_score = score_
                    best_results = results

                    # save this model — always the unwrapped module, never the
                    # DataParallel wrapper
                    model_to_save = model_without_dp
                    best_model = copy.deepcopy(model_to_save)

                    output_model_file = os.path.join(args.output_dir, "best_model.bin")
                    torch.save(model_to_save.state_dict(), output_model_file)

                    tokenizer.save_pretrained(args.output_dir)
                    torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", args.output_dir)

                    # save the head_weights and layer_weights of the best model
                    torch.save(model_to_save.head_weights,
                               os.path.join(args.output_dir, "head_weights_best_model.bin"))
                    torch.save(model_to_save.layer_weights,
                               os.path.join(args.output_dir, "layer_weights_best_model.bin"))

                else:
                    patience -= 1

                    if patience <= 0:
                        # early stopping: the flag also terminates the epoch
                        # loop (the original `break` only left the step loop,
                        # so training silently continued for remaining epochs)
                        stop_training = True
                        break

            time.sleep(0.5)

        if stop_training:
            break

    tb_writer.close()
    return best_score, best_results, best_model
        

def evaluate(args=None, model=None, tokenizer=None, processor=None, mode="dev"):
    """Evaluate `model` on the given split of the configured task(s).

    Loops twice for MNLI (matched / mismatched), once otherwise.

    Args:
        args: parsed CLI namespace (reads task_name, output_dir, device,
            batch sizes, output_mode, n_gpu).
        model: the classifier to evaluate.
        tokenizer: tokenizer used when (re)building the feature cache.
        processor: unused here; kept for signature compatibility with callers.
        mode: split to evaluate on — "dev" or "test".

    Returns:
        list of metric dicts, one per evaluated task.
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") \
        if args.task_name == "mnli" else (args.output_dir,)

    results = []
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(
            args, eval_task, tokenizer, mode=mode
        )

        # exist_ok avoids the check-then-create race of the original code
        os.makedirs(eval_output_dir, exist_ok=True)

        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Sequential sampling keeps predictions aligned with the dataset order.
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

        # multi-gpu eval (do not double-wrap an already wrapped model)
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)

        # Eval!
        logger.info("***** Running evaluation {} *****".format(mode))
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        model.eval()  # hoisted: no need to switch mode once per batch
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(args.device) for t in batch)

            with torch.no_grad():
                inputs = {
                    "input_ids": batch[0],
                    "attention_mask": batch[1],
                    "token_type_ids": batch[2],
                    "labels": batch[3],
                }
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]  # outputs: loss, logits, pooled_output, hidden_states, attentions

                eval_loss += tmp_eval_loss.mean().item()

            nb_eval_steps += 1
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(
                    out_label_ids,
                    inputs["labels"].detach().cpu().numpy(),
                    axis=0
                )

        eval_loss = eval_loss / nb_eval_steps
        if args.output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)
        result = compute_metrics(
            eval_task,
            preds,
            out_label_ids
        )
        results.append(result)

    return results


def main():
    """Parse CLI args, run the search-training loop on a GLUE task, then test and visualize."""
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="type of the pretrained model",
    )

    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )

    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
    )

    parser.add_argument(
        "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )

    parser.add_argument(
        "--student_encoder_name",
        default="lstm", type=str,
        help="encoder name for the student model"
    )
    parser.add_argument(
        "--classifier_dropout_prob",
        default=0.1, type=float,
        help="dropout rate for the classification layer."
    )
    parser.add_argument(
        "--train_portion",
        default=0.1, type=float,
        help="proportion of train data used for training, the rest of training data is for search."
    )
    parser.add_argument(
        "--kd_loss_weight",
        default=0.3, type=float,
        help="weight for the kd loss."
    )
    parser.add_argument(
        "--start_search_epoch",
        default=1.0, type=float,
        help="first warm up the student, then do search after start_search_epoch."
    )
    parser.add_argument(
        "--max_patience",
        default=5, type=int,
        help="patience for early stopping."
    )

    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")

    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")

    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")

    # new params ---------------------------

    parser.add_argument("--search_learning_rate", default=10e-5, type=float,
                        help="The initial learning rate for Adam.")

    # --------------------------------------

    args = parser.parse_args()

    # Refuse to overwrite a non-empty output dir unless explicitly allowed.
    # This guard must run BEFORE the makedirs calls below: the original code
    # created "arch_weights/" and "visualization/" first, which made the
    # output dir non-empty and tripped this check on every fresh run.
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and not args.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )
    os.makedirs(args.output_dir, exist_ok=True)
    os.makedirs(os.path.join(args.output_dir, "arch_weights/"), exist_ok=True)
    os.makedirs(os.path.join(args.output_dir, "visualization/"), exist_ok=True)

    # Setup CUDA, GPU & distributed training
    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    args.device = device

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.warning(
        "device: %s, n_gpu: %s, distributed training: %s",
        device,
        args.n_gpu,
        False,
    )

    # Set seed
    set_seed(args)

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    # Load pretrained model and tokenizer; hidden states and attentions are
    # needed by the sub-network that searches over heads/layers.
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    bert_config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
        output_hidden_states=True,
        output_attentions=True,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    bert_model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=bert_config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    bert_model.to(args.device)

    # initialize sub model with architecture parameters for search
    sub_model = SubClassifierNetwork(
        args, bert_config, bert_model
    )
    sub_model.to(args.device)
    sub_model.head_weights.to(args.device)
    sub_model.layer_weights.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # load datasets (dev/test splits are loaded lazily inside evaluate())
    train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, mode="train")

    print("train_dataset: ", len(train_dataset))

    # search training
    best_score, best_results, best_sub_model = train_search(
        args=args,
        train_dataset=train_dataset,
        model=sub_model,
        tokenizer=tokenizer,
        processor=processor,
    )
    print("best_score: ", best_score)
    print("best_results: ", best_results)
    print("best_model head_weights: ", best_sub_model.head_weights)
    print("best_model layer_weights: ", best_sub_model.layer_weights)

    # do test, without pruning
    test_results = evaluate(args, best_sub_model, tokenizer=tokenizer, processor=processor, mode="test")

    print("best_results: ", best_results)
    print("test_results: ", test_results)

    # compose the head-weights animation over training steps
    compose_head_weights_gif(args.output_dir)

    # compose the layer-weights animation over training steps
    compose_layer_weights_gif(args.output_dir)

    # head weights of best model
    draw_head_weights_best(args.output_dir)
    # layer weights of best model
    draw_layer_weights_best(args.output_dir)



# Script entry point: run the full parse/train/search/evaluate pipeline.
if __name__ == "__main__":
    main()







