"""Train and evaluate the model"""

import argparse
import random
import logging
import os
import re

import numpy as np
import torch
from torch.optim import Adam
import torch.nn as nn
from torch.optim.lr_scheduler import LambdaLR
from tqdm import trange

from pytorch_pretrained_bert import BertForTokenClassification

from data_loader import DataLoader
from evaluate import evaluate
import utils
import time
import sys
from torch.cuda.amp import autocast, GradScaler
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir + "/../../../../tools/utils/")
from metric import MetricCollector

# Command-line interface: dataset/model locations, device selection, and
# the various mixed-precision training modes (apex fp16, cnmix, torch amp).
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default='data/msra', help="Directory containing the dataset")
parser.add_argument('--bert_model_dir', default='bert-base-chinese-pytorch', help="Directory containing the BERT model in PyTorch")
parser.add_argument('--model_dir', default='experiments/base_model', help="Directory containing params.json")
parser.add_argument('--seed', type=int, default=2019, help="random seed for initialization")
parser.add_argument('--restore_file', default=None,
                    help="Optional, name of the file in --model_dir containing weights to reload before training")
parser.add_argument('--multi_device', default=False, action='store_true', help="Whether to use multiple Devices if available")
parser.add_argument('--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale', type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
parser.add_argument('--device', default='cpu', type=str, help='Use cpu gpu or mlu device')
parser.add_argument('--device_id', default=None, type=int,
                        help='Use specified device for training, useless in multiprocessing distributed training')
parser.add_argument('--iters', type=int, default=-1, metavar='N',
                        help='train iters per epoch')
parser.add_argument('--batch_size', type=int, default=0,
                        help='training batch size. If set, it will override the parameters in json')
parser.add_argument('--eval_iters', type=int, default=-1, metavar='N',
                        help='eval iters per epoch')
parser.add_argument('--run_epochs', type=int, default=-1,
                        help='epochs to train')
parser.add_argument('--max_bitwidth',
                    action='store_true',
                    help='use Max Bitwidth of MLU training')
parser.add_argument('--cnmix', action='store_true', default=False,
                    help='use cnmix for mixed precision training')
parser.add_argument('--opt_level', type=str, default='O1',
                        help='choose level of mixing precision')
parser.add_argument('--pyamp', action='store_true', default=False,
                    help='use pytorch amp for mixed precision training')

# NOTE: arguments are parsed at import time; this module is intended to be
# run as a script, not imported as a library.
args = parser.parse_args()
# MLU support lives in the torch_mlu extension; import it only when requested
# so the script still runs on machines without the Cambricon stack.
if args.device == 'mlu':
    import torch_mlu
    import torch_mlu.core.mlu_model as ct

# Single module-level GradScaler shared by train() and checkpointing when
# PyTorch AMP (--pyamp) is enabled; stays None otherwise.
scaler=None
if args.pyamp:
    scaler = GradScaler()
# cnmix is Cambricon's mixed-precision library; optional dependency.
if args.cnmix:
    import cnmix

def train(model, data_iterator, optimizer, scheduler, params):
    """Train the model for one epoch (`params.train_steps` batches).

    Args:
        model: BERT token-classification model; returns the loss when
            called with `labels`.
        data_iterator: generator yielding (batch_data, batch_tags) tensors.
        optimizer: optimizer to step; an apex FP16_Optimizer wrapper when
            --fp16 is set.
        scheduler: LR scheduler, stepped once at the start of each epoch.
        params: hyper-parameter container (train_steps, clip_grad,
            n_device, batch_size).
    """
    # set model to training mode
    model.train()
    # one scheduler step per epoch (epoch-wise LR decay)
    scheduler.step()

    # running averages for loss and per-batch wall time
    loss_avg = utils.RunningAverage()
    batch_avg = utils.RunningAverage()
    end = time.time()
    # for internal benchmark test
    metric_collector = MetricCollector(
            enable_only_benchmark=True,
            record_elapsed_time=True,
            record_hardware_time=True if args.device == "mlu" else False)
    metric_collector.place()

    # Use tqdm for progress bar
    t = trange(params.train_steps)
    for i in t:
        # allow truncating the epoch for benchmark runs (--iters)
        if i == args.iters:
            break
        # fetch the next training batch
        batch_data, batch_tags = next(data_iterator)
        # attention mask over non-pad positions (token id 0 is the pad index)
        batch_masks = batch_data.gt(0)

        # compute model output and loss (autocast is a no-op unless --pyamp)
        with autocast(enabled=args.pyamp):
            loss = model(batch_data, token_type_ids=None, attention_mask=batch_masks, labels=batch_tags)

        if params.n_device > 1 and args.multi_device:
            loss = loss.mean()  # mean() to average on multi-gpu

        # clear previous gradients, compute gradients of all variables wrt loss
        model.zero_grad()
        if args.fp16:
            optimizer.backward(loss)
        elif args.cnmix:
            with cnmix.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        elif args.pyamp:
            scaler.scale(loss).backward()
            # BUG FIX: gradients must be unscaled before clipping, otherwise
            # clip_grad_norm_ below operates on loss-scaled gradients and the
            # clip threshold params.clip_grad is effectively meaningless.
            scaler.unscale_(optimizer)
        else:
            loss.backward()

        # gradient clipping
        nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=params.clip_grad)

        # performs updates using calculated gradients
        if args.pyamp:
            # scaler.step skips the update if inf/nan gradients were found
            scaler.step(optimizer)
            scaler.update()
        else:
            optimizer.step()

        # update the average loss and batch time
        loss_avg.update(loss.item())
        batch_avg.update(time.time() - end)
        end = time.time()
        t.set_postfix(loss='{:05.3f}'.format(loss_avg()))

        # MetricCollector record
        metric_collector.record()
        metric_collector.place()

    # label the run with the precision mode actually used
    if args.cnmix:
        precision = args.opt_level
    elif args.pyamp:
        precision = "amp"
    else:
        precision = "fp32"

    # insert metrics and dump metrics
    metric_collector.insert_metrics(
        net = "bert_base_finetune_msra_ner",
        batch_size = params.batch_size,
        precision = precision,
        cards = 1,
        DPF_mode = "single")

    metric_collector.dump()


def train_and_evaluate(model, train_data, val_data, optimizer, scheduler, params, model_dir, restore_file=None):
    """Train the model and evaluate every epoch.

    Args:
        model: the model to train (moved to device by the caller).
        train_data / val_data: dataset dicts produced by DataLoader.load_data.
        optimizer: optimizer for the model parameters.
        scheduler: LR scheduler passed through to train().
        params: hyper-parameter container (train_size, val_size, batch_size,
            epoch_num, patience, patience_num, min_epoch_num, ...).
        model_dir: directory where checkpoints are written.
        restore_file: optional checkpoint basename to reload before training.
    """
    if args.cnmix:
        # BUG FIX: cnmix.initialize returns (model, optimizer). The original
        # code read `model. optimizer = cnmix.initialize(...)`, which parses
        # as an assignment to the attribute `model.optimizer` and silently
        # discards the initialized model/optimizer pair.
        model, optimizer = cnmix.initialize(model, optimizer, opt_level=args.opt_level)
        cnmix.core.cnmix_set_amp_quantify_params('all',{'batch_size': params.batch_size,
                                                        'data_num': params.train_size,
                                                        "quantify_rate": 1})
    # reload weights from restore_file if specified
    if restore_file is not None:
        # use the function's own parameters (identical values at the current
        # call site) instead of reaching back into the module-level args
        restore_path = os.path.join(model_dir, restore_file + '.pth.tar')
        logging.info("Restoring parameters from {}".format(restore_path))
        utils.load_checkpoint(restore_path, model, scaler, optimizer)

    best_val_f1 = 0.0
    patience_counter = 0

    # --run_epochs overrides the epoch count from params.json when given
    if args.run_epochs != -1:
        run_epochs = args.run_epochs
    else:
        run_epochs = params.epoch_num
    for epoch in range(1, run_epochs + 1):
        # Run one epoch
        logging.info("Epoch {}/{}".format(epoch, run_epochs))

        # Compute number of batches in one epoch
        params.train_steps = params.train_size // params.batch_size
        params.val_steps = params.val_size // params.batch_size

        # data iterator for training
        train_data_iterator = data_loader.data_iterator(train_data, shuffle=True)
        # Train for one epoch on training set
        train(model, train_data_iterator, optimizer, scheduler, params)
        # benchmark runs only time one training epoch; skip eval/checkpointing
        if os.getenv('BENCHMARK_LOG'):
            break

        # data iterator for evaluation
        train_data_iterator = data_loader.data_iterator(train_data, shuffle=False)
        val_data_iterator = data_loader.data_iterator(val_data, shuffle=False)

        # Evaluate for one epoch on training set and validation set
        params.eval_steps = params.train_steps
        #train_metrics = evaluate(model, train_data_iterator, params, args, mark='Train')
        params.eval_steps = params.val_steps
        val_metrics = evaluate(model, val_data_iterator, params, mark='Val')

        val_f1 = val_metrics['f1']
        improve_f1 = val_f1 - best_val_f1

        # Save weights of the network
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
        # unwrap the apex FP16_Optimizer so the checkpoint holds the real optimizer state
        optimizer_to_save = optimizer.optimizer if args.fp16 else optimizer
        checkpoint_state = {'epoch': epoch + 1, 'state_dict': model_to_save.state_dict(), 'optim_dict': optimizer_to_save.state_dict()}
        if args.cnmix:
            checkpoint_state["cnmix"] = cnmix.state_dict()
        if args.pyamp:
            checkpoint_state["amp"] = scaler.state_dict()

        utils.save_checkpoint(checkpoint_state,
                              is_best=improve_f1>0,
                              checkpoint=model_dir)
        if improve_f1 > 0:
            logging.info("- Found new best F1")
            best_val_f1 = val_f1
            # an improvement smaller than params.patience still counts toward
            # early stopping; only a significant gain resets the counter
            if improve_f1 < params.patience:
                patience_counter += 1
            else:
                patience_counter = 0
        else:
            patience_counter += 1

        # Early stopping and logging best f1
        if (patience_counter >= params.patience_num and epoch > params.min_epoch_num) or epoch == run_epochs:
            logging.info("Best val f1: {:05.2f}".format(best_val_f1))
            break


if __name__ == '__main__':
    # Load the parameters from json file
    json_path = os.path.join(args.model_dir, 'params.json')
    assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
    params = utils.Params(json_path)
    # --batch_size overrides the value from params.json when non-zero
    params.batch_size = args.batch_size if args.batch_size else params.batch_size
    # Select the compute device; n_device is fixed at 1 (DP handled below)
    if args.device == 'mlu':
        params.device = torch.device('mlu')
        params.n_device = 1
    else:
        params.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        params.n_device = 1
    params.multi_device = args.multi_device

    # Set the random seed for reproducible experiments
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.device == 'gpu' and params.n_device > 0:
        torch.cuda.manual_seed_all(args.seed)  # set random seed for all GPUs
    params.seed = args.seed

    # Set the logger
    utils.set_logger(os.path.join(args.model_dir, 'train.log'))
    logging.info("device: {}, n_device: {}, 16-bits training: {}".format(params.device, params.n_device, args.fp16))

    # Create the input data pipeline
    logging.info("Loading the datasets...")

    # Initialize the DataLoader
    data_loader = DataLoader(args.data_dir, args.bert_model_dir, params, token_pad_idx=0)

    # Load training data and test data
    train_data = data_loader.load_data('train')
    val_data = data_loader.load_data('val')

    # Specify the training and validation dataset sizes
    params.train_size = train_data['size']
    params.val_size = val_data['size']

    # Prepare model
    model = BertForTokenClassification.from_pretrained(args.bert_model_dir, num_labels=len(params.tag2idx), len_train=len(train_data), args=args)
    model.to(params.device)
    if args.fp16:
        model.half()

    # DP for gpu, mlu not support DP
    if args.device=='gpu' and params.n_device > 1 and args.multi_device:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer: with full fine-tuning, apply weight decay to all
    # parameters except biases and LayerNorm weights; otherwise train only
    # the classifier head.
    if params.full_finetuning:
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        # no_decay = ['bias', 'gamma', 'beta']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
             'weight_decay_rate': 0.01},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
             'weight_decay_rate': 0.0}
        ]
    else:
        param_optimizer = list(model.classifier.named_parameters())
        optimizer_grouped_parameters = [{'params': [p for n, p in param_optimizer]}]
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            # BUG FIX: error message previously read "lease install apex"
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=params.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        scheduler = LambdaLR(optimizer, lr_lambda=lambda epoch: 1/(1 + 0.05*epoch))
        # loss_scale == 0 selects dynamic loss scaling (see --loss_scale help)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
    else:
        optimizer = Adam(optimizer_grouped_parameters, lr=params.learning_rate)
        scheduler = LambdaLR(optimizer, lr_lambda=lambda epoch: 1/(1 + 0.05*epoch))

    # Train and evaluate the model
    logging.info("Starting training for {} epoch(s)".format(params.epoch_num))


    train_and_evaluate(model, train_data, val_data, optimizer, scheduler, params, args.model_dir, args.restore_file)
