import argparse
import time
import pickle
import random

from transformers import *

from basic.Config import *

from driver.Optimizer import *
from driver.Dataloader import *
from driver.Parser import *
from modules.DCDecoder import *

from modules.GlobalEncoder import *

from torch.cuda.amp import autocast as autocast
from torch.cuda.amp.grad_scaler import GradScaler


def train(train_inst, parser, vocab, config, tokenizer):
    """Train the discourse parser with mixed-precision and gradient accumulation.

    Args:
        train_inst: list of training instances (already tokenized/labeled).
        parser: DisParser wrapping a global_encoder (PLM) and a dc_decoder.
        vocab: project vocabulary (unused here directly; kept for interface parity).
        config: Configurable with learning rates, batch sizes, save options, etc.
        tokenizer: HuggingFace tokenizer, saved alongside checkpoints.

    Side effects: updates parser parameters in place; periodically saves the
    PLM encoder and tokenizer to ``config.save_model_path + "_" + global_step``.
    """
    # Two parameter groups: the pretrained encoder gets its own (smaller) LR.
    bert_param = list(parser.global_encoder.parameters())
    parser_param = list(parser.dc_decoder.parameters())

    model_param = [
        {'params': bert_param, 'lr': config.bert_learning_rate},
        {'params': parser_param, 'lr': config.learning_rate}
    ]

    opt = Optimizer(model_param, config, config.learning_rate)
    scaler = GradScaler()  # loss scaling for mixed-precision (autocast) training

    global_step = 0
    batch_num = int(np.ceil(len(train_inst) / float(config.train_batch_size)))

    for epoch in range(config.train_iters):  # renamed from `iter` (shadowed builtin)
        start_time = time.time()
        print('Iteration: ' + str(epoch))
        batch_iter = 0

        overall_action_correct, overall_total_action = 0, 0
        for onebatch in data_iter(train_inst, config.train_batch_size, True):
            doc_inputs = input_variable(onebatch)
            edu_offset = offset_variable(onebatch)
            b_win = [instance.win for instance in onebatch]
            batch_DC_ids = label_variable(onebatch)

            parser.train()
            with autocast():
                parser.forward(doc_inputs, edu_offset, b_win)
                loss = parser.compute_loss(batch_DC_ids)
                # Normalize so accumulated gradients match a full-batch update.
                loss = loss / config.update_every

            scaler.scale(loss).backward()
            loss_value = loss.item()  # `.data` is deprecated; .item() detaches safely

            total_actions, correct_actions = parser.compute_accuracy(batch_DC_ids)
            overall_total_action += total_actions
            overall_action_correct += correct_actions
            during_time = float(time.time() - start_time)
            # Guard against division by zero on a degenerate (empty-action) batch.
            acc = overall_action_correct / overall_total_action if overall_total_action else 0.0
            print("Step:%d, Iter:%d, batch:%d, time:%.2f, acc:%.2f, loss:%.8f"
                  % (global_step, epoch, batch_iter, during_time, acc, loss_value))
            batch_iter += 1

            # Step the optimizer every `update_every` batches (and at epoch end).
            if batch_iter % config.update_every == 0 or batch_iter == batch_num:
                scaler.unscale_(opt.optim)
                # NOTE(review): only decoder params are clipped, not the encoder's —
                # presumably intentional (PLM fine-tuning), but worth confirming.
                nn.utils.clip_grad_norm_(parser_param, max_norm=config.clip)
                scaler.step(opt.optim)
                scaler.update()

                opt.schedule()
                opt.zero_grad()
                global_step += 1

        # Periodic checkpointing of the encoder + tokenizer (decoder is not saved here).
        if config.save_after >= 0 and epoch >= config.save_after and epoch % config.save_interval == 0:
            save_model_path = config.save_model_path + "_" + str(global_step)
            parser.global_encoder.plm_model.save_pretrained(save_model_path)
            tokenizer.save_pretrained(save_model_path)
            print('Saving model to ', save_model_path)


if __name__ == '__main__':
    # Entry point: set up reproducibility, load config/data/models, then train.
    print("Process ID {}, Process Parent ID {}".format(os.getpid(), os.getppid()))

    # Fix all RNG seeds before any model construction for reproducibility.
    random.seed(666)
    np.random.seed(666)
    torch.cuda.manual_seed(666)
    torch.manual_seed(666)

    # torch version
    print("Torch Version: ", torch.__version__)

    ### gpu
    gpu = torch.cuda.is_available()
    print("GPU available: ", gpu)
    print("CuDNN: \n", torch.backends.cudnn.enabled)

    # Command-line arguments; unknown args are forwarded to Configurable
    # so the .cfg file's values can be overridden from the command line.
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--config_file', default='examples/default.cfg')
    argparser.add_argument('--model', default='BaseParser')
    argparser.add_argument('--thread', default=4, type=int, help='thread num')
    argparser.add_argument('--use-cuda', action='store_true', default=True)

    args, extra_args = argparser.parse_known_args()
    config = Configurable(args.config_file, extra_args)

    # Load training corpus and build the vocabulary from it plus the DC label file.
    train_data = read_corpus(config.train_file, config.max_edu_num)
    vocab = creatVocab(train_data, config.DC_file, config.min_occur_count)
    #vec = vocab.load_pretrained_embs(config.pretrained_embeddings_file)# load extword table and embeddings

    torch.set_num_threads(args.thread)

    # CUDA is used only when both the hardware supports it and --use-cuda is set.
    config.use_cuda = False
    if gpu and args.use_cuda: config.use_cuda = True
    print("\nGPU using status: ", config.use_cuda)

    start_a = time.time()

    print("train num: ", len(train_data))

    # Load the pretrained language model (encoder backbone) and its tokenizer.
    print('Load pretrained encoder.....')
    tokenizer = AutoTokenizer.from_pretrained(config.plm_dir)
    plm_model = AutoModel.from_pretrained(config.plm_dir)
    print('Load pretrained encoder ok')

    # Attach gold DC labels and subword tokenization to the training instances.
    dc_labeling(train_data, vocab)
    doc_tokenization(train_data, tokenizer)

    global_encoder = GlobalEncoder(vocab, config, plm_model)
    dc_decoder = DCDecoder(vocab, config)

    # Persist the vocabulary so inference can reload the exact same label mapping.
    pickle.dump(vocab, open(config.save_vocab_path, 'wb'))

    # Move modules to GPU before they are wrapped into the parser.
    if config.use_cuda:
        torch.backends.cudnn.enabled = True
        #torch.backends.cudnn.benchmark = True
        global_encoder.cuda()
        dc_decoder.cuda()

    parser = DisParser(global_encoder, dc_decoder, config)
    train(train_data, parser, vocab, config, tokenizer)

