import os
import torch
import argparse
import random
import pickle
import time
import numpy as np

from basic.Config import *
from basic.Vocab import *
from basic.Optimizer import *

from driver.InstanceReader import *
from driver.Dataset import *
from driver.Aligner import *

from modules.AutoTune import *
from modules.GlobalEncoder import *
from modules.Decoder import *

from transformers.models.auto.tokenization_auto import AutoTokenizer

from torch.cuda.amp import autocast as autocast
from torch.cuda.amp.grad_scaler import GradScaler

def train(train_data, aligner, vocab, config):
    """Train the aligner with mixed precision (AMP) and gradient accumulation.

    Two parameter groups get separate learning rates: the pretrained LM
    extractor (``config.plm_learning_rate``) and the alignment heads +
    decoder (``config.learning_rate``). Gradients are accumulated for
    ``config.update_every`` batches before each optimizer step. After each
    epoch >= ``config.save_after`` the underlying HF model + tokenizer are
    saved with ``save_pretrained``.

    Args:
        train_data: list of training instances (project type).
        aligner: Aligner model holding ``global_encoder`` and ``decoder``.
        vocab: vocabulary object (unused here; kept for interface parity).
        config: Configurable with training hyperparameters.
    """
    auto_param = list(aligner.global_encoder.auto_extractor.parameters())

    align_param = list(aligner.global_encoder.p_s.parameters()) + \
                   list(aligner.global_encoder.p_t.parameters()) + \
                   list(aligner.global_encoder.p_neg_s.parameters()) + \
                   list(aligner.global_encoder.p_neg_t.parameters()) + \
                   list(aligner.decoder.parameters())

    # Separate LR for the pretrained LM vs. the freshly-initialized heads.
    model_param = [
        {'params': auto_param, 'lr': config.plm_learning_rate},
        {'params': align_param, 'lr': config.learning_rate}
    ]

    model_optimizer = Optimizer(model_param, config, config.learning_rate)
    scaler = GradScaler()

    global_step = 0
    batch_num = int(np.ceil(len(train_data) / float(config.train_batch_size)))

    # NOTE: renamed loop variable from `iter` — it shadowed the builtin.
    for epoch in range(config.train_iters):
        start_time = time.time()
        print('Iteration: ' + str(epoch))
        batch_iter = 0

        for onebatch in data_iter(train_data, config.train_batch_size, True):

            s_input_ids, s_att_mask, t_input_ids, t_att_mask \
                = input_variable(onebatch)
            neg_s_input_ids, neg_s_att_mask, neg_t_input_ids, neg_t_att_mask \
                = constrast_variable(onebatch)

            aligner.train()
            # Forward pass under autocast for mixed-precision speedup.
            with autocast():
                aligner.forward(
                    s_input_ids, s_att_mask, t_input_ids, t_att_mask,
                    neg_s_input_ids, neg_s_att_mask,
                    neg_t_input_ids, neg_t_att_mask
                )

            loss = aligner.compute_loss()

            # Scale down so accumulated gradients average over update_every.
            loss = loss / config.update_every
            scaler.scale(loss).backward()
            loss_value = loss.item()

            during_time = float(time.time() - start_time)
            print("Step:%d, Iter:%d, batch:%d, time:%.2f, loss:%.8f" \
                %(global_step, epoch, batch_iter,  during_time, loss_value))
            batch_iter += 1

            # Step every update_every batches, and flush at epoch end.
            if batch_iter % config.update_every == 0 or batch_iter == batch_num:
                # Unscale before clipping so the clip threshold applies to
                # true (unscaled) gradient magnitudes.
                scaler.unscale_(model_optimizer.optim)
                # Explicit torch.nn: `nn` was only in scope via wildcard imports.
                torch.nn.utils.clip_grad_norm_(auto_param + align_param,
                                               max_norm=config.clip)

                scaler.step(model_optimizer.optim)
                scaler.update()
                model_optimizer.schedule()

                model_optimizer.zero_grad()
                global_step += 1

        if config.save_after >= 0 and epoch >= config.save_after:
            save_plm_path = config.plm_save_dir + "_" + str(epoch)
            aligner.global_encoder.auto_extractor.auto_model.save_pretrained(save_plm_path)
            aligner.global_encoder.auto_extractor.tokenizer.save_pretrained(save_plm_path)
            print('Saving model to ', save_plm_path)
    return


if __name__ == '__main__':
    print("Process ID {}, Process Parent ID {}".format(os.getpid(), os.getppid()))

    # torch version
    print("Torch Version: ", torch.__version__)
    ### gpu
    gpu = torch.cuda.is_available()
    print("GPU available: ", gpu)
    print("CuDNN: ", torch.backends.cudnn.enabled)

    argparser = argparse.ArgumentParser()
    argparser.add_argument('--config_file', default='examples/default.cfg')
    argparser.add_argument('--n_worker', default=0, type=int, help='worker num')
    argparser.add_argument('--use_cuda', action='store_true')
    argparser.add_argument('--thread', default=1, type=int, help='thread num')

    # BUGFIX: the original called parse_args() first, which sys.exit()s on any
    # unrecognized option — making the subsequent parse_known_args() (whose
    # extra_args feed Configurable overrides) unreachable for such options.
    args, extra_args = argparser.parse_known_args()
    config = Configurable(args.config_file, extra_args)

    # Seed all RNG sources for reproducibility.
    random.seed(config.seed)
    np.random.seed(config.seed)
    torch.cuda.manual_seed(config.seed)
    torch.manual_seed(config.seed)

    torch.set_num_threads(args.thread)

    use_cuda = False
    if gpu and args.use_cuda: use_cuda = True
    print("\nGPU using status: ", use_cuda)

    train_instances = read_instances(config.s_train_file, config.t_train_file)
    print("train num: ", len(train_instances))

    vocab = createVocab(train_instances)

    print('Load pretrained encoder: ', config.plm_dir)
    tokenizer = AutoTokenizer.from_pretrained(config.plm_dir)
    plm_extractor = AutoModelExtractor(config.plm_dir, config, tokenizer)
    print('Load pretrained encoder ok')

    token2id(train_instances, tokenizer, vocab)
    # NOTE(review): `filter` here is a project helper from a wildcard import,
    # which shadows the builtin — confirm against driver/InstanceReader.
    f_train_instances = filter(train_instances, config.max_instance, config.max_seq_len)
    print("train num:", len(f_train_instances))

    # Attach negative (contrastive) samples to each instance.
    cl_sample(f_train_instances, config.negative)

    pickle.dump(vocab, open(config.save_vocab_path, 'wb'))

    global_encoder = GlobalEncoder(vocab, config, plm_extractor)
    decoder = Decoder(vocab, config)

    if use_cuda:
        torch.backends.cudnn.enabled = True
        # deterministic (not benchmark) mode, to keep runs reproducible
        torch.backends.cudnn.deterministic = True
        global_encoder.cuda()
        decoder.cuda()

    aligner = Aligner(global_encoder, decoder, config)

    train(f_train_instances, aligner, vocab, config)