import random
import argparse
import pickle
import time

from transformers import AutoModel, AutoTokenizer, AutoConfig

from basic.Config import *
from basic.Vocab import *
from driver.Dataloader import *

from driver.EDUSegmenter import *
from modules.SegModel import *

from torch.cuda.amp import autocast as autocast
from torch.cuda.amp.grad_scaler import GradScaler

class Optimizer:
    """Adam wrapper coupling the optimizer with a step-decay LR schedule.

    The learning rate of every parameter group is multiplied by
    ``config.decay`` once per ``config.decay_steps`` scheduler steps.
    """

    def __init__(self, parameter, config, lr):
        """
        parameter: parameters or per-group dicts, as accepted by torch Adam.
        config: provides beta_1, beta_2, epsilon, l2_reg, decay, decay_steps.
        lr: default learning rate for groups that do not set their own.
        """
        self.optim = torch.optim.Adam(parameter, lr=lr, betas=(config.beta_1, config.beta_2),
                                      eps=config.epsilon, weight_decay=config.l2_reg)
        decay, decay_step = config.decay, config.decay_steps
        # Stepwise exponential decay: base_lr * decay ** (step // decay_step).
        l = lambda epoch: decay ** (epoch // decay_step)
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(self.optim, lr_lambda=l)

    def step(self):
        # Parameter update, then schedule advance, then gradient reset.
        self.optim.step()
        self.schedule()
        self.optim.zero_grad()

    def schedule(self):
        self.scheduler.step()

    def zero_grad(self):
        self.optim.zero_grad()

    @property
    def lr(self):
        # Fix: get_last_lr() is the supported accessor for the most recently
        # applied rates; get_lr() is intended for internal use, emits a
        # deprecation warning when called directly, and may recompute values
        # instead of reporting what was actually applied.
        return self.scheduler.get_last_lr()

def train(train_data, dev_data, test_data, plm_tokenizer, plm_config, segmenter, vocab, config):
    """Fine-tune the segmenter with mixed precision and gradient accumulation.

    Builds training instances from the raw documents, trains for
    config.train_iters epochs, evaluates on dev/test every
    config.validate_every batches, and checkpoints the PLM plus the
    output-layer weights whenever the dev F-score improves.
    """

    # Convert documents to instances and map tokens/labels to ids
    # (helpers come from driver.Dataloader / basic.Vocab).
    train_instances = inst(train_data)
    word2id(train_instances, plm_tokenizer)
    label2id(train_instances, vocab)

    # Two parameter groups so the pretrained encoder can use a different
    # (typically smaller) learning rate than the fresh output layer.
    plm_param = list(segmenter.seg_model.plm_model.parameters())
    seg_param = list(segmenter.seg_model.output_layer.parameters())

    model_param = [{'params': plm_param, 'lr': config.plm_learning_rate},
                   {'params': seg_param, 'lr': config.learning_rate}]

    model_optimizer = Optimizer(model_param, config, config.learning_rate)

    global_step = 0
    best_FF = 0
    batch_num = int(np.ceil(len(train_instances) / float(config.train_batch_size)))

    # Loss scaler for AMP (mixed-precision) training.
    scaler = GradScaler()

    for iter in range(config.train_iters):
        start_time = time.time()
        print('Iteration: ' + str(iter))
        batch_iter = 0

        overall_label_correct,  overall_total_label = 0, 0
        for onebatch in data_iter(train_instances, config.train_batch_size, True):
            inputs = batch_inputs(onebatch)
            gold_labels = batch_label(onebatch)

            segmenter.train()
            #with torch.autograd.profiler.profile() as prof:
            # Forward pass runs under autocast; note the loss itself is
            # computed outside the autocast region.
            with autocast():
                segmenter.forward(inputs)
            loss = segmenter.compute_loss(gold_labels)

            # Normalize by the accumulation factor so gradients average over
            # the accumulated mini-batches.
            loss = loss / config.update_every
            # loss.backward()
            scaler.scale(loss).backward()

            loss_value = loss.data.cpu().numpy()

            total_labels, correct_labels = segmenter.compute_acc(onebatch, vocab)

            overall_total_label += total_labels
            overall_label_correct += correct_labels
            during_time = float(time.time() - start_time)
            acc = overall_label_correct / overall_total_label
            #acc = 0
            print("Step:%d, Iter:%d, batch:%d, time:%.2f, acc:%.2f, loss:%.2f"
                  %(global_step, iter, batch_iter,  during_time, acc, loss_value))
            batch_iter += 1

            # Optimizer step every config.update_every batches, plus once at
            # epoch end to flush a partial accumulation window.
            if batch_iter % config.update_every == 0 or batch_iter == batch_num:
                # Unscale before clipping so the clip threshold applies to
                # true gradient magnitudes, not scaled ones.
                scaler.unscale_(model_optimizer.optim)
                nn.utils.clip_grad_norm_(plm_param + seg_param, max_norm=config.clip)

                # scaler.step skips the update when inf/nan grads are found;
                # the LR schedule is advanced manually because
                # Optimizer.step (which would also schedule) is bypassed here.
                scaler.step(model_optimizer.optim)
                scaler.update()
                model_optimizer.schedule()

                model_optimizer.zero_grad()
                global_step += 1

            # Periodic evaluation: predictions are written to files suffixed
            # with the current global step, then scored by the external script.
            if batch_iter % config.validate_every == 0 or batch_iter == batch_num:
                print("Dev:")
                predict(dev_data, segmenter, vocab, config, plm_tokenizer, config.dev_file + '.' + str(global_step))
                dev_FF = scripts_evaluate(config, config.dev_file, config.dev_file + '.' + str(global_step))

                #dev_FF = evaluate(config.dev_file, config.dev_file + '.' + str(global_step))
                print("Test:")
                predict(test_data, segmenter, vocab, config, plm_tokenizer, config.test_file + '.' + str(global_step))
                scripts_evaluate(config, config.test_file, config.test_file + '.' + str(global_step))

                if dev_FF > best_FF:
                    print("Exceed best Full F-score: history = %.2f, current = %.2f" % (best_FF, dev_FF))
                    best_FF = dev_FF
                    if config.save_after >= 0 and iter >= config.save_after:

                        # Save the fine-tuned PLM (weights/config/tokenizer)
                        # in HuggingFace format, and the output layer as a
                        # plain state_dict.
                        segmenter.seg_model.plm_model.save_pretrained(config.plm_save_dir)
                        plm_config.save_pretrained(config.plm_save_dir)
                        plm_tokenizer.save_pretrained(config.plm_save_dir)

                        segmenter_model = {
                            "dec": segmenter.seg_model.output_layer.state_dict(),
                        }

                        torch.save(segmenter_model, config.save_model_path)
                        print('Saving model to ', config.save_model_path)

def scripts_evaluate(config, gold_file, predict_file):
    """Run the external evaluation script and return its final F-score.

    The script at config.eval_scripts is invoked as
    ``python <script> <gold> <predict>`` and is expected to print the score
    on its last output line in the form ``...: <value>``.
    """
    command = "python %s %s %s" % (config.eval_scripts, gold_file, predict_file)
    output = os.popen(command).read()
    lines = output.strip().split("\n")
    print(' '.join(lines))
    # The score is the text after the final ': ' on the last line.
    return float(lines[-1].split(': ')[-1])

def evaluate(gold_file, predict_file):
    """Score predicted EDU segmentation against gold by exact-string matching.

    Reads both files with read_corpus, extracts the EDU strings of each
    document pair, and accumulates gold/predicted/correct counts into a
    Metric. Returns Metric.getAccuracy().
    """
    from collections import Counter  # local import keeps file-level imports untouched

    gold_data = read_corpus(gold_file, True)
    predict_data = read_corpus(predict_file, True)
    seg_metric = Metric()
    for gold_doc, predict_doc in zip(gold_data, predict_data):
        gold_edus = gold_doc.extract_EDUstr()
        predict_edus = predict_doc.extract_EDUstr()
        seg_metric.overall_label_count += len(gold_edus)
        seg_metric.predicated_label_count += len(predict_edus)
        # Fix: multiset intersection. A plain set() collapses duplicate EDU
        # strings, so documents containing repeated identical EDUs would have
        # their correct matches under-counted while the gold/predicted totals
        # still use full list lengths.
        seg_metric.correct_label_count += sum((Counter(gold_edus) & Counter(predict_edus)).values())
    print("edu seg:", end=" ")
    seg_metric.print()
    return seg_metric.getAccuracy()

def predict(data, segmenter, vocab, config, plm_tokenizer, outputFile):
    """Segment every document in `data` and write CoNLL-style output.

    Documents are processed in batches of 10; within each document batch the
    sentence instances are batched by config.test_batch_size for the model.
    Each 10-column token line has its 10th column rewritten with
    'BeginSeg=Yes' (segment start) or '_' according to the predicted labels;
    all other lines are copied through unchanged.
    """
    start = time.time()
    segmenter.eval()
    # Context manager guarantees the output file is closed even if
    # prediction raises (the original relied on an explicit close()).
    with open(outputFile, mode='w', encoding='utf8') as outf:
        for docbatch in data_iter(data, 10, False):
            predict_instances = inst(docbatch)
            word2id(predict_instances, plm_tokenizer)
            batch_labels = []
            for sentbatch in data_iter(predict_instances, config.test_batch_size, False):
                inputs = batch_inputs(sentbatch)
                # Mixed-precision inference; labeling runs outside autocast.
                with autocast():
                    segmenter.forward(inputs)
                batch_labels += segmenter.labeling(sentbatch, vocab)

            # batch_labels holds one label sequence per sentence, in document
            # order; `offset` walks through them.
            offset = 0
            for doc in docbatch:
                outf.write(doc.firstline + '\n')
                for sentence_conll in doc.sentences_conll:
                    predict_labels = batch_labels[offset]

                    # Token lines are exactly the 10-column ones; the label
                    # sequence must align with them one-to-one.
                    word_count = sum(1 for line in sentence_conll
                                     if len(line.split('\t')) == 10)
                    assert len(predict_labels) == word_count

                    word_offset = 0
                    for line in sentence_conll:
                        info = line.split('\t')
                        if len(info) == 10:
                            # The first token of a sentence always starts a
                            # segment, as does any token labeled 'b'.
                            if predict_labels[word_offset] == 'b' or word_offset == 0:
                                predict_label = 'BeginSeg=Yes'
                            else:
                                # 'i' (and, defensively, any unexpected label)
                                # means segment-internal. The original had no
                                # else branch, leaving predict_label stale for
                                # labels other than 'b'/'i'.
                                predict_label = '_'
                            info = line.strip().split("\t")
                            info[9] = predict_label
                            predict_line = '\t'.join(info)
                            word_offset += 1
                        else:
                            predict_line = line

                        outf.write(predict_line + '\n')
                    offset += 1
                    outf.write('\n')
    end = time.time()
    during_time = float(end - start)
    print("doc num: %d, segment time = %.2f " % (len(data), during_time))

if __name__ == '__main__':
    ### process id
    print("Process ID {}, Process Parent ID {}".format(os.getpid(), os.getppid()))

    # Fix all RNG seeds (python, numpy, torch CPU/CUDA) for reproducibility.
    random.seed(666)
    np.random.seed(666)
    torch.cuda.manual_seed(666)
    torch.manual_seed(666)

    ### gpu
    gpu = torch.cuda.is_available()
    print("GPU available: ", gpu)
    print("CuDNN: \n", torch.backends.cudnn.enabled)

    argparser = argparse.ArgumentParser()
    argparser.add_argument('--config_file', default='examples/default.cfg')
    argparser.add_argument('--model', default='BaseParser')
    argparser.add_argument('--thread', default=4, type=int, help='thread num')
    argparser.add_argument('--use-cuda', action='store_true', default=True)

    # Unknown CLI arguments are passed through to Configurable as
    # config-file overrides.
    args, extra_args = argparser.parse_known_args()
    config = Configurable(args.config_file, extra_args)

    train_data = read_corpus(config.train_file)
    dev_data = read_corpus(config.dev_file)
    test_data = read_corpus(config.test_file)

    # Build the vocabulary from training data only and persist it so later
    # decoding runs can reload the exact same label mapping.
    vocab = creatVocab(train_data)
    pickle.dump(vocab, open(config.save_vocab_path, 'wb'))

    torch.set_num_threads(args.thread)

    # Use CUDA only when a device is present AND the flag is set.
    config.use_cuda = False
    if gpu and args.use_cuda: config.use_cuda = True
    print("\nGPU using status: ", config.use_cuda)

    print('Load pretrained plm.....')
    plm_model = AutoModel.from_pretrained(config.bert_dir)
    plm_config = AutoConfig.from_pretrained(config.bert_dir)
    # add_prefix_space makes byte-level BPE tokenizers (RoBERTa/GPT-2 style)
    # tokenize pre-split words consistently regardless of position.
    plm_tokenizer = AutoTokenizer.from_pretrained(config.bert_dir, add_prefix_space=True)
    print('Load pretrained plm ok')

    seg_model = SegModel(plm_model, plm_config, vocab)
    if config.use_cuda:
        seg_model.cuda()
    segmenter = EDUSegmenter(seg_model, config)

    train(train_data, dev_data, test_data, plm_tokenizer, plm_config, segmenter, vocab, config)
