import random
import time
import sys

sys.path.append("./")
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


from model.framework import MoVE_model
from utils.data import Data
from utils.utils import *
from utils.data_manager import *
from utils.metric import *
from tqdm import tqdm
from eval import *
from optimizer import *
from loss import *
from torch.utils.tensorboard import SummaryWriter


def save_model(args, model, epoch, f1, auc):
    """Persist the model's state dict to a metric-tagged checkpoint file.

    The file name is built from ``args.saved_model`` plus the dev F1/AUC
    (as percentages) and the epoch, so checkpoints are self-describing.
    """
    path = f"{args.saved_model}_f1{f1 * 100:.3f}_auc{auc * 100:.3f}_ep_{epoch}"
    torch.save(model.state_dict(), path)
    logger.info(f"Saving to {path}!")


def batchify_with_label(input_batch_list, gpu=True):
    """Collate a list of preprocessed instances into padded batch tensors.

    Each element of ``input_batch_list`` is a sequence laid out as (order
    established by the indexing below):
        [0]  words        - token ids, length = seq_len
        [1]  biwords      - bigram ids, length = seq_len
        [2]  chars        - per-token char ids (collected but not tensorised)
        [3]  gazs         - raw gazetteer matches (returned untouched)
        [4]  labels       - instance label(s); tensorised as-is, so all
                            instances must share the same label width
        [5]  layer_gazs   - (seq_len, 4, gaz_num) gazetteer word ids
        [6]  gaz_count    - (seq_len, 4, gaz_num) match counts
        [7]  gaz_chars    - (seq_len, 4, gaz_num, gaz_len) chars per match
        [8]  gaz_mask     - (seq_len, 4, gaz_num) padding mask for matches
        [9]  gazchar_mask - (seq_len, 4, gaz_num, gaz_len) char padding mask
        [10] bert_ids     - BERT token ids, length = seq_len + 2 ([CLS]/[SEP]
                            assumed - TODO confirm against the tokenizer)

    Args:
        input_batch_list: list of instances as described above.
        gpu: when True, move every returned tensor to CUDA.

    Returns:
        14-tuple: (gazs, word_seq_tensor, biword_seq_tensor,
        word_seq_lengths, label_seq_tensor, layer_gaz_tensor,
        gaz_count_tensor, gaz_chars_tensor, gaz_mask_tensor,
        gazchar_mask_tensor, mask, bert_seq_tensor, bert_mask, scopes).
    """
    batch_size = len(input_batch_list)
    words = [sent[0] for sent in input_batch_list]
    biwords = [sent[1] for sent in input_batch_list]
    chars = [sent[2] for sent in input_batch_list]

    # --------------------
    gazs = [sent[3] for sent in input_batch_list]
    labels = [sent[4] for sent in input_batch_list]
    layer_gazs = [sent[5] for sent in input_batch_list]
    gaz_count = [sent[6] for sent in input_batch_list]
    gaz_chars = [sent[7] for sent in input_batch_list]
    gaz_mask = [sent[8] for sent in input_batch_list]
    gazchar_mask = [sent[9] for sent in input_batch_list]
    # ----------------------
    ### bert tokens
    bert_ids = [sent[10] for sent in input_batch_list]

    # One [i, i+1) scope per instance (single-entity-pair classification).
    scopes = [[i, i + 1] for i in range(batch_size)]

    word_seq_lengths = torch.LongTensor(list(map(len, words)))
    max_seq_len = int(word_seq_lengths.max())

    # NOTE: the original wrapped these in the long-deprecated
    # autograd.Variable (a no-op since torch 0.4, and `autograd` was never
    # imported here directly); plain tensor construction is equivalent.
    word_seq_tensor = torch.zeros((batch_size, max_seq_len)).long()
    biword_seq_tensor = torch.zeros((batch_size, max_seq_len)).long()

    label_seq_tensor = torch.tensor(labels).long()

    mask = torch.zeros((batch_size, max_seq_len)).byte()
    ### bert seq tensor (+2 for the two special tokens)
    bert_seq_tensor = torch.zeros((batch_size, max_seq_len + 2)).long()
    bert_mask = torch.zeros((batch_size, max_seq_len + 2)).long()

    # Per-instance gazetteer widths; pad everything to the batch maximum.
    gaz_num = [len(layer_gazs[i][0][0]) for i in range(batch_size)]
    max_gaz_num = max(gaz_num)
    layer_gaz_tensor = torch.zeros(batch_size, max_seq_len, 4, max_gaz_num).long()
    gaz_count_tensor = torch.zeros(batch_size, max_seq_len, 4, max_gaz_num).float()
    gaz_len = [len(gaz_chars[i][0][0][0]) for i in range(batch_size)]
    max_gaz_len = max(gaz_len)
    gaz_chars_tensor = torch.zeros(
        batch_size, max_seq_len, 4, max_gaz_num, max_gaz_len
    ).long()
    # Masks default to 1 (= padding) and get zeroed where real data lands.
    gaz_mask_tensor = torch.ones(batch_size, max_seq_len, 4, max_gaz_num).byte()
    gazchar_mask_tensor = torch.ones(
        batch_size, max_seq_len, 4, max_gaz_num, max_gaz_len
    ).byte()

    for b, (
        seq,
        bert_id,
        biseq,
        label,
        seqlen,
        layergaz,
        gazmask,
        gazcount,
        gazchar,
        gcharmask,  # renamed: the original shadowed the outer `gazchar_mask`
        gaznum,
        gazlen,
    ) in enumerate(
        zip(
            words,
            bert_ids,
            biwords,
            labels,
            word_seq_lengths,
            layer_gazs,
            gaz_mask,
            gaz_count,
            gaz_chars,
            gazchar_mask,
            gaz_num,
            gaz_len,
        )
    ):

        word_seq_tensor[b, :seqlen] = torch.LongTensor(seq)
        biword_seq_tensor[b, :seqlen] = torch.LongTensor(biseq)
        # The original performed this assignment twice; once is enough.
        label_seq_tensor[b, :seqlen] = torch.LongTensor(label)

        layer_gaz_tensor[b, :seqlen, :, :gaznum] = torch.LongTensor(layergaz)
        mask[b, :seqlen] = torch.Tensor([1] * int(seqlen))
        bert_mask[b, : seqlen + 2] = torch.LongTensor([1] * int(seqlen + 2))
        gaz_mask_tensor[b, :seqlen, :, :gaznum] = torch.ByteTensor(gazmask)
        gaz_count_tensor[b, :seqlen, :, :gaznum] = torch.FloatTensor(gazcount)
        # Padding positions get count 1 to avoid divide-by-zero downstream
        # (presumably used as a normaliser - TODO confirm in the model).
        gaz_count_tensor[b, seqlen:] = 1
        gaz_chars_tensor[b, :seqlen, :, :gaznum, :gazlen] = torch.LongTensor(gazchar)
        gazchar_mask_tensor[b, :seqlen, :, :gaznum, :gazlen] = torch.ByteTensor(
            gcharmask
        )

        ##bert
        bert_seq_tensor[b, : seqlen + 2] = torch.LongTensor(bert_id)

    if gpu:
        word_seq_tensor = word_seq_tensor.cuda()
        biword_seq_tensor = biword_seq_tensor.cuda()
        word_seq_lengths = word_seq_lengths.cuda()
        label_seq_tensor = label_seq_tensor.cuda()
        layer_gaz_tensor = layer_gaz_tensor.cuda()
        gaz_chars_tensor = gaz_chars_tensor.cuda()
        gaz_mask_tensor = gaz_mask_tensor.cuda()
        gazchar_mask_tensor = gazchar_mask_tensor.cuda()
        gaz_count_tensor = gaz_count_tensor.cuda()
        mask = mask.cuda()
        bert_seq_tensor = bert_seq_tensor.cuda()
        bert_mask = bert_mask.cuda()

    return (
        gazs,
        word_seq_tensor,
        biword_seq_tensor,
        word_seq_lengths,
        label_seq_tensor,
        layer_gaz_tensor,
        gaz_count_tensor,
        gaz_chars_tensor,
        gaz_mask_tensor,
        gazchar_mask_tensor,
        mask,
        bert_seq_tensor,
        bert_mask,
        scopes
    )


# Train the model
def train(data, args, writer):
    """Run the full training loop with per-epoch dev/test evaluation.

    Args:
        data: project Data object holding train/dev/test ids, class weights
            and the label alphabet.
        args: parsed CLI namespace (batch_size, epochs, fuse_type,
            loss_type, clip_grad, ...).
        writer: tensorboard SummaryWriter for loss/metric curves.

    Side effects: trains the model in place, logs to tensorboard and the
    global logger. Best dev F1/AUC are tracked but checkpointing is
    currently disabled (save_model call commented out).
    """
    model = MoVE_model(data, args).cuda()
    logger.info("==" * 10)
    logger.info(model)
    logger.info("==" * 10)

    # One optimizer step per full batch, so the scheduler horizon is
    # batches-per-epoch * epochs (+1 slack).
    total_steps = (len(data.train_Ids) // args.batch_size) * args.epochs + 1
    optimizer, lr_scheduler = configure_optimizers(model, args, total_steps=total_steps)

    loss_fn = configure_loss(data.weights, type=args.loss_type)

    best_f1 = -1
    best_auc = -1
    best_epoch = 0

    for epoch in range(args.epochs):
        print("Epoch: %s/%s" % (epoch, args.epochs))

        model.train()
        random.shuffle(data.train_Ids)

        model.zero_grad()

        batch_loss = 0
        total_loss = 0
        batch_size = args.batch_size
        train_num = len(data.train_Ids)
        total_batch = train_num // batch_size + 1

        for batch_id in tqdm(range(total_batch), desc=" Batch iter "):
            start = batch_id * batch_size
            end = min((batch_id + 1) * batch_size, train_num)

            instance = data.train_Ids[start:end]
            if not instance:
                continue

            (
                gaz_list,
                batch_word,
                batch_biword,
                batch_wordlen,
                batch_label,
                layer_gaz,
                gaz_count,
                gaz_chars,
                gaz_mask,
                gazchar_mask,
                mask,
                batch_bert,
                bert_mask,
                scopes
            ) = batchify_with_label(instance)

            # MoE-style fusion additionally returns a load-balancing
            # auxiliary loss; the forward signature is otherwise identical.
            outputs = model(
                batch_word,
                batch_biword,
                layer_gaz,
                gaz_count,
                gaz_chars,
                gaz_mask,
                gazchar_mask,
                batch_bert,
                bert_mask,
                batch_label,
                scopes
            )
            if args.fuse_type in ("moe", "hmoe"):
                prob, aux_loss = outputs
            else:
                prob = outputs

            if args.loss_type == "bce":
                # BCE needs a one-hot float target over the label alphabet.
                target = torch.nn.functional.one_hot(
                    batch_label.squeeze().to(torch.int64), data.label_alphabet_size
                ).float()
                loss = loss_fn(prob, target)
            else:
                loss = loss_fn(prob, batch_label.squeeze())

            if args.fuse_type in ("moe", "hmoe"):
                loss = 0.5 * loss + 0.5 * aux_loss

            batch_loss += loss
            # .item() detaches for logging (replaces deprecated .data).
            total_loss += loss.item()

            # Fix: also flush on the trailing partial batch (end == train_num);
            # the original only stepped when end was a multiple of batch_size,
            # silently dropping the remainder's gradients every epoch.
            if end % args.batch_size == 0 or end == train_num:
                batch_loss.backward()
                # Fix: --clip_grad was parsed but never applied.
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
                optimizer.step()
                lr_scheduler.step()
                model.zero_grad()
                batch_loss = 0

        writer.add_scalar("Loss/train", total_loss, epoch)
        writer.add_scalar("learning rate", lr_scheduler.get_last_lr()[0], epoch)

        # Validation
        precision, recall, f1, auc = evaluate(data, model, args, "dev")
        writer.add_scalar("F1/Val", f1, epoch)
        writer.add_scalar("AUC/Val", auc, epoch)
        writer.add_scalar("P/Val", precision, epoch)
        writer.add_scalar("R/Val", recall, epoch)

        if auc > best_auc:
            print("\tCurrent auc ", auc, "exceed previous best auc:", best_auc)
            # save_model(args, model, epoch, f1, auc)
            best_auc = auc

        if f1 > best_f1:
            print("\tCurrent f1 ", f1, "exceed previous best f1:", best_f1)
            # save_model(args, model, epoch, f1, auc)
            best_f1 = f1
            best_epoch = epoch

        # Testing
        precision, recall, f1, auc = evaluate(data, model, args, "test")

        writer.add_scalar("F1/Test", f1, epoch)
        writer.add_scalar("AUC/Test", auc, epoch)
        writer.add_scalar("P/Test", precision, epoch)
        writer.add_scalar("R/Test", recall, epoch)

        writer.add_scalar("F1/Best_F1", best_f1, epoch)
        writer.add_scalar("AUC/Best_AUC", best_auc, epoch)

        logger.warning(
            "Best epoch at {},  Best dev F1:{:.3f}, Best Auc:{:.3f} !".format(
                best_epoch, best_f1, best_auc
            )
        )


def seed_everything(seed):
    """Seed python, numpy and torch RNGs, offset by the distributed rank.

    Each distributed rank gets its own stream via ``rank * 100000 + seed``,
    so seeds must stay below 10000 to keep the streams disjoint.

    Raises:
        ValueError: if ``seed`` is 10000 or larger.
    """
    if seed >= 10000:
        raise ValueError("seed number should be less than 10000")

    rank = torch.distributed.get_rank() if torch.distributed.is_initialized() else 0
    effective_seed = rank * 100000 + seed

    for seeder in (torch.manual_seed, np.random.seed, random.seed):
        seeder(effective_seed)


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="Tuning")

    parser.add_argument("--saved_model", type=str, default="model_saved/SciRE.pkl")
    parser.add_argument("--saved_data", type=str, default="data_saved/SciRE.pkl")
    parser.add_argument("--saved_log", type=str, default="logs/SciRE.log")
    parser.add_argument("--tensorboard_logdir", type=str, default="runs/toy")

    parser.add_argument("--public_path", type=str, default="data")
    parser.add_argument("--dataset", type=str, default="SciRE")
    parser.add_argument("--train_file", type=str, default="train.txt")
    parser.add_argument("--dev_file", type=str, default="dev.txt")
    parser.add_argument("--test_file", type=str, default="test.txt")
    parser.add_argument("--relation2id", type=str, default="rel2id.txt")

    parser.add_argument(
        "--char_emb", type=str, default="lexicon/gigaword_chn.all.a2b.uni.ite50.vec"
    )
    parser.add_argument(
        "--bichar_emb", type=str, default="lexicon/gigaword_chn.all.a2b.bi.ite50.vec"
    )
    parser.add_argument("--gaz_emb", type=str, default="lexicon/ctb.50d.vec")

    parser.add_argument("--char_emb_file", type=str, default="vec.txt")
    parser.add_argument("--sense_emb_file", type=str, default="sense.txt")
    parser.add_argument("--word_sense_map", type=str, default="sense_map.txt")

    parser.add_argument(
        "--bert_path", type=str, default=r"D:\huggingface\bert-base-chinese"
    )

    parser.add_argument(
        "--optimizer", type=str, default="AdamW", choices=["AdamW", "SGD"]
    )
    parser.add_argument("--loss_type", type=str, default="ce", choices=["bce", "ce"])
    parser.add_argument("--epochs", type=int, default=20)
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--warmup_ratio", type=float, default=0.2)
    parser.add_argument("--warmup_type", type=str, default="constant")
    parser.add_argument("--learning_rate", type=float, default=5e-5)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--clip_grad", type=int, default=5)
    parser.add_argument("--max_length", type=int, default=128)
    parser.add_argument("--weights_mode", type=str, default="smooth")

    parser.add_argument(
        "--fuse_type",
        choices=["concat", "attention", "moe", "hmoe"],
        type=str,
        default="concat",
    )

    args = parser.parse_args()

    # =====
    
    writer = SummaryWriter(log_dir=args.tensorboard_logdir)
    from buff import logo #启动图像没有意义

    logger.info("\n{}".format(logo))
    from model.config_layer import CONFIG #导入参数配置
    
    logger.add(sink=os.path.join(args.tensorboard_logdir, 'log.log'))
    save_hyperparams(args, CONFIG)##保存输入参数和配置
    seed_everything(42)
    # =======

    # data initilization .....
    # if os.path.exists(args.saved_data):
    #     data = load_data_setting(args.saved_data)#加载保存的数据
    # else:
    #     data = load_data(args)#加载字典
    #     save_data_setting(data, args.saved_data)#保存数据
    data = load_data(args)  # 加载字典
    save_data_setting(data, args.saved_data)  # 保存数据
    logger.info("Ready for training.")
    train(data, args, writer)
