import argparse
import logging
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.data import ConcatDataset
from transformers import BertTokenizer, BertConfig, AdamW, BertModel

from dataloading import load_and_cache_examples
from evaluate import evaluate
# from model import BertEncoder, Classifier
from relabel import get_relabel_proto, get_relabel_proto_test
from trainer import train_current_task
from utils import get_labels_dy

torch.set_float32_matmul_precision('high')

import os
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

def str2bool(v):
    """argparse ``type=`` helper: parse a CLI token into a bool.

    Accepts an actual bool unchanged; otherwise matches common yes/no
    spellings case-insensitively and raises ``ArgumentTypeError`` for
    anything else (so argparse reports a clean usage error).
    """
    if isinstance(v, bool):
        return v
    token = v.lower()
    if token in {'yes', 'true', 't', 'y', '1'}:
        return True
    if token in {'no', 'false', 'f', 'n', '0'}:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')


def train(
        model_path,
        labels_list,
        pad_token_id,
        output_dir,
        data_dir,
        step_id,
        logger,
        ):
    """Train the model on incremental task ``step_id``.

    Builds a fresh encoder/classifier, optionally restores the previous
    task's checkpoint (handling torch.compile's ``_orig_mod.`` state-dict
    key prefix), loads the train/dev/test (and optional replay-memory)
    datasets, and delegates the actual optimisation to
    ``train_current_task``.

    NOTE(review): relies on the module-level ``args`` namespace built in
    ``__main__`` — confirm before reusing this function elsewhere.

    Args:
        model_path: directory holding the previous task's ``encoder.pt`` /
            ``classifier.pt`` (unused when ``step_id == 0``).
        labels_list: all labels seen up to and including this task.
        pad_token_id: label id assigned to padded positions (the CE
            ignore index; callers pass ``CrossEntropyLoss().ignore_index``).
        output_dir: directory where training artefacts are written.
        data_dir: dataset root directory.
        step_id: zero-based index of the incremental task.
        logger: logger forwarded to the data loaders.

    Returns:
        Tuple ``(micro_f1, macro_f1)`` from ``train_current_task``.
    """
    print("=======================step: {}=======================".format(step_id))

    prev_encoder = None
    prev_classifier = None

    # Tokenizer for the fixed base model; stashed on ``args`` because the
    # dataloading/trainer helpers fetch it from there.
    tokenizer = BertTokenizer.from_pretrained(args.base_model_path)
    args.tokenizer = tokenizer

    model = BertEncoder(args, tokenizer)
    classifier = Classifier(args, args.hidden_dim, len(labels_list))

    if args.usept2:
        # Only compile on setups known to run PyTorch >= 2.0.
        if args.server == "108" or (args.server == "107" and args.device == "cuda:0"):
            print("use pytorch2.0")
            classifier = torch.compile(classifier)
            model = torch.compile(model)

    model.to(args.device)
    classifier.to(args.device)

    # Warm-up forward passes (triggers torch.compile / CUDA kernel setup):
    # random token ids framed by BERT's [CLS] (101) and [SEP] (102).
    random_vectors = torch.randint(100, 10000, (8, 128))
    mask = torch.ones_like(random_vectors)
    random_vectors[:, 0] = 101
    random_vectors[:, -1] = 102

    random_vectors = random_vectors.to(args.device)
    mask = mask.to(args.device)
    for _ in range(10):
        model(random_vectors, mask)

    if step_id != 0:
        if args.no_start_with_bert_base and args.start_task == step_id:
            # Resuming mid-sequence: load the explicitly supplied checkpoint
            # instead of the previous task's output directory.  (The original
            # branched on ``args.server`` here, but every branch assigned the
            # same value — collapsed.)
            mp = model_path
            model_path = args.pre_model_path
            with open(args.best_file, "a") as f:
                f.write("base_model_path: {}\n".format(model_path))
        print("================loading prev model================")
        print("loading encoder ... from ", model_path + "/encoder.pt")
        model_state_dict = torch.load(model_path + "/encoder.pt")
        prev_classifier = Classifier(args, args.hidden_dim, len(labels_list) - args.per_types[step_id])
        print("loading classifier ... from ", model_path + "/classifier.pt")
        classifier_state_dict = torch.load(model_path + "/classifier.pt")
        if args.usept2 and (args.server == "108" or (args.server == "107" and args.device == "cuda:0")):
            prev_classifier = torch.compile(prev_classifier)
            # torch.compile wraps modules, prefixing state-dict keys with
            # "_orig_mod." — add/strip the prefix so checkpoints saved with
            # either flavour load cleanly.
            if "_orig_mod." not in list(model_state_dict.keys())[0]:
                model_state_dict = {"_orig_mod." + k: v for k, v in model_state_dict.items()}
                classifier_state_dict = {"_orig_mod." + k: v for k, v in classifier_state_dict.items()}
        else:
            if "_orig_mod." in list(model_state_dict.keys())[0]:
                model_state_dict = {k.split("_orig_mod.")[-1]: v for k, v in model_state_dict.items()}
                classifier_state_dict = {k.split("_orig_mod.")[-1]: v for k, v in classifier_state_dict.items()}
        model.load_state_dict(model_state_dict)
        # NOTE(review): ``prev_encoder`` aliases the SAME module as the one
        # being trained (no deep copy) — confirm this is intended by
        # ``train_current_task`` before changing.
        prev_encoder = model
        prev_classifier.load_state_dict(classifier_state_dict)
        prev_classifier.to(args.device)
        if args.no_start_with_bert_base and args.start_task == step_id:
            model_path = mp

    # BUGFIX: these loaders previously read the module-level ``pad_label_id``
    # global, leaving the ``pad_token_id`` parameter unused.  Callers pass the
    # same value, so behaviour is unchanged.
    train_dataset = load_and_cache_examples(
        args=args,
        tokenizer=tokenizer,
        labels=labels_list,
        pad_id=pad_token_id,
        data_dir=data_dir,
        mode="train",
        logger=logger,
        task_num=step_id
    )

    if args.use_memory:
        # Mix the replay-memory examples into the current task's train set.
        memory_dataset = load_and_cache_examples(
            args=args,
            tokenizer=tokenizer,
            labels=labels_list,
            pad_id=pad_token_id,
            data_dir=data_dir,
            mode="memory",
            logger=logger,
            task_num=step_id)

        train_dataset = ConcatDataset([train_dataset, memory_dataset])

    dev_dataset = load_and_cache_examples(
        args=args,
        tokenizer=tokenizer,
        labels=labels_list,
        pad_id=pad_token_id,
        data_dir=data_dir,
        mode="dev",
        logger=logger,
        task_num=step_id)

    test_dataset = load_and_cache_examples(
        args=args,
        tokenizer=tokenizer,
        labels=labels_list,
        pad_id=pad_token_id,
        data_dir=data_dir,
        mode="test",
        logger=logger,
        task_num=step_id)

    print("========fininsh loading data=========")

    current_encoder = model
    current_classifier = classifier

    micro_f1, macro_f1 = train_current_task(
        args=args,
        encoder=current_encoder,
        prev_encoder=prev_encoder,
        classifier=current_classifier,
        prev_classifier=prev_classifier,
        train_dataset=train_dataset,
        dev_dataset=dev_dataset,
        test_dataset=test_dataset,
        labels_list=labels_list,
        output_dir=output_dir,
        task_id=step_id
     )

    # Persist the full argument namespace next to the checkpoints.
    torch.save(args, os.path.join(output_dir, "training_args.bin"))

    return micro_f1, macro_f1

def test(args, step_id, data_dir, labels_list):
    """Evaluate a previously saved task checkpoint on its test split.

    Restores encoder/classifier from ``args.test_path/task_<step_id>``
    (handling torch.compile's ``_orig_mod.`` state-dict key prefix),
    rebuilds class prototypes from the replay memory, runs ``evaluate``,
    and appends the scores to ``<test_path>/res.txt``.

    NOTE(review): uses the module-level ``logger`` defined in ``__main__``.

    Args:
        args: parsed argument namespace.
        step_id: zero-based task index whose checkpoint is evaluated.
        data_dir: dataset root directory.
        labels_list: all labels seen up to and including this task.

    Returns:
        Tuple ``(micro_f1, macro_f1)`` scalar scores.
    """
    print("=======================test for step: {}=======================".format(step_id))
    model_path = args.test_path + "/task_{}".format(step_id)

    tokenizer = BertTokenizer.from_pretrained(args.base_model_path)

    dataset = load_and_cache_examples(
        args=args,
        tokenizer=tokenizer,
        labels=labels_list,
        pad_id=-100,
        data_dir=data_dir,
        mode="test",
        logger=logger)

    model = BertEncoder(args, tokenizer)
    classifier = Classifier(args, args.hidden_dim, len(labels_list))

    # Only compile on setups known to run PyTorch >= 2.0.
    use_compile = args.usept2 and (
        args.server == "108" or (args.server == "107" and args.device == "cuda:0"))
    if use_compile:
        print("use pytorch2.0")
        classifier = torch.compile(classifier)
        model = torch.compile(model)

    model.to(args.device)
    classifier.to(args.device)

    print("================loading prev model================")
    print("loading encoder ... from ", model_path + "/encoder.pt")
    model_state_dict = torch.load(model_path + "/encoder.pt")
    print("loading classifier ... from ", model_path + "/classifier.pt")
    classifier_state_dict = torch.load(model_path + "/classifier.pt")
    if use_compile:
        # BUGFIX: the original rebuilt an *uncompiled* classifier here and
        # compiled a throwaway copy bound to an unused name, so the
        # "_orig_mod."-prefixed keys below could never be loaded into it.
        # Keep the single compiled classifier and just fix up the keys.
        if "_orig_mod." not in list(model_state_dict.keys())[0]:
            model_state_dict = {"_orig_mod." + k: v for k, v in model_state_dict.items()}
            classifier_state_dict = {"_orig_mod." + k: v for k, v in classifier_state_dict.items()}
    else:
        if "_orig_mod." in list(model_state_dict.keys())[0]:
            model_state_dict = {k.split("_orig_mod.")[-1]: v for k, v in model_state_dict.items()}
            classifier_state_dict = {k.split("_orig_mod.")[-1]: v for k, v in classifier_state_dict.items()}
    model.load_state_dict(model_state_dict)
    encoder = model
    classifier.load_state_dict(classifier_state_dict)
    classifier.to(args.device)

    memory_dataset = load_and_cache_examples(
        args=args,
        tokenizer=tokenizer,
        labels=labels_list,
        pad_id=-100,
        data_dir=data_dir,
        mode="memory",
        logger=logger)

    print(len(memory_dataset))

    # Class prototypes computed from the replay-memory examples.
    proto_types, proto_features, ent_num = get_relabel_proto_test(
        args=args,
        prev_encoder=encoder,
        prev_classifier=classifier,
        train_dataset=memory_dataset,
        step_id=step_id,
        labels_list=labels_list)
    print("proto_types: ", proto_types.shape)
    print("proto_features: ", proto_features.shape)
    print("ent_num: ", ent_num)

    macro_f1, micro_f1, preds_list, preds, out_label_ids, results, loss_tup = evaluate(
        args=args,
        model=encoder,
        classifier=classifier,
        valdata=dataset,
        task_id=step_id,
        labels_list=labels_list,
        proto_features=proto_features,
        tsne=False,
        cal_loss=True,
        mode="test"
    )
    with open(args.test_path + "/res.txt", "a") as f:
        f.write("alpha: {}, macro_f1: {:.4f}, micro_f1: {:.4f}\n".format(args.alpha, micro_f1["f1"], macro_f1["f1"]))

    return micro_f1["f1"], macro_f1["f1"]


class BertEncoder(nn.Module):
    """BERT backbone plus a small MLP head producing L2-normalised features.

    ``forward`` returns a pair: the (dropout-regularised) token hidden
    states for classification, and unit-norm low-dimensional features for
    contrastive/prototype computations.
    """

    def __init__(self, args, tokenizer):
        super().__init__()
        self.args = args
        self.tokenizer = tokenizer
        self.model = BertModel.from_pretrained(args.base_model_path)
        self.dropout = nn.Dropout(0.1)

        dim = self.model.config.hidden_size
        # Projection head: hidden -> hidden -> feature_dim, GELU in between.
        self.head = nn.Sequential(
            nn.Linear(dim, dim),
            nn.GELU(),
            nn.Linear(dim, self.args.feature_dim)
        )

    def forward(self, input_ids, attention_mask):
        """Encode tokens.

        Returns:
            Tuple ``(hidden, feature)`` where ``hidden`` is the dropout-ed
            last hidden state and ``feature`` the L2-normalised projection
            (computed from the *pre-dropout* states).
        """
        states = self.model(input_ids, attention_mask=attention_mask)['last_hidden_state']
        projected = torch.nn.functional.normalize(self.head(states), p=2, dim=-1)
        # Dropout is applied only to the classification branch, after the
        # projection has already been taken from the clean states.
        return (self.dropout(states), projected)

    def get_low_dim_feature(self, hidden):
        """Project hidden states through the head and L2-normalise them."""
        return torch.nn.functional.normalize(self.head(hidden), p=2, dim=-1)

class Classifier(nn.Module):
    """Bias-free linear token classifier with a built-in cross-entropy loss.

    Args:
        args: argument namespace; only ``args.device`` and
            ``args.per_types`` are read here (by the resizing helpers).
        hidden_dim: input feature dimension of the linear head.
        label_num: number of output classes.
    """

    def __init__(self, args, hidden_dim, label_num):
        super().__init__()
        self.args = args
        self.label_num = label_num
        self.classifier = nn.Linear(hidden_dim, label_num, bias=False)
        self.loss_fn = CrossEntropyLoss()

    def forward(self, hidden, labels=None):
        """Compute logits and, when ``labels`` is given, the CE loss.

        Returns:
            ``(logits,)`` or ``(loss, logits)`` when labels are provided.
        """
        logits = self.classifier(hidden)

        output = (logits,)

        if labels is not None:
            loss = self.loss_fn(logits.view(-1, self.label_num), labels.view(-1))
            output = (loss,) + output

        return output

    def weight_copy(self, prev_classifier, task_id):
        """Rebuild the head at the current size, keeping old-class rows.

        Copies the previous classifier's weight rows into the first
        ``label_num - per_types[task_id]`` rows of a freshly initialised
        head; new-class rows keep their random init.
        """
        weight = prev_classifier.classifier.weight.data
        # GENERALISED: was hard-coded to 768 — follow the configured input
        # dimension so non-default ``hidden_dim`` values work too.
        in_dim = self.classifier.in_features
        new_cls = nn.Linear(in_dim, self.label_num, bias=False).to(self.args.device)
        with torch.no_grad():
            new_cls.weight.data[:self.label_num - self.args.per_types[task_id]] = weight
        self.classifier = new_cls

    def incremental_learning(self, seen_rel_num):
        """Resize the head to ``seen_rel_num`` classes, preserving rows."""
        weight = self.classifier.weight.data
        # GENERALISED: was hard-coded to 768 (see weight_copy).
        in_dim = self.classifier.in_features
        self.classifier = nn.Linear(in_dim, seen_rel_num, bias=False).to(self.args.device)
        with torch.no_grad():
            self.classifier.weight.data[:seen_rel_num] = weight[:seen_rel_num]


if __name__ == "__main__":
    logger = logging.getLogger(__name__)

    # NOTE: ``args`` is first the ArgumentParser and is rebound to the
    # parsed Namespace by ``parse_args()`` further below.
    args = argparse.ArgumentParser()
    args.add_argument("--seed", type=int, default=20130824)

    args.add_argument("--debug", type=str2bool, default=False)



    # ---- environment / model / optimisation basics ----
    args.add_argument("--server", type=str, default="107")
    args.add_argument("--device", type=str, default="cuda:0")
    args.add_argument("--model_name_or_path", type=str, default="bert-base-uncased")
    args.add_argument("--start_task", type=int, default=0)
    args.add_argument("--total_tasks", type=int, default=11)
    args.add_argument("--per_types", type=int, default=6)
    args.add_argument("--data_dir", type=str, default="./data/fg1_pg1")
    args.add_argument("--output_dir", type=str, default="./_results")
    args.add_argument("--hidden_dim", type=int, default=768)
    args.add_argument("--feature_dim", type=int, default=64)
    args.add_argument("--batch_size", type=int, default=8)
    args.add_argument("--dev_batch_size", type=int, default=512)
    args.add_argument("--encoder_lr", type=float, default=1e-5)
    args.add_argument("--classifier_lr", type=float, default=1e-3)
    args.add_argument("--adam_eps", type=float, default=1e-6)
    args.add_argument("--current_epoch", type=int, default=15)

    # kd_temp: distillation temperature; alpha: weight of the linear output.
    args.add_argument("--kd_temp", default=0.5, type=float, help="对比学习的温度系数")

    args.add_argument("--alpha", default=0.8, type=float, help="线性输出的权重系数")

    args.add_argument("--max_seq_length", type=int, default=128)
    args.add_argument("--report_file", type=str, default="record.txt")
    args.add_argument("--best_file", type=str, default="best_f1.txt")


    args.add_argument("--early_stop", type=int, default=6)
    args.add_argument("--usept2", type=str2bool, default=False)
    args.add_argument("--no_start_with_bert_base", type=str2bool, default=True)

    args.add_argument("--just_test", type=str2bool, default=False)
    args.add_argument("--test_task", type=int, default=1)
    args.add_argument("--test_use_other_proto", type=str2bool, default=False)

    args.add_argument("--notes", type=str, default="")
    args.add_argument("--training_evaluate", type=str2bool, default=False)
    args.add_argument("--pre_model_path", type=str, default="")




    # ====================== data loading ======================
    args.add_argument("--dataset", type=str, default="ontonotes5")
    args.add_argument("--use_utils_dataset", type=str2bool, default=False)
    # fg: number of classes in the first task; pg: classes per later task.
    args.add_argument("--fg", type=int, default=2, help="第一个任务的类别数")
    args.add_argument("--pg", type=int, default=2, help="递增任务的类别数")

    args.add_argument("--conll_train_path", type=str, default="./conll03/tasks/train_fg_1_pg_1.pth")
    args.add_argument("--ontonotes_train_path", type=str, default="./ontonotes5/fg1_pg1/train_fg_1_pg_1.pth")

    # ====================== distillation options ======================
    args.add_argument("--use_distill", type=str2bool, default=True)
    args.add_argument("--dev_relabel", type=str2bool, default=True)
    args.add_argument("--use_memory", type=str2bool, default=True)
    # memory_size: replay-memory examples kept per class.
    args.add_argument("--memory_size", default=5, type=int, help="每个类别的记忆大小")
    args.add_argument("--use_data_enhance", type=str2bool, default=True)
    args.add_argument("--dev_relabel_cooked", type=str2bool, default=False, help="是否使用熟成的pesudo label")

    # ====================== contrastive-learning hyper-parameters ======================
    args.add_argument("--use_con", type=str2bool, default=True)
    args.add_argument("--use_con_pseudo", type=str2bool, default=True)
    # cl_temp: contrastive temperature; con_weight: contrastive-loss weight.
    args.add_argument("--cl_temp", default=0.1, type=float, help="对比学习的温度系数")
    args.add_argument("--ignore_pad_con", type=str2bool, default=False, help="是否忽略pad的对比学习")
    args.add_argument("--con_weight", default=1., type=float, help="con_loss权重")




    # ====================== self-entropy hyper-parameters ======================
    args.add_argument("--use_self_entropy", type=str2bool, default=True)
    args.add_argument("--self_weight", default=0.1, type=float, help="self_entropy_loss权重")

    # ====================== KL-divergence hyper-parameters ======================
    args.add_argument("--use_kd", type=str2bool, default=True)
    args.add_argument("--use_kd_weight", type=str2bool, default=True, help="对比学习是否使用权重")
    args.add_argument("--kd_weight", default=2.5, type=float, help="kd_loss权重")
    args.add_argument("--use_output_sim", type=str2bool, default=True)
    args.add_argument("--lambda3", type=float, default=0.5)

    # ====================== relabelled cross-entropy hyper-parameters ======================
    args.add_argument("--use_ce_pseudo", type=str2bool, default=True, help="是否使用pesudo label")
    args.add_argument("--use_loss_new", type=str2bool, default=False, help="是否使用pesudo label")
    args.add_argument("--use_old_other_losss_new", type=str2bool, default=False, help="是否使用pesudo label")



    args.add_argument("--lambda4", default=0., type=float, help="ce_loss权重")



    # From here on ``args`` is the parsed Namespace.
    args = args.parse_args()


    # args.base_model_path = "/home/livosr/bert-base-cased"
    # tokenizer = BertTokenizer.from_pretrained("/home/livosr/bert-base-cased")
    # model = BertEncoder(args, tokenizer)
    # classifier = Classifier(args, args.hidden_dim, 3)
    #
    # model.to(args.device)
    # classifier.to(args.device)
    #
    # random_vectors = torch.randint(100, 10000, (8, 128))
    # mask = torch.ones_like(random_vectors)
    # random_vectors[:, 0] = 101
    # random_vectors[:, -1] = 102
    #
    # random_vectors = random_vectors.to(args.device)
    # mask = mask.to(args.device)
    # for i in range(10):
    #     model(random_vectors, mask)
    # exit(0)


    if args.seed: # fix random seeds for reproducibility
        print("setting")
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)








    # ---- per-dataset configuration: data paths, task layout, output dir ----
    # ``per_types`` becomes a list: [fg, pg, pg, ...], one entry per task.
    if args.dataset == "conll":
        args.data_dir = "./conll03"
        args.conll_train_path = args.data_dir + "/train_fg_{}_pg_{}.pth".format(args.fg, args.pg)

        args.total_tasks = 4
        args.per_types = [args.fg] + [args.pg] * ((args.total_tasks - args.fg) // args.pg)
        print(args.per_types)
        args.output_dir = "./conll03_results/fg{}_pg{}".format(args.fg, args.pg)

    if args.dataset == "ontonotes5":
        args.data_dir = "./ontonotes5"
        args.ontonotes_train_path = args.data_dir + "/train_fg_{}_pg_{}.pth".format(args.fg, args.pg)
        args.total_tasks = 18
        args.per_types = [args.fg] + [args.pg] * ((args.total_tasks - args.fg) // args.pg)
        print(args.per_types)

        args.output_dir = "./ontonotes5_results/fg{}_pg{}".format(args.fg, args.pg)

    if args.dataset == "i2b2":
        args.data_dir = "./i2b2"
        args.i2b2_train_path = args.data_dir + "/train_fg_{}_pg_{}.pth".format(args.fg, args.pg)
        args.total_tasks = 16
        args.per_types = [args.fg] + [args.pg] * ((args.total_tasks - args.fg) // args.pg)
        print(args.per_types)

        args.output_dir = "./i2b2_results/fg{}_pg{}".format(args.fg, args.pg)



    print("using pytorch2.0: ", args.usept2)
    # Task 0 always starts from the bare pretrained BERT.
    if args.start_task == 0:
        args.no_start_with_bert_base = False


    # tuner_params = nni.get_next_parameter()
    # args = merge_parameter(args, tuner_params)
    # ---- per-server local paths to the pretrained BERT weights ----
    if args.server == "107":
        if args.dataset == "conll" or args.dataset == "ontonotes5" or args.dataset == "i2b2":
            args.base_model_path = "/home/livosr/bert-base-cased"
        else:
            args.base_model_path = "/home/livosr/bert-base-uncased"
    elif args.server == "108":
        if args.dataset == "conll" or args.dataset == "ontonotes5" or args.dataset == "i2b2":
            args.base_model_path = "/data/livosr/pretrained_model/bert-base-cased"
        else:
            args.base_model_path = "/data/livosr/pretrained_model/bert-base-uncased"
    else:
        args.base_model_path = "/hy-tmp/bert-base-cased"

    tokenizer = BertTokenizer.from_pretrained(args.base_model_path)

    if not args.just_test:
        # Timestamped run directory + dump of all hyper-parameters.
        now = datetime.now()
        formatted_time = now.strftime('%Y-%m-%d %H:%M:%S')
        args.output_dir = args.output_dir + "/{}".format(formatted_time)
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        args.report_file = os.path.join(args.output_dir, args.report_file)
        args.best_file = os.path.join(args.output_dir, args.best_file)
        args_file = os.path.join(args.output_dir, "hyper.txt")
        current_seed = torch.initial_seed()
        print(current_seed)
        with open(args_file, 'w') as f:
            for arg in vars(args):
                f.write("{}: {}\n".format(arg, getattr(args, arg)))
            f.write("seed: {}\n".format(current_seed))
        with open(args.best_file, "a") as f:
            f.write("{}\n".format(args.notes))
    else:
        # NOTE(review): hard-coded checkpoint directory for test-only runs.
        args.output_dir = "/home/livosr/pythonproject/cnrc/ontonotes5_results/2023-10-24 22:58:00"





    current_encoder = None
    current_classifier = None
    task_prototypes = None
    task_proto_features = None
    prototypes = None
    proto_features = None
    prev_encoder = None

    # ---- evaluation-only mode: score every saved task checkpoint and exit ----
    if args.just_test:
        args.data_dir = "./ontonotes5"
        args.test_path = "/home/livosr/pythonproject/cnrc/ontonotes5_results_new/2023-10-29 01:28:59"
        args.total_tasks = 9
        # NOTE(review): here ``per_types`` is reset to a plain int (2), unlike
        # the list built above — only valid because this branch exits below.
        args.per_types = 2
        micros = []
        macros = []
        for i in range(args.total_tasks):
            labels_list = get_labels_dy(args.data_dir, 2, i)
            data_dir = args.data_dir
            micro_f1, macro_f1 = test(args, i, data_dir, labels_list)
            micros.append(micro_f1)
            macros.append(macro_f1)
        best_micros = np.array(micros)
        best_macros = np.array(macros)

        mean_micro = np.mean(best_micros)
        mean_macro = np.mean(best_macros)
        with open(args.test_path + "/res.txt", "a") as f:
            f.write("mean_micro: {}\n".format(mean_micro))
            f.write("mean_macro: {}\n".format(mean_macro))
        exit(0)

    # ---- main incremental-training loop: one iteration per task ----
    best_micros = []
    best_macros = []
    for step_id in range(args.start_task, len(args.per_types)):
        # Number of label types seen so far (cumulative over tasks).
        total_types = sum(args.per_types[: step_id + 1])
        labels_list = get_labels_dy(args.data_dir, total_types, step_id)
        pad_label_id = CrossEntropyLoss().ignore_index

        # Task 0 starts from the pretrained BERT; later tasks resume from the
        # previous task's output directory.
        if step_id == 0:
            model_path = args.base_model_path
        else:
            model_path = args.output_dir + "/task_{}".format(step_id - 1)
        output_dir = args.output_dir + "/task_{}".format(step_id)
        data_dir = args.data_dir


        best_micro, best_macro = train(
            model_path=model_path,
            labels_list=labels_list,
            pad_token_id=pad_label_id,
            output_dir=output_dir,
            data_dir=data_dir,
            step_id=step_id,
            logger=logger,
        )

        best_micros.append(best_micro)
        best_macros.append(best_macro)

    best_micros = np.array(best_micros)
    best_macros = np.array(best_macros)

    mean_micro = np.mean(best_micros)
    mean_macro = np.mean(best_macros)

    # Append the averaged scores across all trained tasks.
    with open(args.best_file, "a") as f:
        f.write("mean_micro: {}\n".format(mean_micro))
        f.write("mean_macro: {}\n".format(mean_macro))