import argparse
import logging
import os

import torch
from torch.utils.data import TensorDataset
from transformers import BertConfig, BertTokenizer

from utils import read_examples_from_file, convert_to_features, get_labels_dy, conll_loading


def load_and_cache_examples(args, tokenizer, labels, pad_id, mode, data_dir, logger):
    """Build (or load from a cache file) a TensorDataset of NER features.

    Args:
        args: parsed CLI namespace; reads ``max_seq_length``, ``dataset``,
            ``debug`` and (when present) ``overwrite_cache``.
        tokenizer: BERT-style tokenizer providing cls/sep/pad tokens.
        labels: list of label strings for the current task.
        pad_id: label id for padding and special tokens (typically -100,
            which PyTorch's cross-entropy ignores when computing the loss).
        mode: split name, e.g. "train", "dev", "test" or "memory".
        data_dir: directory with the raw task data; the feature cache is
            written here as ``cached_<mode>_<max_seq_length>``.
        logger: logger used for progress messages.

    Returns:
        TensorDataset of (input_ids, input_mask, segment_ids, label_ids),
        truncated to the first 100 examples when ``args.debug`` is truthy.
    """
    cache_file = os.path.join(
        data_dir, "cached_{}_{}".format(mode, args.max_seq_length))

    # Honour --overwrite_cache: the CLI defines it but it was previously
    # ignored, so a stale cache could never be regenerated.
    if os.path.exists(cache_file) and not getattr(args, "overwrite_cache", False):
        logger.info("Loading features from cached file %s", cache_file)
        features = torch.load(cache_file)
    else:
        logger.info("Creating features from dataset file at %s", data_dir)

        if mode == "train" and args.dataset == "conll":
            features = conll_loading(
                args=args,
                # Task index is encoded in the directory name, e.g. "task_3".
                task_num=int(data_dir.split("/")[-1].split("_")[-1]),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=0,
                sequence_a_segment_id=0,
                # Was hard-coded to 128, which could silently disagree with
                # the max_seq_length recorded in the cache file name.
                max_seq_length=args.max_seq_length)
        else:
            examples = read_examples_from_file(data_dir, mode, args, labels)
            features = convert_to_features(examples=examples,
                                           label_list=labels,
                                           max_seq_length=args.max_seq_length,
                                           tokenizer=tokenizer,
                                           cls_token=tokenizer.cls_token,
                                           cls_token_segment_id=0,
                                           sep_token=tokenizer.sep_token,
                                           # Same value the original obtained via
                                           # convert_tokens_to_ids([pad_token])[0];
                                           # kept consistent with the conll branch.
                                           pad_token=tokenizer.pad_token_id,
                                           pad_token_segment_id=0,
                                           pad_token_label_id=pad_id,
                                           sequence_a_segment_id=0,
                                           mask_padding_with_zero=True,
                                           logger=logger)

        # Each feature carries input_ids / input_mask / segment_ids / label_ids
        # padded to max_seq_length (InputFeatures-style objects).  [CLS], [SEP]
        # and padding positions are labelled -100, which PyTorch's cross-entropy
        # excludes from the loss.

        logger.info("Saving features into cached file %s", cache_file)
        torch.save(features, cache_file)

    logger.info("%s set has %d sentences.", mode, len(features))

    # Trim BEFORE tensor construction in debug mode so we never materialise
    # tensors for examples we immediately discard.
    if args.debug:
        features = features[:100]

    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)

    return TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)





def _str2bool(value):
    """Parse a command-line boolean.

    argparse's ``type=bool`` is a well-known pitfall: ``bool("False")`` is
    True, so every previously-defined boolean flag ignored its command-line
    value.  This converter accepts the usual spellings while keeping all
    existing defaults intact.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ("yes", "true", "t", "1"):
        return True
    if value.lower() in ("no", "false", "f", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected, got %r" % value)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Hyperparams settings')

    parser.add_argument("--debug", type=_str2bool, default=False,
                        help="Use only the first 100 examples of each split for quick debugging.")

    parser.add_argument("--base_path", default="/home/livosr/classincreasing/", type=str,
                        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.")

    parser.add_argument("--data_dir", default="data/tasks/", type=str,
                        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.")

    parser.add_argument("--model_name_or_path", default="output_nerd", type=str,
                        help="Path to pre-trained model or shortcut name")
    parser.add_argument("--output_dir", default="output_nerd/", type=str,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--labels", default="data/labels.txt", type=str,
                        help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")

    ## Training or evaluating parameters
    parser.add_argument("--do_train", type=_str2bool, default=True,
                        help="Whether to run training.")
    parser.add_argument("--do_eval", type=_str2bool, default=False,
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_predict", type=_str2bool, default=True,
                        help="Whether to run predictions on the test set.")
    parser.add_argument("--change_th", type=_str2bool, default=True)
    parser.add_argument("--evaluate_during_training", type=_str2bool, default=True,
                        help="Whether to run evaluation during training at each logging step.")
    parser.add_argument("--memory_update", type=_str2bool, default=False,
                        help="Whether to update memory data.")
    parser.add_argument("--do_lower_case", type=_str2bool, default=True,
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--per_gpu_train_batch_size", default=16, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=16, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")

    ## Epoch
    parser.add_argument("--num_train_epochs", default=16.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--start_train_o_epoch", default=10.0, type=float,
                        help="The number of training type 'O' epoch to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")
    parser.add_argument("--scale", type=int, default=25)
    parser.add_argument("--logging_steps", type=int, default=4000,
                        help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=4000,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument("--eval_all_checkpoints", type=_str2bool, default=True,
                        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
    # parser.add_argument("--no_cuda", action="store_true",
    #                     help="Avoid using CUDA when available")
    parser.add_argument("--overwrite_output_dir", type=_str2bool, default=True,
                        help="Overwrite the content of the output directory")
    parser.add_argument("--overwrite_cache", type=_str2bool, default=False,
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument("--seed", type=int, default=1,
                        help="random seed for initialization")

    ## Relabeling parameters
    parser.add_argument("--relabel_th", type=float, default=0.98)
    parser.add_argument("--relabels_th_reduction", type=float, default=0.05)

    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument("--loss_name1", type=str, default="supcon_ce", help="Name of entity-oriented loss function.")
    parser.add_argument("--loss_name2", type=str, default="supcon_o_bce", help="Name of entity-aware with 'O' loss function.")
    parser.add_argument("--cls_name", type=str, default="ncm_dot", help="Name of classifier.")
    ## Task parameters
    parser.add_argument("--nb_tasks", type=int, default=11, help="The number of tasks.")
    parser.add_argument("--start_step", type=int, default=0, help="The index of start step.")
    parser.add_argument("--log_dir", type=str, default="log/nerd/results.txt",
                        help="The logging directory where the test results will be written.")
    parser.add_argument("--per_types", type=int, default=6,
                        help="The number of each task.")
    parser.add_argument("--feat_dim", type=int, default=128,
                        help="The dimension of features.")
    parser.add_argument("--train_temp", type=int, default=2,
                        help="The distilling temperature in training parse.")
    parser.add_argument("--eval_temp", type=int, default=1,
                        help="The distilling temperature in inference parse.")
    args = parser.parse_args()

    MODEL_CLASSES = {
        "bert": (BertConfig, BertTokenizer)
    }

    # NOTE(review): hard-coded local checkpoint path — presumably a
    # machine-specific dev setup; consider using args.model_name_or_path.
    model_path = "/home/livosr/bert-base-uncased"

    config_class, tokenizer_class = MODEL_CLASSES["bert"]
    config = config_class.from_pretrained(model_path, num_labels=6)
    tokenizer = tokenizer_class.from_pretrained(model_path, do_lower_case=args.do_lower_case)

    args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    labels = get_labels_dy(args.data_dir, 6, 0)
    data_dir = args.data_dir + "task_{}".format(0)
    logger = logging.getLogger(__name__)
    # Smoke-test the feature pipeline on task 0's "memory" split
    # (-100 = label id ignored by PyTorch's cross-entropy).
    load_and_cache_examples(args, tokenizer, labels, -100, "memory", data_dir, logger)