import argparse
import logging
import os
import pickle

import torch
from torch.utils.data import TensorDataset, Dataset
from transformers import BertConfig, BertTokenizer

from utils import read_examples_from_file, convert_to_features, get_labels_dy, conll_loading, ontonotes_loading, \
    i2b2_loading


class NERDataset(Dataset):
    """Map-style dataset for one incremental NER task.

    Loads pre-tokenized CoNLL training sentences and their label ids from the
    pickle file at ``args.conll_train_path`` and remaps the global label ids
    into the label space of task ``task_num``.  Items are returned as plain
    Python lists; padding to a common length is left to ``collate_fn``.
    """

    def __init__(self, args, task_num):
        # Load all sentences/labels for this task once, up front.
        (self.inputs_ids,
         self.input_masks,
         self.segment_ids,
         self.label_ids) = self.conll_loading(
            args,
            task_num,
            pad_token=-100,
            pad_token_segment_id=0,
            sequence_a_segment_id=0,
            max_seq_length=128)

    def conll_loading(
            self,
            args,
            task_num,
            pad_token=-100,
            pad_token_segment_id=0,
            sequence_a_segment_id=0,
            max_seq_length=128):
        """Read and task-filter the pickled training data.

        The pickle holds ``(inputs_train_dict, y_train_dict)`` keyed by
        ``"<task>_<task>"``.  Labels outside the current task's id range
        ``(task_num * 2, (task_num + 1) * 2]`` are zeroed to "O"; in-range
        B/I label pairs ``2k-1``/``2k`` are both collapsed to local id ``k``.

        Returns:
            Tuple of four parallel lists (per sentence): input ids,
            attention masks, segment ids, and remapped label ids.
        """
        with open(args.conll_train_path, "rb") as f:
            inputs_train_dict, y_train_dict = pickle.load(f)

        key = f"{task_num}_{task_num}"
        inputs = inputs_train_dict[key]
        ys = y_train_dict[key]
        num_out_of_task = 0  # labels dropped because they belong to another task

        inputs_ids_list = []
        input_mask_list = []
        segment_ids_list = []
        label_ids_list = []

        for input_ids, label_ids in zip(inputs, ys):
            # Truncate over-long sentences, keeping [SEP] (id 102) as the last
            # token; its label is -100 so it is ignored by the loss.
            if len(input_ids) > max_seq_length:
                input_ids = input_ids[:max_seq_length - 1] + [102]
                label_ids = label_ids[:max_seq_length - 1] + [-100]

            segment_ids = [sequence_a_segment_id] * len(input_ids)
            input_mask = [1] * len(input_ids)

            for i in range(len(label_ids)):
                if label_ids[i] > 0:
                    if label_ids[i] > (task_num + 1) * 2 or label_ids[i] <= task_num * 2:
                        # Entity type from a different task: treat as "O".
                        num_out_of_task += 1
                        label_ids[i] = 0
                    elif label_ids[i] % 2 == 0:
                        # FIX: use integer division so label ids stay ints
                        # (true division produced floats in Python 3).
                        label_ids[i] = label_ids[i] // 2
                    else:
                        label_ids[i] = (label_ids[i] + 1) // 2

            assert len(input_ids) == len(input_mask)
            assert len(input_mask) == len(segment_ids)
            assert len(segment_ids) == len(label_ids)

            inputs_ids_list.append(input_ids)
            input_mask_list.append(input_mask)
            segment_ids_list.append(segment_ids)
            label_ids_list.append(label_ids)

        return inputs_ids_list, input_mask_list, segment_ids_list, label_ids_list

    def __len__(self):
        return len(self.inputs_ids)

    def __getitem__(self, idx):
        inputs_id = self.inputs_ids[idx]
        input_mask = self.input_masks[idx]
        segment_id = self.segment_ids[idx]
        label_id = self.label_ids[idx]
        return inputs_id, input_mask, segment_id, label_id


def collate_fn(data):
    """Pad a batch of variable-length examples to the batch max length.

    Args:
        data: list of ``(input_ids, input_mask, segment_ids, label_ids)``
            tuples of Python lists, as yielded by ``NERDataset.__getitem__``.

    Returns:
        Four ``torch.long`` tensors of shape ``(batch, max_len)``:
        input ids, attention masks, segment ids, and label ids
        (labels padded with -100 so cross-entropy ignores them).
    """
    input_id_batch, input_mask_batch, segment_id_batch, label_id_batch = zip(*data)
    max_len = max(len(ids) for ids in input_id_batch)

    # FIX: build padded *copies* rather than extending with `+=`.  The incoming
    # lists are references into the dataset; in-place extension mutated them,
    # so sequences kept growing across epochs as batches were re-collated.
    padded_ids, padded_masks, padded_segs, padded_labels = [], [], [], []
    for ids, mask, seg, lab in zip(input_id_batch, input_mask_batch,
                                   segment_id_batch, label_id_batch):
        pad = max_len - len(ids)
        padded_ids.append(list(ids) + [0] * pad)
        padded_masks.append(list(mask) + [0] * pad)
        padded_segs.append(list(seg) + [0] * pad)
        padded_labels.append(list(lab) + [-100] * pad)

    return (torch.tensor(padded_ids, dtype=torch.long),
            torch.tensor(padded_masks, dtype=torch.long),
            torch.tensor(padded_segs, dtype=torch.long),
            torch.tensor(padded_labels, dtype=torch.long))

def load_and_cache_examples(args, tokenizer, labels, pad_id, mode, data_dir, logger, task_num):

    cache_file = os.path.join(data_dir, "cached_{}_{}".
                              format(mode,
                                     args.max_seq_length))
    # if os.path.exists(cache_file):
    if 1==0:
        logger.info("Loading features from cached file %s", cache_file)
        features = torch.load(cache_file)

    elif mode == "train" and args.dataset == "conll" and args.use_utils_dataset:
        dataset = NERDataset(args, task_num=int(data_dir.split("/")[-1].split("_")[-1]))
        return dataset


    else:

        if mode == "train" and args.dataset == "conll":
            print("==========loading conll data from training_data===========")
            features = conll_loading(
                args=args,
                task_num=task_num,
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=0,
                sequence_a_segment_id=0,
                max_seq_length=128)



        elif mode == "train" and args.dataset == "ontonotes5":
            print("==========loading ontonotes data from training_data===========")
            features = ontonotes_loading(
                args=args,
                task_num=task_num,
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=0,
                sequence_a_segment_id=0,
                max_seq_length=128)

        elif mode == "train" and args.dataset == "i2b2":
            print("==========loading i2b2 data from training_data===========")
            features = i2b2_loading(
                args=args,
                task_num=task_num,
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=0,
                sequence_a_segment_id=0,
                max_seq_length=128)

            # labels_list = {i: 0 for i in range(len(labels))}
            # for i in range(len(features)):
            #     for j in range(len(features[i].input_ids)):
            #         if features[i].label_ids[j] > 0:
            #             labels_list[features[i].label_ids[j]] += 1
            #         if features[i].input_ids[j] == 102:
            #             break
            # print(labels_list)


        else:
            if mode == "dev" or mode == "memory":
                if task_num == 0:
                    now_labels = labels[:args.per_types[task_num] + 1]
                else:
                    labels_o = labels[0]
                    start = sum(args.per_types[:task_num]) + 1
                    end = sum(args.per_types[:task_num + 1]) + 1
                    now_labels = labels[start: end]
                    now_labels = [labels_o] + now_labels
            else:
                now_labels = labels
            print("==========loading {} data from {}_data===========".format(args.dataset, mode))
            print(mode, labels, now_labels)

            examples = read_examples_from_file(data_dir, mode, args, labels, now_labels)

            if mode == "memory":
                examples = examples[:args.memory_size * (len(labels)-1)]
            features = convert_to_features(examples=examples,
                                           label_list=labels,
                                           max_seq_length=args.max_seq_length,
                                           tokenizer=tokenizer,
                                           cls_token=tokenizer.cls_token,
                                           cls_token_segment_id=0,
                                           sep_token=tokenizer.sep_token,
                                           pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                                           pad_token_segment_id=0,
                                           pad_token_label_id=pad_id,
                                           sequence_a_segment_id=0,
                                           mask_padding_with_zero=True,
                                           logger=logger)


        # 得到填充到最大长度的 input_ids, input_mask, segment_ids, label_ids. 以InputFeatures的形式
        # cls和sep以及padding对应的label是-100，pytorch的交叉熵定义-100不参与loss计算,CLS和SEP亦如此
        # [101, 2023, 10685, 15767, 1999, 4529, 2043, 9433, 2020, 14153, 3755, 2000, 1996, 2034, 2407, 1010, 2635, 1037,
        #  2173, 2008, 18127, 3373, 2323, 2022, 17156, 1012, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        #  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        #  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        #  0, 0, 0, 0, 0, 0, 0, 0]
        # [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        #  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        #  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        #  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        # [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        #  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        #  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        #  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        # [-100, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, -100, -100, -100, -100, -100,
        #  -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100,
        #  -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100,
        #  -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100,
        #  -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100,
        #  -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100,
        #  -100, -100, -100, -100, -100, -100, -100]





        logger.info("Saving features into cached file %s", cache_file)
        torch.save(features, cache_file)

    print("{} set have {} sentences. ".format(mode, len(features)))
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long).to(args.device)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long).to(args.device)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long).to(args.device)
    all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long).to(args.device)
    all_bi_label_ids = torch.tensor([f.bi_label_ids for f in features], dtype=torch.long).to(args.device)

    if args.debug:
        all_label_ids = all_label_ids[:100]
        all_input_ids = all_input_ids[:100]
        all_input_mask = all_input_mask[:100]
        all_segment_ids = all_segment_ids[:100]
        all_bi_label_ids = all_bi_label_ids[:100]


    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_bi_label_ids)



    return dataset





# if __name__ == "__main__":
#     parser = argparse.ArgumentParser(description='Hyperparams settings')
#
#     parser.add_argument("--debug", default=False,
#                         help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.")
#
#     parser.add_argument("--base_path", default="/home/livosr/classincreasing/", type=str,
#                         help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.")
#
#     parser.add_argument("--data_dir", default="data/tasks/", type=str,
#                         help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.")
#
#     parser.add_argument("--model_name_or_path", default="output_nerd", type=str,
#                         help="Path to pre-trained model or shortcut name")
#     parser.add_argument("--output_dir", default="output_nerd/", type=str,
#                         help="The output directory where the model predictions and checkpoints will be written.")
#
#     ## Other parameters
#     parser.add_argument("--labels", default="data/labels.txt", type=str,
#                         help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.")
#     parser.add_argument("--max_seq_length", default=128, type=int,
#                         help="The maximum total input sequence length after tokenization. Sequences longer "
#                              "than this will be truncated, sequences shorter will be padded.")
#
#     ## Traning or evaluating parameters
#     parser.add_argument("--do_train", type=bool, default=True,
#                         help="Whether to run training.")
#     parser.add_argument("--do_eval", type=bool, default=False,
#                         help="Whether to run eval on the dev set.")
#     parser.add_argument("--do_predict", type=bool, default=True,
#                         help="Whether to run predictions on the test set.")
#     parser.add_argument("--change_th", type=bool, default=True)
#     parser.add_argument("--evaluate_during_training", type=bool, default=True,
#                         help="Whether to run evaluation during training at each logging step.")
#     parser.add_argument("--memory_update", type=bool, default=False,
#                         help="Whether to update memory data.")
#     parser.add_argument("--do_lower_case", type=bool, default=True,
#                         help="Set this flag if you are using an uncased model.")
#     parser.add_argument("--per_gpu_train_batch_size", default=16, type=int,
#                         help="Batch size per GPU/CPU for training.")
#     parser.add_argument("--per_gpu_eval_batch_size", default=16, type=int,
#                         help="Batch size per GPU/CPU for evaluation.")
#     parser.add_argument("--gradient_accumulation_steps", type=int, default=1,
#                         help="Number of updates steps to accumulate before performing a backward/update pass.")
#     parser.add_argument("--learning_rate", default=5e-5, type=float,
#                         help="The initial learning rate for Adam.")
#     parser.add_argument("--weight_decay", default=0.0, type=float,
#                         help="Weight decay if we apply some.")
#     parser.add_argument("--adam_epsilon", default=1e-8, type=float,
#                         help="Epsilon for Adam optimizer.")
#     parser.add_argument("--max_grad_norm", default=1.0, type=float,
#                         help="Max gradient norm.")
#
#     ## Epoch
#     parser.add_argument("--num_train_epochs", default=16.0, type=float,
#                         help="Total number of training epochs to perform.")
#     parser.add_argument("--start_train_o_epoch", default=10.0, type=float,
#                         help="The number of training type 'O' epoch to perform.")
#     parser.add_argument("--max_steps", default=-1, type=int,
#                         help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
#     parser.add_argument("--warmup_steps", default=0, type=int,
#                         help="Linear warmup over warmup_steps.")
#     parser.add_argument("--scale", type=int, default=25)
#     parser.add_argument("--logging_steps", type=int, default=4000,
#                         help="Log every X updates steps.")
#     parser.add_argument("--save_steps", type=int, default=4000,
#                         help="Save checkpoint every X updates steps.")
#     parser.add_argument("--eval_all_checkpoints", type=bool, default=True,
#                         help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
#     # parser.add_argument("--no_cuda", action="store_true",
#     #                     help="Avoid using CUDA when available")
#     parser.add_argument("--overwrite_output_dir", type=bool, default=True,
#                         help="Overwrite the content of the output directory")
#     parser.add_argument("--overwrite_cache", type=bool, default=False,
#                         help="Overwrite the cached training and evaluation sets")
#     parser.add_argument("--seed", type=int, default=1,
#                         help="random seed for initialization")
#
#     ## Relabling parameters
#     parser.add_argument("--relabel_th", type=float, default=0.98)
#     parser.add_argument("--relabels_th_reduction", type=float, default=0.05)
#
#     parser.add_argument("--local_rank", type=int, default=-1,
#                         help="For distributed training: local_rank")
#     parser.add_argument("--loss_name1", type=str, default="supcon_ce", help="Name of entity-oriented loss function.")
#     parser.add_argument("--loss_name2", type=str, default="supcon_o_bce", help="Name of entity-aware with 'O' loss function.")
#     parser.add_argument("--cls_name", type=str, default="ncm_dot", help="Name of classifier.")
#     ## Task parameters
#     parser.add_argument("--nb_tasks", type=int, default=11, help="The number of tasks.")
#     parser.add_argument("--start_step", type=int, default=0, help="The index of start step.")
#     parser.add_argument("--log_dir", type=str, default="log/nerd/results.txt",
#                         help="The logging directory where the test results will be written.")
#     parser.add_argument("--per_types", type=int, default=6,
#                         help="The number of each task.")
#     parser.add_argument("--feat_dim", type=int, default=128,
#                         help="The dimension of features.")
#     parser.add_argument("--train_temp", type=int, default=2,
#                         help="The distilling temperature in training parse.")
#     parser.add_argument("--eval_temp", type=int, default=1,
#                         help="The distilling temperature in inference parse.")
#     args = parser.parse_args()
#
#     MODEL_CLASSES = {
#         "bert": (BertConfig, BertTokenizer)
#     }
#
#     model_path = "/home/livosr/bert-base-uncased"
#
#     config_class, tokenizer_class = MODEL_CLASSES["bert"]
#     config = config_class.from_pretrained(model_path, num_labels=6)
#     tokenizer = tokenizer_class.from_pretrained(model_path, do_lower_case=args.do_lower_case)
#
#     args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#     labels = get_labels_dy(args.data_dir, 6, 0)
#     data_dir = args.data_dir + "task_{}".format(0)
#     logger = logging.getLogger(__name__)
#     load_and_cache_examples(args, tokenizer, labels, -100, "memory", data_dir, logger)