import copy
import pickle
import random

import numpy as np
import torch

from inputexample import InputExample, InputFeatures

def set_seed(args):
    """Seed Python, NumPy and PyTorch RNGs from ``args.seed`` for reproducibility."""
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    # Also seed every CUDA device; this is a safe no-op on CPU-only machines.
    torch.cuda.manual_seed_all(args.seed)

def convert_to_features(examples,
                        label_list,
                        max_seq_length,
                        tokenizer,
                        cls_token="[CLS]",
                        cls_token_segment_id=1,
                        sep_token="[SEP]",
                        pad_token=0,
                        pad_token_segment_id=0,
                        pad_token_label_id=-1,
                        sequence_a_segment_id=0,
                        mask_padding_with_zero=True,
                        logger=None):
    """Tokenize InputExamples and convert them into fixed-length InputFeatures.

    Each word is sub-tokenized; only the first sub-token keeps the word's
    label, the remainder get ``pad_token_label_id``. Sequences are truncated
    to ``max_seq_length - 2``, wrapped in [CLS]/[SEP], then padded.

    Returns a list of InputFeatures carrying input_ids, input_mask,
    segment_ids, label_ids (entity-type level) and bi_label_ids (BIO level).
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    # BIO label space: "O" plus B-/I- variants of every non-"O" type.
    bi_label_list = ["O"]
    for label in label_list[1:]:
        bi_label_list.extend(["B-" + label, "I-" + label])
    bi_label_map = {label: i for i, label in enumerate(bi_label_list)}
    # Frequency count of each non-"O" BIO label (reported at the end, stats only).
    pre_labels = {i: 0 for i in range(1, len(bi_label_map))}

    features = []

    for ex_index, example in enumerate(examples):
        # BUGFIX: logger defaults to None; guard every use to avoid AttributeError.
        if ex_index % 10000 == 0 and logger is not None:
            logger.info("Writing example %d of %d", ex_index, len(examples))

        tokens = []
        label_ids = []
        bi_label_ids = []

        for word, label, bi_label in zip(example.words, example.labels, example.bi_labels):
            word_tokens = tokenizer.tokenize(word)
            if word_tokens:  # tokenizer may return [] for exotic characters
                tokens.extend(word_tokens)
                # Label only the first sub-token; mask the continuation pieces.
                label_ids.extend([label_map[label]]
                                 + [pad_token_label_id] * (len(word_tokens) - 1))
                bi_label_ids.extend([bi_label_map[bi_label]]
                                    + [pad_token_label_id] * (len(word_tokens) - 1))
                if bi_label != "O":
                    pre_labels[bi_label_map[bi_label]] += 1

        # Reserve two positions for [CLS] and [SEP].
        if len(tokens) > max_seq_length - 2:
            tokens = tokens[:max_seq_length - 2]
            label_ids = label_ids[:max_seq_length - 2]
            bi_label_ids = bi_label_ids[:max_seq_length - 2]

        tokens += [sep_token]
        label_ids += [pad_token_label_id]
        bi_label_ids += [pad_token_label_id]

        segment_ids = [sequence_a_segment_id] * len(tokens)

        tokens = [cls_token] + tokens
        label_ids = [pad_token_label_id] + label_ids
        bi_label_ids = [pad_token_label_id] + bi_label_ids
        segment_ids = [cls_token_segment_id] + segment_ids

        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # Real tokens get 1 (or 0 when mask_padding_with_zero is False).
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Pad every sequence up to max_seq_length.
        padding_length = max_seq_length - len(input_ids)
        input_ids += [pad_token] * padding_length
        input_mask += [0 if mask_padding_with_zero else 1] * padding_length
        segment_ids += [pad_token_segment_id] * padding_length
        label_ids += [pad_token_label_id] * padding_length
        bi_label_ids += [pad_token_label_id] * padding_length

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        # BUGFIX: use a real assertion message instead of `print(...)`, whose
        # return value (None) was the message in the original.
        assert len(label_ids) == max_seq_length, f"{len(label_ids)} != {max_seq_length}"
        assert len(bi_label_ids) == max_seq_length, f"{len(bi_label_ids)} != {max_seq_length}"

        if ex_index < 5 and logger is not None:
            logger.info("*** Example ***")
            logger.info("guid: %s", example.guid)
            logger.info("tokens: %s", " ".join(str(x) for x in tokens))
            logger.info("input_ids: %s", " ".join(str(x) for x in input_ids))
            logger.info("input_mask: %s", " ".join(str(x) for x in input_mask))
            logger.info("segment_ids: %s", " ".join(str(x) for x in segment_ids))
            logger.info("label_ids: %s", " ".join(str(x) for x in label_ids))
            logger.info("bi_label_ids: %s", " ".join(str(x) for x in bi_label_ids))
        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_ids=label_ids,
                          bi_label_ids=bi_label_ids))
    print("masked_labels_stat = ", pre_labels)
    return features

def conll_loading(
        args,
        task_num,
        pad_token=-100,
        pad_token_segment_id=0,
        sequence_a_segment_id=0,
        max_seq_length=128):
    """Load pickled CoNLL training features for continual-NER task ``task_num``.

    Reads ``(inputs_dict, labels_dict)`` from ``args.conll_train_path``,
    selects the slice of entity types owned by this task, masks labels of
    other tasks to "O", remaps BIO ids to per-type ids, and returns padded
    InputFeatures.

    NOTE(review): pad_token defaults to -100 and is used to pad input_ids —
    confirm the embedding layer downstream tolerates this (0 is customary).
    """
    print(args.conll_train_path)
    with open(args.conll_train_path, "rb") as f:
        inputs_train_dict, y_train_dict = pickle.load(f)
    features = []

    # Global entity-type index range [start, end] owned by this task.
    # (The original task_num == 0 special case computed identical values.)
    start = sum(args.per_types[:task_num])
    end = sum(args.per_types[:task_num + 1]) - 1
    key = "{}_{}".format(start, end)

    label_max, label_min = 0, 100  # sanity bounds of remapped label ids

    inputs = inputs_train_dict[key]
    ys = y_train_dict[key]
    out_of_task = 0  # labels that belong to another task ("yichang" = anomaly)
    # NOTE(review): assumes BIO label ids stay within 1..8 — a larger id
    # would raise KeyError below; confirm against the pickle contents.
    pre_labels = {i: 0 for i in range(1, 9)}

    for input_ids, label_ids in zip(inputs, ys):
        bi_label_ids = []
        if len(input_ids) > max_seq_length:
            # Truncate but keep token id 102 (presumably [SEP]) at the end.
            input_ids = input_ids[:max_seq_length - 1] + [102]
            label_ids = label_ids[:max_seq_length - 1] + [-100]

        segment_ids = [sequence_a_segment_id] * len(input_ids)
        input_mask = [1] * len(input_ids)

        for i in range(len(label_ids)):
            if label_ids[i] > 0:
                pre_labels[label_ids[i]] += 1
                if label_ids[i] > (end + 1) * 2 or label_ids[i] <= start * 2:
                    # Label outside this task's range: mask it to "O".
                    out_of_task += 1
                    label_ids[i] = 0
                    bi_label_ids.append(label_ids[i])
                elif label_ids[i] % 2 == 0:
                    # Even id = I- tag. BUGFIX: use integer division so label
                    # ids stay ints (Python 3 "/" produces floats).
                    bi_label_ids.append(label_ids[i])
                    label_ids[i] = label_ids[i] // 2
                else:
                    # Odd id = B- tag; B and I collapse to the same type id.
                    bi_label_ids.append(label_ids[i])
                    label_ids[i] = (label_ids[i] + 1) // 2

                if label_ids[i] > label_max:
                    label_max = label_ids[i]
                if label_ids[i] < label_min and label_ids[i] > 0:
                    label_min = label_ids[i]
            else:
                bi_label_ids.append(label_ids[i])

        # Pad every sequence up to max_seq_length.
        padding_length = max_seq_length - len(input_ids)
        input_ids += [pad_token] * padding_length
        input_mask += [0] * padding_length
        segment_ids += [pad_token_segment_id] * padding_length
        label_ids += [-100] * padding_length
        bi_label_ids += [-100] * padding_length

        assert len(input_ids) == max_seq_length
        assert len(bi_label_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        # BUGFIX: real assertion message instead of `print(...)` (None message).
        assert len(label_ids) == max_seq_length, f"{len(label_ids)} != {max_seq_length}"

        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_ids=label_ids,
                          bi_label_ids=bi_label_ids))
    print(label_max, label_min)
    print("training labels_wo_mask", pre_labels)
    return features


def ontonotes_loading(
        args,
        task_num,
        pad_token=-100,
        pad_token_segment_id=0,
        sequence_a_segment_id=0,
        max_seq_length=128):
    """Load pickled OntoNotes training features for continual-NER task ``task_num``.

    Reads ``(inputs_dict, labels_dict)`` from ``args.ontonotes_train_path``,
    selects the slice of entity types owned by this task, masks labels of
    other tasks to "O", remaps BIO ids to per-type ids, and returns padded
    InputFeatures.

    NOTE(review): pad_token defaults to -100 and is used to pad input_ids —
    confirm the embedding layer downstream tolerates this (0 is customary).
    """
    print(args.ontonotes_train_path)
    with open(args.ontonotes_train_path, "rb") as f:
        inputs_train_dict, y_train_dict = pickle.load(f)
    features = []

    # Global entity-type index range [start, end] owned by this task.
    # (The original task_num == 0 special case computed identical values.)
    start = sum(args.per_types[:task_num])
    end = sum(args.per_types[:task_num + 1]) - 1
    key = "{}_{}".format(start, end)

    label_max, label_min = 0, 100  # sanity bounds of remapped label ids

    inputs = inputs_train_dict[key]
    ys = y_train_dict[key]
    out_of_task = 0  # labels that belong to another task ("yichang" = anomaly)
    print(len(inputs))

    for input_ids, label_ids in zip(inputs, ys):
        bi_label_ids = []
        if len(input_ids) > max_seq_length:
            # Truncate but keep token id 102 (presumably [SEP]) at the end.
            input_ids = input_ids[:max_seq_length - 1] + [102]
            label_ids = label_ids[:max_seq_length - 1] + [-100]

        segment_ids = [sequence_a_segment_id] * len(input_ids)
        input_mask = [1] * len(input_ids)

        for i in range(len(label_ids)):
            if label_ids[i] > 0:
                if label_ids[i] > (end + 1) * 2 or label_ids[i] <= start * 2:
                    # Label outside this task's range: mask it to "O".
                    out_of_task += 1
                    label_ids[i] = 0
                    bi_label_ids.append(label_ids[i])
                elif label_ids[i] % 2 == 0:
                    # Even id = I- tag. BUGFIX: use integer division so label
                    # ids stay ints (Python 3 "/" produces floats).
                    bi_label_ids.append(label_ids[i])
                    label_ids[i] = label_ids[i] // 2
                else:
                    # Odd id = B- tag; B and I collapse to the same type id.
                    bi_label_ids.append(label_ids[i])
                    label_ids[i] = (label_ids[i] + 1) // 2

                if label_ids[i] > label_max:
                    label_max = label_ids[i]
                if label_ids[i] < label_min and label_ids[i] > 0:
                    label_min = label_ids[i]
            else:
                bi_label_ids.append(label_ids[i])

        # Pad every sequence up to max_seq_length.
        padding_length = max_seq_length - len(input_ids)
        input_ids += [pad_token] * padding_length
        input_mask += [0] * padding_length
        segment_ids += [pad_token_segment_id] * padding_length
        label_ids += [-100] * padding_length
        bi_label_ids += [-100] * padding_length

        assert len(input_ids) == max_seq_length
        assert len(bi_label_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        # BUGFIX: real assertion message instead of `print(...)` (None message).
        assert len(label_ids) == max_seq_length, f"{len(label_ids)} != {max_seq_length}"

        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_ids=label_ids,
                          bi_label_ids=bi_label_ids))
    print(label_max, label_min)

    return features


def i2b2_loading(
        args,
        task_num,
        pad_token=-100,
        pad_token_segment_id=0,
        sequence_a_segment_id=0,
        max_seq_length=128):
    """Load pickled i2b2 training features for continual-NER task ``task_num``.

    Reads ``(inputs_dict, labels_dict)`` from ``args.i2b2_train_path``,
    selects the slice of entity types owned by this task, masks labels of
    other tasks to "O", remaps BIO ids to per-type ids, and returns padded
    InputFeatures.

    NOTE(review): pad_token defaults to -100 and is used to pad input_ids —
    confirm the embedding layer downstream tolerates this (0 is customary).
    """
    print(args.i2b2_train_path)
    with open(args.i2b2_train_path, "rb") as f:
        inputs_train_dict, y_train_dict = pickle.load(f)
    features = []

    # Global entity-type index range [start, end] owned by this task.
    # (The original task_num == 0 special case computed identical values.)
    start = sum(args.per_types[:task_num])
    end = sum(args.per_types[:task_num + 1]) - 1
    key = "{}_{}".format(start, end)

    label_max, label_min = 0, 100  # sanity bounds of remapped label ids

    inputs = inputs_train_dict[key]
    ys = y_train_dict[key]
    out_of_task = 0  # labels that belong to another task ("yichang" = anomaly)
    print(len(inputs))

    for input_ids, label_ids in zip(inputs, ys):
        bi_label_ids = []
        if len(input_ids) > max_seq_length:
            # Truncate but keep token id 102 (presumably [SEP]) at the end.
            input_ids = input_ids[:max_seq_length - 1] + [102]
            label_ids = label_ids[:max_seq_length - 1] + [-100]

        segment_ids = [sequence_a_segment_id] * len(input_ids)
        input_mask = [1] * len(input_ids)

        for i in range(len(label_ids)):
            if label_ids[i] > 0:
                if label_ids[i] > (end + 1) * 2 or label_ids[i] <= start * 2:
                    # Label outside this task's range: mask it to "O".
                    out_of_task += 1
                    label_ids[i] = 0
                    bi_label_ids.append(label_ids[i])
                elif label_ids[i] % 2 == 0:
                    # Even id = I- tag. BUGFIX: use integer division so label
                    # ids stay ints (Python 3 "/" produces floats).
                    bi_label_ids.append(label_ids[i])
                    label_ids[i] = label_ids[i] // 2
                else:
                    # Odd id = B- tag; B and I collapse to the same type id.
                    bi_label_ids.append(label_ids[i])
                    label_ids[i] = (label_ids[i] + 1) // 2

                if label_ids[i] > label_max:
                    label_max = label_ids[i]
                if label_ids[i] < label_min and label_ids[i] > 0:
                    label_min = label_ids[i]
            else:
                bi_label_ids.append(label_ids[i])

        # Pad every sequence up to max_seq_length.
        padding_length = max_seq_length - len(input_ids)
        input_ids += [pad_token] * padding_length
        input_mask += [0] * padding_length
        segment_ids += [pad_token_segment_id] * padding_length
        label_ids += [-100] * padding_length
        bi_label_ids += [-100] * padding_length

        assert len(input_ids) == max_seq_length
        assert len(bi_label_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        # BUGFIX: real assertion message instead of `print(...)` (None message).
        assert len(label_ids) == max_seq_length, f"{len(label_ids)} != {max_seq_length}"

        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_ids=label_ids,
                          bi_label_ids=bi_label_ids))
    print(label_max, label_min)
    return features

def get_labels(path):
    """Read label names from ``<path>/labels.txt``, ensuring "O" comes first.

    Raises ValueError when ``path`` is empty/None.
    """
    # BUGFIX: the original checked `if path:` AFTER concatenating "/labels.txt",
    # so the check was always true and the error branch was unreachable.
    if not path:
        raise ValueError("path is None")
    label_path = path + "/labels.txt"
    with open(label_path, "r") as f:
        labels = f.read().splitlines()
    if "O" not in labels:
        labels = ["O"] + labels
    return labels

def get_labels_dy(path, total_types, step_id):
    """Return the first ``total_types`` entity labels from labels.txt, with "O" first.

    ``step_id`` is accepted for interface compatibility but is unused here.
    """
    all_labels = get_labels(path)
    # Drop the (single) "O" before slicing so it never counts against the quota.
    if "O" in all_labels:
        all_labels.remove("O")
    selected = all_labels[:total_types]
    if "O" in selected:
        return selected
    return ["O"] + selected

# Read a dataset from file.
# The returned `examples` is a list of InputExample objects,
# each with three attributes: guid ("mode-index"), words, and labels.

def get_from_file(args, data_dir, mode, labels_list):
    """Read CoNLL-style ``<data_dir>/<mode>.txt`` into InputExamples.

    Sentences are separated by blank lines or "-DOCSTART-" markers; each data
    line is tab-separated with the label in the last column. Lines without a
    label default to "O" (e.g. test files). For mode "memory" with
    ``args.use_data_enhance`` set, the examples are augmented via data_enhance.
    """
    file_path = data_dir + "/{}.txt".format(mode)
    guid_index = 1
    examples = []
    with open(file_path, encoding="utf-8") as f:
        words = []
        labels = []
        for line in f:
            if line.startswith("-DOCSTART-") or not line.strip():
                if words:
                    examples.append(InputExample(guid="{}-{}".format(mode, guid_index),
                                                 words=words,
                                                 labels=labels))
                    guid_index += 1
                    words = []
                    labels = []
            else:
                splits = line.split("\t")
                if splits[0].strip():
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[-1].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
        # Flush the trailing sentence (file may not end with a blank line).
        if words:
            # BUGFIX: was `"%s-%d".format(...)`, which mixes printf placeholders
            # with str.format and yields the literal string "%s-%d".
            examples.append(InputExample(guid="{}-{}".format(mode, guid_index),
                                         words=words,
                                         labels=labels))
        if mode == "memory" and args.use_data_enhance:
            examples = data_enhance(examples, labels_list[1:])
    return examples




def get_from_file_conll03(data_dir, mode, labels_list):
    """Read CoNLL-03-style ``<data_dir>/<mode>.txt`` into InputExamples.

    Same format handling as get_from_file, but always applies data_enhance
    for mode "memory" and takes no args object.
    """
    file_path = data_dir + "/{}.txt".format(mode)
    guid_index = 1
    examples = []
    with open(file_path, encoding="utf-8") as f:
        words = []
        labels = []
        for line in f:
            if line.startswith("-DOCSTART-") or not line.strip():
                if words:
                    examples.append(InputExample(guid="{}-{}".format(mode, guid_index),
                                                 words=words,
                                                 labels=labels))
                    guid_index += 1
                    words = []
                    labels = []
            else:
                splits = line.split("\t")
                if splits[0].strip():
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[-1].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
        # Flush the trailing sentence (file may not end with a blank line).
        if words:
            # BUGFIX: was `"%s-%d".format(...)`, which mixes printf placeholders
            # with str.format and yields the literal string "%s-%d".
            examples.append(InputExample(guid="{}-{}".format(mode, guid_index),
                                         words=words,
                                         labels=labels))
        if mode == "memory":
            examples = data_enhance(examples, labels_list[1:])
    return examples


def data_enhance(examples, labels_list):
    """Generate pseudo training examples via entity replacement and concatenation.

    For each label: gather the sentences containing it, locate every contiguous
    entity span of that label, then for each span emit four pseudo examples —
    the original sentence, the sentence with the span replaced by a randomly
    shuffled same-label entity, and both orders of concatenating the sentence
    with another shuffled same-label sentence.
    """
    examples_res = []
    # Bucket examples by each entity label they contain; an example may land
    # in several buckets.
    examples_dict = {label: [] for label in labels_list}
    for label in labels_list:
        for example in examples:
            if label in example.labels:
                examples_dict[label].append(example)



    for label in labels_list:
        examples_classes = examples_dict[label]
        sens = []      # one record per sentence: {"example": ..., "se": [(start, end), ...]}
        entities = []  # word spans of every entity of this label, across all sentences

        for examples_class in examples_classes:

            sen_list = {"example": examples_class, "se": []}
            pre = "O"
            # Sweep the label sequence to find half-open spans [start, end) of `label`.
            # NOTE(review): a span that runs to the end of the sentence is never
            # closed (end is only set when a non-`label` token follows) — confirm
            # whether trailing entities are intentionally dropped.
            for i in range(len(examples_class.labels)):
                if examples_class.labels[i] == label:
                    if pre == "O":
                        start = i
                    pre = label
                else:
                    if pre == label:
                        end = i
                        sen_list["se"].append((start, end))
                        entities.append(examples_class.words[start: end])
                    pre = "O"
            sens.append(sen_list)
        # Shuffled copies drive the random replacement / concatenation pairing.
        sens_copy = copy.deepcopy(sens)
        random.shuffle(sens_copy)
        random.shuffle(entities)

        for i in range(len(sens)):
            sen = sens[i]
            # NOTE(review): entities[i] is indexed by the *sentence* index, so
            # every span within one sentence gets the same replacement entity;
            # this also relies on len(entities) >= len(sens), which holds because
            # each bucketed sentence contains at least one entity — verify.
            for se in sen["se"]:
                start = se[0]
                end = se[1]
                new_sen_0 = sen["example"].words  # unchanged original sentence
                new_label_0 = sen["example"].labels
                # Entity replacement: swap the span for a random same-label entity.
                new_sen_1 = \
                    sen["example"].words[:start] + entities[i] + sen["example"].words[end:]
                new_label_1 = sen["example"].labels[:start] + [label] * len(entities[i]) \
                              + sen["example"].labels[end:]
                # Concatenate with a random same-label sentence, in both orders.
                new_sen_2 = sen["example"].words + sens_copy[i]["example"].words
                new_label_2 = sen["example"].labels + sens_copy[i]["example"].labels
                new_sen_3 = sens_copy[i]["example"].words + sen["example"].words
                new_label_3 = sens_copy[i]["example"].labels + sen["example"].labels
                examples_res.append(InputExample("pseudo", new_sen_0, new_label_0))
                examples_res.append(InputExample("pseudo", new_sen_1, new_label_1))
                examples_res.append(InputExample("pseudo", new_sen_2, new_label_2))
                examples_res.append(InputExample("pseudo", new_sen_3, new_label_3))

    return examples_res





def read_examples_from_file(data_dir, mode, args, labels, now_labels):
    """Load InputExamples for ``mode``, truncating to 100 when args.debug is set."""
    examples = get_from_file_wo_division(data_dir, mode, labels, now_labels, args)

    # Idiom fix: truthiness test instead of `== True` (args.debug is a flag).
    if args.debug:
        examples = examples[:100]

    return examples



def get_from_file_wo_division(data_dir, mode, labels_list, now_labels_list, args):
    """Read ``<data_dir>/<mode>.txt`` into InputExamples with type + BIO labels.

    Labels whose entity type is not in ``now_labels_list`` are masked to "O".
    ``labels`` keeps the bare type name; ``bi_labels`` keeps the raw B-/I- tag.
    """
    print("loading labels_list: ", labels_list)
    file_path = data_dir + "/{}.txt".format(mode)
    guid_index = 1
    examples = []

    with open(file_path, encoding="utf-8") as f:
        words = []
        labels = []
        bi_labels = []
        for line in f:
            if line.startswith("-DOCSTART-") or not line.strip():
                if words:
                    examples.append(InputExample(guid="{}-{}".format(mode, guid_index),
                                                 words=words,
                                                 labels=labels,
                                                 bi_labels=bi_labels))
                    guid_index += 1
                    words = []
                    labels = []
                    bi_labels = []
            else:
                # ontonotes5 files are space-separated; the rest are tab-separated.
                if args.dataset == "ontonotes5":
                    splits = line.split(" ")
                else:
                    splits = line.split("\t")

                if splits[0].strip():
                    words.append(splits[0])
                    if len(splits) > 1:
                        raw_label = splits[-1].replace("\n", "")
                        label_split = raw_label.split("-")

                        if len(label_split) == 1:
                            # Plain label, e.g. "O": same value in both views.
                            labels.append(label_split[-1])
                            bi_labels.append(label_split[-1])
                        else:
                            # "B-TYPE"/"I-TYPE": keep only this task's types.
                            if label_split[-1] in now_labels_list:
                                labels.append(label_split[-1])
                                bi_labels.append(raw_label)
                            else:
                                labels.append("O")
                                bi_labels.append("O")
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
                        # BUGFIX: keep bi_labels aligned with words/labels; the
                        # original skipped this append, desynchronizing the lists.
                        bi_labels.append("O")
        # Flush the trailing sentence (file may not end with a blank line).
        if words:
            # BUGFIX: was `"%s-%d".format(...)`, which yields the literal "%s-%d".
            examples.append(InputExample(guid="{}-{}".format(mode, guid_index),
                                         words=words,
                                         labels=labels,
                                         bi_labels=bi_labels))
    return examples

if __name__ == "__main__":
    # Smoke test for the loader.
    # BUGFIX: the original call passed only 3 of the 5 required arguments and
    # raised TypeError; minimal stand-ins are supplied here.
    class _Args:
        dataset = "ontonotes5"

    get_from_file_wo_division("/home/livosr/pythonproject/cnrc/ontonotes5",
                              "test",
                              ["EVENT", "PERSON"],
                              ["EVENT", "PERSON"],
                              _Args())