# -*- coding:utf8 -*-
# @Time : 2023/2/23 16:00
# @Author : WanJie Wu
import json
import os
import torch
from tqdm import tqdm
import time
from torch.utils.data import TensorDataset


class InputExample(object):
    """One raw training/eval example: a character sequence and its tag sequence."""

    def __init__(self, guid, text_a, label=None):
        """
        :param guid: Global Unique Identifier for this example
        :param text_a: first BERT sentence (list of characters)
        :param label: per-character tag list, or None when unlabeled
        """
        self.guid = guid
        self.text_a = text_a
        self.label = label


class InputFeatures(object):
    """One fully numeric, padded model input produced from an InputExample."""

    def __init__(self, input_ids, attention_mask, token_type_ids, label_ids, input_len):
        """
        :param input_ids: token ids, padded to max_seq_length
        :param attention_mask: 1 for real tokens, 0 for padding
        :param token_type_ids: segment ids (single sentence here)
        :param label_ids: per-token label ids, padded
        :param input_len: length before padding (incl. special tokens)
        """
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.label_ids = label_ids
        self.input_len = input_len


def convert_examples_to_features(
        examples, label2id, tokenizer,
        max_seq_length=512,
        special_tokens_count=2,
        token_a_type_id=0,
        sep_token="[SEP]",
        cls_token="[CLS]",
        pad_token=0
    ):
    """Convert InputExamples into fixed-length InputFeatures.

    Each example is tokenized character by character, truncated to
    ``max_seq_length - special_tokens_count``, wrapped as
    ``[CLS] tokens [SEP]`` (both special positions labeled "O") and
    right-padded to ``max_seq_length``.

    :param examples: list of InputExample; text_a is a char list, label a tag list
    :param label2id: mapping tag -> id; must contain "O"
    :param tokenizer: BERT-style tokenizer exposing .tokenize / .convert_tokens_to_ids
    :param max_seq_length: fixed output length
    :param special_tokens_count: how many special tokens are added (CLS + SEP)
    :param token_a_type_id: segment id for the single sentence
    :param sep_token: separator token string
    :param cls_token: classifier token string
    :param pad_token: id used to pad input_ids (also reused to pad label_ids —
        padded label positions are masked out downstream via attention_mask /
        input_len, presumably; verify against the training loop)
    :return: list of InputFeatures
    """
    # Room left for the real characters once specials are accounted for.
    budget = max_seq_length - special_tokens_count
    features = []
    for example in tqdm(examples):
        # NOTE(review): tokenize(_char)[0] raises IndexError if the tokenizer
        # returns no sub-token for a character (e.g. whitespace/control chars)
        # — confirm the input text is pre-cleaned.
        tokens = [tokenizer.tokenize(_char)[0] for _char in example.text_a][:budget]
        label_ids = [label2id[_label] for _label in example.label][:budget]
        token_type_ids = [token_a_type_id] * len(tokens)
        assert len(tokens) == len(label_ids) == len(token_type_ids)

        # Prepend CLS (labeled "O").
        tokens = [cls_token] + tokens
        label_ids = [label2id["O"]] + label_ids
        token_type_ids = [0] + token_type_ids

        # Append SEP (labeled "O").
        tokens += [sep_token]
        label_ids.append(label2id["O"])
        token_type_ids.append(token_a_type_id)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        attention_mask = [1] * len(input_ids)
        input_len = len(label_ids)  # true length, used to strip padding later

        # Right-pad everything to max_seq_length.
        padding_length = max_seq_length - len(input_ids)
        input_ids += [pad_token] * padding_length
        attention_mask += [0] * padding_length
        token_type_ids += [token_a_type_id] * padding_length
        label_ids += [pad_token] * padding_length

        assert len(input_ids) == len(attention_mask) == len(token_type_ids) == len(label_ids)

        features.append(InputFeatures(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            label_ids=label_ids,
            input_len=input_len
        ))
    return features


def get_tensor_dataset(features):
    """Stack a list of InputFeatures into a TensorDataset.

    Tensor order: input_ids, attention_mask (bool), token_type_ids,
    label_ids, input_lens — everything torch.long except the mask.
    """
    def column(attr, dtype):
        # One tensor per feature attribute, stacked over the whole list.
        return torch.tensor([getattr(f, attr) for f in features], dtype=dtype)

    return TensorDataset(
        column("input_ids", torch.long),
        column("attention_mask", torch.bool),
        column("token_type_ids", torch.long),
        column("label_ids", torch.long),
        column("input_len", torch.long),
    )


class ExampleProcessor(object):
    """Reads span-annotated JSON-lines NER data from ``data_dir`` and produces
    InputExamples with per-character BIO/BIOES tags.

    Expected files: ``label.txt`` (one entity type per line), ``train.txt`` and
    ``dev.txt`` (JSON lines with "text" and "label"), ``test.json`` (JSON lines
    with "text" only).
    """

    def __init__(self, data_dir, markup):
        """
        :param data_dir: directory holding label.txt / train.txt / dev.txt / test.json
        :param markup: tagging scheme, "BIO" or "BIOES" (case-insensitive)
        """
        self.data_dir = data_dir
        self.markup = markup.upper()
        assert self.markup in ["BIO", "BIOES"]
        self.label_path = os.path.join(self.data_dir, "label.txt")
        self.__get_labels()

    def __get_labels(self):
        """Build the full tag list from the entity names in label.txt."""
        label_names = []
        with open(self.label_path, "r", encoding="utf8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                label_names.append(line)
        labels = []
        for name in label_names:
            # Iterating the markup string yields one tag per letter (B/I/O or
            # B/I/O/E/S), so per-type tags like "O-name" are generated on
            # purpose — this matches the project's existing label mapping.
            labels.extend([f"{tag}-{name}" for tag in self.markup])
        labels.append("O")
        self.__labels = labels

    @property
    def id2label(self):
        """Mapping id -> tag string."""
        return {idx: label for idx, label in enumerate(self.__labels)}

    @property
    def label2id(self):
        """Mapping tag string -> id."""
        return {label: idx for idx, label in enumerate(self.__labels)}

    def __gen_labeling(self, json_line):
        """Convert one JSON line's span annotations into per-character tags.

        json_line["label"] looks like ``{"name": {"叶老桂": [[9, 11]]}}`` where
        each position pair is an inclusive [start, end] character span.
        """
        labels = ["O"] * len(json_line["text"])
        for key, detail in json_line["label"].items():
            # example detail: {'叶老桂': [[9, 11]]}
            for name, multi_pos in detail.items():
                # example multi_pos: [[9, 11]]
                for single_pos in multi_pos:
                    start, end = single_pos[0], single_pos[1]
                    if self.markup == "BIO":
                        labels[start] = f"B-{key}"
                        if end > start:
                            # BUGFIX: fill with the span length (end - start),
                            # not len(name) - 1. If the surface string and the
                            # span ever disagreed, the slice assignment silently
                            # changed the list length and broke the caller's
                            # len(texts) == len(labels) assertion.
                            labels[start+1:end+1] = [f"I-{key}"] * (end - start)
                    elif self.markup == "BIOES":
                        if start == end:
                            labels[start] = f"S-{key}"
                        else:
                            labels[start] = f"B-{key}"
                            labels[end] = f"E-{key}"
                            for idx in range(start+1, end):
                                labels[idx] = f"I-{key}"
                    else:
                        raise ValueError("参数错误")
        return labels

    def __read_examples(self, filename):
        """Shared labeled-file reader: one InputExample per non-empty JSON line."""
        examples = []
        count = 0
        with open(os.path.join(self.data_dir, filename), "r", encoding="utf8") as f:
            for line in f:
                if not line.strip():
                    continue
                count += 1
                json_line = json.loads(line)
                texts = list(json_line["text"])
                labels = self.__gen_labeling(json_line)
                assert len(texts) == len(labels)
                examples.append(InputExample(count, texts, labels))
        return examples

    def get_train_examples(self):
        """Read train.txt into labeled InputExamples."""
        return self.__read_examples("train.txt")

    def get_dev_examples(self):
        """Read dev.txt into labeled InputExamples."""
        return self.__read_examples("dev.txt")

    def get_test_examples(self):
        """Read test.json (unlabeled) into InputExamples with all-"O" labels.

        NOTE(review): the test split uses a .json suffix while train/dev use
        .txt — looks intentional for this dataset, but confirm.
        """
        test_examples = []
        count = 0
        with open(os.path.join(self.data_dir, "test.json"), "r", encoding="utf8") as f:
            for line in f:
                if not line.strip():
                    continue
                count += 1
                json_line = json.loads(line)
                test_examples.append(
                    InputExample(count, list(json_line["text"]), ["O"] * len(json_line["text"]))
                )
        return test_examples


def load_and_cache_examples(args, tokenizer, processor, data_type, logger):
    """Build the TensorDataset for one split, caching features on disk.

    The cache key is "{data_type}_{max_seq_length}.pkl" inside
    args.dataset_dir; on a hit the features are loaded via torch.load
    (pickle-based — only use caches you produced yourself).
    """
    cache_path = os.path.join(args.dataset_dir, f"{data_type}_{args.max_seq_length}.pkl")
    if not os.path.exists(cache_path):
        logger.info("正在新生成数据集...")
        # "train" and "dev" have dedicated readers; anything else is test.
        readers = {
            "train": processor.get_train_examples,
            "dev": processor.get_dev_examples,
        }
        examples = readers.get(data_type, processor.get_test_examples)()
        features = convert_examples_to_features(
            examples, processor.label2id, tokenizer, args.max_seq_length
        )
        torch.save(features, cache_path)
    else:
        logger.info("从缓存加载数据集...")
        features = torch.load(cache_path)
    return get_tensor_dataset(features)


def get_bio_entity(seq_ids, id2label):
    """Decode a BIO tag-id sequence into entity spans.

    :param seq_ids: list of label ids, e.g. [1, 2, 3]
    :param id2label: mapping id -> tag string, e.g. {2: 'B-PER', ...}
    :return: list of [type, start, end] spans with inclusive indices,
             e.g. [['PER', 0, 1], ['LOC', 3, 3]]

    BUGFIX: an I-tag whose type differs from the currently open chunk now
    closes that chunk instead of leaving it open — previously a later
    matching I-tag could extend an entity across a foreign tag
    (B-PER I-LOC I-PER decoded as one PER span 0..2). The per-branch
    "last index" appends are replaced by a single flush after the loop.
    """
    chunks = []
    chunk = ["", -1, -1]
    for idx, _id in enumerate(seq_ids):
        tag = id2label[_id]
        if tag.startswith("B-"):
            # A new entity starts; emit the previous one if it is open.
            if chunk[2] != -1:
                chunks.append(chunk)
            chunk = [tag.split('-')[1], idx, idx]
        elif tag.startswith("I-") and chunk[1] != -1 and tag.split('-')[1] == chunk[0]:
            # Continuation of the open chunk: extend its end index.
            chunk[2] = idx
        else:
            # "O", an unknown tag, or an I- that does not continue the chunk.
            if chunk[2] != -1:
                chunks.append(chunk)
            chunk = ["", -1, -1]
    # Flush an entity that runs to the end of the sequence.
    if chunk[2] != -1:
        chunks.append(chunk)
    return chunks


def get_bioes_entity(seq_ids, id2label):
    """Decode a BIOES tag-id sequence into [type, start, end] spans.

    Rules (inclusive indices):
    1. S-: a single-token entity, emitted immediately.
    2. B-: opens a candidate chunk (recording type and start).
    3. I-: only keeps the chunk alive when a chunk is open and the
       type matches; a mismatch discards the open chunk.
    4. E-: closes and emits the open chunk when the type matches;
       in any case the state is cleared.
    Anything else ("O", unknown tags) clears the state, so a B- that is
    never closed by a matching E- is dropped.
    """
    spans = []
    open_chunk = ["", -1, -1]
    for pos, tag_id in enumerate(seq_ids):
        tag = id2label[tag_id]
        if tag.startswith("S-"):
            spans.append([tag.split('-')[1], pos, pos])
            open_chunk = ["", -1, -1]
        elif tag.startswith("B-"):
            open_chunk = [tag.split('-')[1], pos, -1]
        elif tag.startswith("I-") and open_chunk[1] != -1:
            if tag.split('-')[1] != open_chunk[0]:
                open_chunk = ["", -1, -1]
        elif tag.startswith("E-") and open_chunk[1] != -1:
            if tag.split('-')[1] == open_chunk[0]:
                open_chunk[2] = pos
                spans.append(open_chunk)
            open_chunk = ["", -1, -1]
        else:
            open_chunk = ["", -1, -1]
    return spans


def get_entity(seq_ids, id2label, markup):
    """Decode a tag-id sequence with the decoder matching *markup*.

    :param markup: "BIO" or "BIOES", case-insensitive
    :return: list of [type, start, end] spans (inclusive indices)
    """
    scheme = markup.upper()
    assert scheme in ["BIO", "BIOES"]
    decoder = get_bio_entity if scheme == "BIO" else get_bioes_entity
    return decoder(seq_ids, id2label)


def gen_classify_entity(entities):
    """Group entity spans by entity type.

    Input:  [['PER', 0, 1], ['PER', 3, 3]]
    Output: {'PER': [[0, 1], [3, 3]]}
    """
    grouped = dict()
    for entity in entities:
        # entity[0] is the type; the remainder is the [start, end] span.
        grouped.setdefault(entity[0], []).append(entity[1:])
    return grouped


def convert_pred2label(classify_entities, text_a):
    """Attach surface strings to grouped spans for human-readable output.

    :param classify_entities: {'name': [[7, 9]], 'movie': [[16, 19]]} — spans
        are inclusive [start, end] character indices into text_a
    :param text_a: the original text the spans index into
    :return: {'name': {'任达华': [[7, 9]]}, ...} — per type, surface string
        mapped to every span where it occurs
    """
    pred_dict = dict()
    for entity_type, spans in classify_entities.items():
        by_surface = dict()
        for span in spans:
            # Inclusive end index, hence the +1 on the slice.
            surface = text_a[span[0]: span[1] + 1]
            by_surface.setdefault(surface, []).append(span)
        pred_dict[entity_type] = by_surface
    return pred_dict


def timer(func):
    """Decorator that prints the wall-clock runtime of each call to *func*.

    BUGFIX: apply functools.wraps so the wrapped function keeps its
    __name__/__doc__ — without it every decorated function reported as
    "wrapper" to introspection tools (the timing print itself was safe only
    because it closes over func directly).
    """
    from functools import wraps  # local import keeps this block self-contained

    @wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print("函数 %s 运行时间: %f 秒" % (func.__name__, end_time - start_time))
        return result
    return wrapper


if __name__ == "__main__":
    # Demo: decode one BIO-tagged prediction into entity spans.
    # BUGFIX: `a` is a batch containing a single sequence, but get_entity
    # expects a flat id list — passing the nested list made id2label[_id]
    # raise TypeError (unhashable list). Decode a[0] and show the result.
    a =[[12, 13, 13, 13, 13, 13, 24, 25, 25, 25, 25, 30, 24, 25, 25, 9, 10, 10, 30, 12, 13, 13, 13, 13, 13, 24, 25, 25, 25,
      9, 10, 10, 30, 30, 30, 30, 30]]
    b = {0: 'B-company', 1: 'I-company', 2: 'O-company', 3: 'B-organization', 4: 'I-organization', 5: 'O-organization', 6: 'B-game', 7: 'I-game', 8: 'O-game', 9: 'B-name', 10: 'I-name', 11: 'O-name', 12: 'B-government', 13: 'I-government', 14: 'O-government', 15: 'B-movie', 16: 'I-movie', 17: 'O-movie', 18: 'B-address', 19: 'I-address', 20: 'O-address', 21: 'B-book', 22: 'I-book', 23: 'O-book', 24: 'B-position', 25: 'I-position', 26: 'O-position', 27: 'B-scene', 28: 'I-scene', 29: 'O-scene', 30: 'O'}
    c = "BIO"
    print(get_entity(a[0], b, c))