from functools import partial

import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer

from common.ner_tools import convert_examples_to_features, format_json_line
from common.config import Config
from loguru import logger


def get_dataset():
    """Expose the dataset / dataloader pair implemented by this module."""
    return (TextToolNerDataset, TextToolNerDataloader)


class TextToolNerDataset(Dataset):
    # 对文本标注工具的数据进行封装
    def __init__(self, project_id, label_list, text_all, config: Config = None,tokenizer=None,mode="train"):
        """
        Parameters
        ----------
        project_id : 文本标注工具生成的项目ID
        label_list : 字符串的List,实体标签列表
        config : yaml决定的config
        text_all:因为要跑主动学习,所以需要把所有的原始数据都加载进来
                 json字符串列表,list["text":{},"id":{}]
                 id号是每条文本的id号,需要从0开始,每一个项目保持唯一
        """
        self._format_label(label_list)
        self.project_id = project_id
        self.config = config
        self.status = self._load_if_exists()
        self.text_all = text_all
        self.all_ids = set(range(len(text_all)))

        self.train_ids = []  # 已经有标注结果的数据id号
        self.train_data = []  # 用于模型训练的数据

        self.test_data = []  # 用于模型预测的数据
        self.test_ids = []  # 用于模型预测的数据的id号
        self.mode = mode
        self.tokenizer = tokenizer
        self.callback = partial(convert_examples_to_features, label_list=self.label_list, tokenizer=self.tokenizer,
                                max_seq_length=self.config.train_max_seq_length)

    def set_mode(self, mode):
        assert mode in ["train", "test"]
        self.mode = mode

    def generate_test_data(self):
        """
        产出测试数据,供给主动学习模块使用
        Returns: list of text
        -------
        """
        test_ids = self.all_ids - set(self.train_ids)
        test_data = self.text_all[list(test_ids)]
        return test_data

    def put_test_data(self, test_data):
        """
        放入主动学习模块产生的下一批数据,用于模型预测
        test_data:  {"text":str,"id":int}
        Returns
        -------

        """
        assert self.mode == "test"
        for line in test_data:
            bio_line = format_json_line(line, callback=self.callback)
            self.test_data.append(bio_line)
            self.test_ids.append(line["id"])
        logger.info("加入测试数据,当前测试数据量:{}".format(len(self.test_data)))

    def put_train_data(self, text_tool_data):
        """
        Parameters
        ----------
        text_tool_data : 从文本标注工具中获取的数据。为了方便调试，将原始数据一起传入
                         List[{"text":str,"id":int,"label":{}}]
        """
        tmp_id = []
        for line in text_tool_data:
            line_feature = format_json_line(line, callback=self.callback)
            self.train_data.append(line_feature)
            self.train_ids.append(line["id"])
            tmp_id.append(line["id"])
        logger.info("加入训练数据,id为:{},当前训练数据量:{}".format(tmp_id, len(self.train_data)))

    def _format_label(self, label_list):
        """
        将字符串标签转为标准的BIO格式标签
        """
        self.label_list = ["X"]
        for label in label_list:
            self.label_list.append("B-" + label)
            self.label_list.append("I-" + label)
        self.label_list.append("O")
        self.markup = "bio"
        self.num_labels = len(self.label_list)
        self.id2label = {i: label for i, label in enumerate(self.label_list)}
        self.label2id = {label: i for i, label in enumerate(self.label_list)}
        self.markup = "bio"  # bios or bio, s represent single

    def _load_if_exists(self):
        """
        判断  project_id的文件是否存在
        """
        return False

    def __len__(self):
        if self.mode == "train":
            return len(self.train_data)
        else:
            return len(self.test_data)

    def __getitem__(self, index):
        if self.mode == "train":
            f = self.train_data[index]
        else:
            f = self.test_data[index]
        all_input_ids = torch.tensor(f.input_ids, dtype=torch.long)
        all_input_mask = torch.tensor(f.input_mask, dtype=torch.long)
        all_segment_ids = torch.tensor(f.segment_ids, dtype=torch.long)
        all_label_ids = torch.tensor(f.label_ids, dtype=torch.long)
        all_lens = torch.tensor(f.input_len, dtype=torch.long)
        return all_input_ids, all_input_mask, all_segment_ids, all_lens, all_label_ids

    def get_train_loader(self, batch_size=None):
        assert self.mode == "train"
        if batch_size is None:
            batch_size = self.config.model_config.batch_size
        return TextToolNerDataloader(self, batch_size=batch_size)

    def get_test_loader(self):
        assert self.mode == "test"
        return TextToolNerDataloader(self, batch_size=1, shuffle=False)



class TextToolNerDataloader(DataLoader):
    """DataLoader that pads every batch to its longest sequence via ``collate_fn``."""

    def __init__(self, dataset, batch_size, num_workers=0, shuffle=True):
        super().__init__(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            collate_fn=collate_fn,
            num_workers=num_workers,
        )


def collate_fn(batch):
    """Stack a batch of per-example tensors and trim padding.

    Each batch element is an (input_ids, attention_mask, token_type_ids,
    length, labels) tuple; every padded tensor is truncated to the longest
    real sequence length in the batch.
    """
    columns = [torch.stack(column) for column in zip(*batch)]
    input_ids, attention_mask, token_type_ids, lengths, labels = columns
    longest = int(lengths.max())
    trimmed = [t[:, :longest] for t in (input_ids, attention_mask, token_type_ids, labels)]
    input_ids, attention_mask, token_type_ids, labels = trimmed
    # NOTE: lengths come last here, unlike Dataset.__getitem__ — callers rely
    # on this (input_ids, mask, token_type_ids, labels, lens) order.
    return input_ids, attention_mask, token_type_ids, labels, lengths
