import json
import torch
from ..config.config import Config
from .common import Common
from torch.utils.data import DataLoader, Dataset
from torch.nn.utils.rnn import pad_sequence

class NerDataset(Dataset):
    """Thin ``Dataset`` wrapper over a list of ``(sample_x, sample_y)`` pairs."""

    def __init__(self, datas):
        super().__init__()
        # `datas` is expected to be an indexable sequence of (x, y) pairs.
        self.datas = datas

    def __len__(self):
        """Return the number of samples in the dataset."""
        return len(self.datas)

    def __getitem__(self, item):
        """Return the (sample_x, sample_y) pair stored at index `item`."""
        sample = self.datas[item]
        # sample[0] is sample_x, sample[1] is sample_y
        return sample[0], sample[1]

class NerDataLoader:
    """Builds the train/dev DataLoaders for the NER task."""

    def __init__(self):
        """
        Load configuration, raw samples, and the word/tag vocabularies.
        """
        # Project configuration (paths, batch size, ...).
        self.config = Config()
        # Shared helpers; provides build_data().
        self.common = Common()
        # datas: sequence of (sample_x, sample_y) pairs; word2id: char -> id.
        self.datas, self.word2id = self.common.build_data()
        # Tag vocabulary: label string -> id. Use a context manager so the
        # file handle is closed (json.load(open(...)) leaked it), and pin the
        # encoding instead of relying on the platform default.
        with open(self.config.tag2id_path, encoding="utf-8") as f:
            self.tag2id = json.load(f)
        # Ordered list of label names.
        self.target = list(self.tag2id.keys())

    def collate_fn(self, batch):
        """
        Map a batch of (chars, labels) samples onto padded id tensors.

        :param batch: list of (sample_x, sample_y) pairs; sample_x is a
            sequence of characters, sample_y the matching label sequence.
        :return: (input_ids_padded, labels_padded, attention_mask), each of
            shape (batch_size, max_seq_len_in_batch).
        """
        x_train = [torch.tensor([self.word2id[char] for char in data[0]]) for data in batch]
        y_train = [torch.tensor([self.tag2id[label] for label in data[1]]) for data in batch]
        # Pad input_ids with 0 (assumed to be the PAD id in word2id — confirm).
        input_ids_padded = pad_sequence(x_train, batch_first=True, padding_value=0)
        # Pad labels with -100, the conventional ignore index for the loss
        # (the original comment wrongly claimed 0 was used here).
        labels_padded = pad_sequence(y_train, batch_first=True, padding_value=-100)
        # attention_mask: 1 for real tokens, 0 for padded positions.
        attention_mask = (input_ids_padded != 0).long()
        return input_ids_padded, labels_padded, attention_mask

    def get_data(self, split_index=6200):
        """
        Build the train/dev data iterators.

        :param split_index: samples before this index form the training set,
            the rest the dev set (default preserves the original 6200 split).
        :return: (train_dataloader, dev_dataloader)
        """
        train_dataset = NerDataset(self.datas[:split_index])
        train_dataloader = DataLoader(dataset=train_dataset,
                                      batch_size=self.config.batch_size,
                                      collate_fn=self.collate_fn,
                                      drop_last=True,
                                      )

        dev_dataset = NerDataset(self.datas[split_index:])
        dev_dataloader = DataLoader(dataset=dev_dataset,
                                    batch_size=self.config.batch_size,
                                    collate_fn=self.collate_fn,
                                    drop_last=True,
                                    )
        return train_dataloader, dev_dataloader

