# coding:utf-8
"""

"""
import json
import torch
from data_process import *
from common import *
from torch.utils.data import DataLoader, Dataset
from torch.nn.utils.rnn import pad_sequence  # pads a batch of variable-length sequences to a common length

datas, word2id = build_data()


# Dataset class wrapping the NER samples
class NerDataset(Dataset):
    def __init__(self, datas):
        super().__init__()
        self.datas = datas

    def __len__(self):
        return len(self.datas)

    def __getitem__(self, index):
        x = self.datas[index][0]
        y = self.datas[index][-1]
        return x, y


def collate_fn(batch):
    """
    将每个batch的样本的字符转换为数字作为训练数据，标签转换为数字作为标签
    :param batch:
    :return:
    """
    x_train = [torch.tensor([word2id[word] for word in data[0]]) for data in batch]
    y_train = [torch.tensor([config.tag2id[tag] for tag in data[1]]) for data in batch]
    # 进行句子长短补齐或者截断 pad_sequence:可以只对一个批次的样本进行统一长度，而且，最后的长度时以该批次中最长样本的句子长度为基准
    input_ids_padded = pad_sequence(x_train, batch_first=True, padding_value=0)
    label_padded = pad_sequence(y_train, batch_first=True,
                                padding_value=0)  # -100是业界的规定，且在大模型内部计算损失时，如果填充的标签为-100，则不会参与损失计算，提高了模型训练速度。
    attention_mask = (input_ids_padded != 0).long().bool()  # nn.Embedding 层要求输入必须是 long 类型
    return input_ids_padded, label_padded, attention_mask


def get_data():
    train_dataset = NerDataset(datas[:int(len(datas) * 0.8)])
    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=config.batch_size,
                                  collate_fn=collate_fn,
                                  shuffle=True,
                                  drop_last=True)
    dev_dataset = NerDataset(datas[int(len(datas) * 0.8):])
    dev_dataloader = DataLoader(dataset=dev_dataset,
                                 batch_size=config.batch_size,
                                 collate_fn=collate_fn,
                                 drop_last=True)
    return train_dataloader, dev_dataloader


if __name__ == '__main__':
    nd = NerDataset(datas)
    train_dataloader, test_dataloader = get_data()
    next(iter(train_dataloader))
