import numpy as np
import torch
import pandas as pd
from torch.utils.data import Dataset

# Tag -> id mapping for BIO sequence labels; index 0 doubles as the padding
# id so padded positions can be masked out downstream (see PadBatch).
label2id = {'[PAD]': 0, 'B': 1, 'I': 2, 'O': 3}
# Inverse mapping, built eagerly so it is usable immediately after import.
# (Originally left empty until the first BuildDataset was constructed, so
# any import-time consumer saw an empty dict.)
id2label = {v: k for k, v in label2id.items()}

class BuildDataset(Dataset):
    """Token-classification dataset yielding (token_ids, label_ids, length).

    Reads a JSON file into a DataFrame and is expected to convert each row
    into a ``(sentence, labels)`` pair, where ``sentence`` is a string and
    ``labels`` is a per-character BIO tag sequence of the same length.

    Args:
        f_path: path to a JSON file readable by ``pd.read_json``.
        seq_length: nominal maximum sequence length.
            NOTE(review): currently unused — no truncation is applied
            anywhere visible; confirm intended behavior.
        tokenizer: object exposing ``convert_tokens_to_ids`` (e.g. a
            HuggingFace tokenizer) — presumably one whose ``[PAD]`` id is 0,
            since PadBatch masks with ``token_ids > 0``; verify.
    """

    def __init__(self, f_path, seq_length=128, tokenizer=None):
        super(BuildDataset, self).__init__()
        self.seq_length = seq_length
        self.tokenizer = tokenizer
        self.dataset = []
        data_df = pd.read_json(f_path)

        for idx in data_df.index.values:
            # TODO(review): the original body was an unimplemented
            # placeholder (`self.dataset.append()` with no argument, which
            # raised TypeError on the first row). Implement the row ->
            # (sentence, labels) conversion for the actual JSON schema;
            # until then, fail with an explicit, descriptive error.
            raise NotImplementedError(
                'row parsing is not implemented: append (sentence, labels) '
                'pairs to self.dataset for each row of the input JSON'
            )

        # Populate the module-level inverse mapping so label ids can be
        # decoded back to tag strings.
        id2label.update({v: k for k, v in label2id.items()})

    def __getitem__(self, index):  # required by torch.utils.data.Dataset
        """Return (token_ids, label_ids, length) for one sample."""
        sentence, labels = self.dataset[index]
        label_ids = [label2id[tag] for tag in labels]
        # Tokenize character-by-character so token and label counts align.
        token_ids = self.tokenizer.convert_tokens_to_ids(list(sentence))
        if len(token_ids) != len(label_ids):
            # Raise instead of print + assert: asserts are stripped under
            # `python -O`; keep the diagnostic detail in the message.
            raise ValueError(
                'token/label length mismatch at index %d: %r -> %r vs %r'
                % (index, list(sentence), token_ids, label_ids)
            )
        return token_ids, label_ids, len(token_ids)

    def __len__(self):
        """Number of samples loaded from the JSON file."""
        return len(self.dataset)


def PadBatch(batch):
    """Collate a batch of (token_ids, label_ids, length) tuples.

    Pads every sample to the longest sequence in the batch and returns
    (token_ids_tensor, mask_tensor, labels_tensor), where the mask is True
    at non-pad positions (token id > 0).

    NOTE(review): token ids are padded with label2id['[PAD]'] (== 0);
    conceptually the token pad id should come from the tokenizer — this
    works only while the tokenizer's pad id is also 0. Label positions are
    padded with the 'O' tag and are expected to be excluded via the mask.
    """
    longest = max(length for _, _, length in batch)
    token_pad = label2id['[PAD]']
    label_pad = label2id['O']

    padded_tokens = [
        ids + [token_pad] * (longest - length) for ids, _, length in batch
    ]
    padded_labels = [
        tags + [label_pad] * (longest - length) for _, tags, length in batch
    ]

    token_ids_tensor = torch.LongTensor(np.array(padded_tokens))
    mask_tensor = token_ids_tensor > 0
    labels_tensor = torch.LongTensor(padded_labels)

    return token_ids_tensor, mask_tensor, labels_tensor