# coding: utf-8
import torch
from torch.utils.data import Dataset, DataLoader
from process import *

# Module-level vocabulary maps built from the training file; `collate_fn`
# below uses `word2id` to turn tokens into ids.
# NOTE(review): this runs at import time (file I/O side effect) — consider
# moving it behind a function if import cost matters.
word2id, id2word = get_word_id(config.train_file_path)


class MyDataset(Dataset):
    """Dataset over the parallel lists returned by ``get_data``.

    ``self.datas`` holds five parallel sequences: token sequences, labels,
    entity-1 position features, entity-2 position features, and entity pairs
    (assumed parallel — each indexed by the same sample index).
    """

    def __init__(self, data_path):
        super().__init__()
        # Load all columns once up front; items are served by index.
        self.datas = get_data(data_path)

    def __getitem__(self, index):
        # Pull the index-th element out of each of the five parallel columns.
        seq, lab, pos_e1, pos_e2, ents = (col[index] for col in self.datas[:5])
        return seq, lab, pos_e1, pos_e2, ents

    def __len__(self):
        # The columns are parallel, so the first one's length is the size.
        return len(self.datas[0])


def collate_fn(batch):
    """Collate a batch of (sequence, label, positionE1, positionE2, entities)
    samples into id-mapped, padded lists for the DataLoader.

    Args:
        batch: list of 5-tuples as produced by ``MyDataset.__getitem__``.

    Returns:
        A 5-tuple of lists:
            sequence_ids   -- each sequence mapped to word ids and padded
            labels         -- raw labels, passed through unchanged
            positionE1_ids -- padded entity-1 position features
            positionE2_ids -- padded entity-2 position features
            entities       -- raw entity pairs, passed through unchanged
    """
    sequences = [item[0] for item in batch]
    labels = [item[1] for item in batch]
    positionE1 = [item[2] for item in batch]
    positionE2 = [item[3] for item in batch]
    entities = [item[4] for item in batch]

    sequence_ids = [sent2id_padding(seq, word2id) for seq in sequences]
    positionE1_ids = [postion_padding(pos) for pos in positionE1]
    # BUG FIX: the original never padded positionE2 and had no return
    # statement, so every batch came out of the DataLoader as None.
    positionE2_ids = [postion_padding(pos) for pos in positionE2]

    return sequence_ids, labels, positionE1_ids, positionE2_ids, entities

#
def get_dataloader(data_path=None, batch_size=None, shuffle=False):
    """Build a DataLoader over ``MyDataset``.

    Generalized from the original hard-coded version; calling it with no
    arguments behaves exactly as before.

    Args:
        data_path: path to the data file; defaults to ``config.train_file_path``.
        batch_size: batch size; defaults to ``config.batch_size``.
        shuffle: whether to reshuffle every epoch (default False, matching
            the previous behavior).

    Returns:
        torch.utils.data.DataLoader yielding collated batches; incomplete
        final batches are dropped (``drop_last=True``).
    """
    data_path = config.train_file_path if data_path is None else data_path
    batch_size = config.batch_size if batch_size is None else batch_size
    dataset = MyDataset(data_path)
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
                      collate_fn=collate_fn, drop_last=True)


if __name__ == '__main__':
    # Smoke test: build the loader and pull a single batch to confirm the
    # pipeline runs end to end.
    loader = get_dataloader()
    batch = next(iter(loader), None)
