# -*- coding: UTF-8 -*-
"""
    @Author:YTQ
    @Time: 2022/11/28 10:33
    Description:
    
"""
import logging

from torch.utils import data
from config import *
import torch
from transformers import BertTokenizer


class DataSet(data.Dataset):
    """Text-classification dataset over tab-separated ``text<TAB>label`` lines.

    Each line of the split file holds one sample: the raw text, a tab,
    then an integer class label. Samples are tokenized with a BERT
    tokenizer and padded/truncated to ``TEXT_MAX_LENGTH``.

    Args:
        types: which split to load — ``'train'``, ``'dev'`` or ``'test'``.

    Raises:
        ValueError: if *types* is not one of the known splits.
    """

    def __init__(self, types='train'):
        super().__init__()
        # Dispatch table keeps the split -> path mapping in one place
        # (paths come from config.py via the star import).
        split_paths = {
            'train': TRAIN_DATA_PATH,
            'dev': DEV_DATA_PATH,
            'test': TEST_DATA_PATH,
        }
        try:
            self.data_path = split_paths[types]
        except KeyError:
            # ValueError (not bare Exception) and an informative message,
            # so callers see exactly which argument was wrong.
            raise ValueError(f"unknown dataset split: {types!r}") from None
        with open(self.data_path, 'r', encoding=ENCODING) as f:
            self.lines = f.readlines()
        self.tokenizer = BertTokenizer.from_pretrained(MODULE_BERT_PATH)

    def __len__(self):
        """Number of samples — one per line of the split file."""
        return len(self.lines)

    def __getitem__(self, index):
        """Return ``(input_ids, attention_mask, label)`` tensors for one sample.

        The label is the LAST tab-separated field, so ``rsplit`` with
        maxsplit=1 is used: texts that themselves contain tabs no longer
        crash the parse (a plain ``split`` raised ValueError on them).
        """
        text, label = self.lines[index].rsplit('\t', 1)
        tokened = self.tokenizer(text)
        input_ids = tokened['input_ids']
        mask = tokened['attention_mask']
        # Right-pad short sequences up to the fixed model input length;
        # padded positions get BERT_PAD_ID and an attention mask of 0.
        pad_len = TEXT_MAX_LENGTH - len(input_ids)
        if pad_len > 0:
            input_ids += [BERT_PAD_ID] * pad_len
            mask += [0] * pad_len
        # int() tolerates the trailing newline left on the label field.
        target = int(label)
        # NOTE(review): over-long sequences are hard-truncated by slicing,
        # which drops the trailing [SEP] token — presumably acceptable for
        # this classifier; confirm against the model's expectations.
        return (torch.tensor(input_ids[:TEXT_MAX_LENGTH]),
                torch.tensor(mask[:TEXT_MAX_LENGTH]),
                torch.tensor(target))

