from torch.utils.data import Dataset
import pandas as pd
from torchtext.data.utils import get_tokenizer

class Mydataset(Dataset):
    """AG News dataset yielding ``(tokens, label)`` pairs.

    At construction time the chosen CSV split is read with pandas and the
    description column (column 2) is tokenized with torchtext's
    ``basic_english`` tokenizer; ``__getitem__`` then just indexes the
    pre-tokenized list.
    """

    # Default location of the AG News CSV files; override via ``data_dir``.
    DATA_DIR = r"/root/project/Code/sshcode/word_vector/data/ag_news_csv"

    def __init__(self, is_train: bool = True, data_dir: str = DATA_DIR):
        """Load and tokenize one split of AG News.

        Args:
            is_train: load ``train.csv`` when True, otherwise ``test.csv``.
            data_dir: directory containing the AG News CSV files
                (defaults to the original hard-coded path).
        """
        super().__init__()
        split = "train" if is_train else "test"
        # NOTE(review): the AG News CSVs ship without a header row, so
        # read_csv's default header=0 silently consumes the first sample
        # as column names — confirm whether header=None was intended.
        data = pd.read_csv(f"{data_dir}/{split}.csv")
        tokenizer = get_tokenizer("basic_english")
        # itertuples(index=False) is much faster than iterrows and keeps
        # positional access aligned with the CSV columns:
        # row[0] = class label, row[2] = description text.
        self.text_list = [
            (tokenizer(row[2]), row[0])
            for row in data.itertuples(index=False)
        ]

    def __len__(self) -> int:
        """Number of samples in this split."""
        return len(self.text_list)

    def __getitem__(self, item):
        """Return the ``(tokens, label)`` pair at index ``item``."""
        return self.text_list[item]


if __name__ == "__main__":
    # Smoke test: build the training split and show one tokenized sample.
    ds = Mydataset()
    print(ds[3])
