import torch
from torchtext.datasets import IMDB
from torch.utils.data import DataLoader
from torchtext.data.functional import to_map_style_dataset
from functools import partial

# Load the IMDB dataset from the local data directory.
# NOTE(review): torchtext's IMDB(root) returns the train and test splits as
# iterable datapipes — confirm against the installed torchtext version.
imdb_datapipe, test_pipeline = IMDB('d:\\pytorch\\data')

# Batch size for the (currently commented-out) datapipe pipeline below.
imdb_batch_size = 3
# Prompt prefix prepended to each review text by apply_prefix().
task = "sst2 sentence"
# Maps IMDB's numeric labels (stringified by process_labels) to readable names.
labels = {"1": "negative", "2": "positive"}

# Path of a single raw review file dumped at the bottom of this script.
# NOTE(review): points at 'neg.64' under train/ despite the "pos" in the
# variable name — presumably one negative review; verify.
test_pos_file = 'd:\\pytorch\\data\\datasets\\IMDB\\aclImdb_v1\\train\\neg.64'
def process_labels(labels, x):
    """Turn a (numeric_label, text) sample into (text, readable_label).

    The numeric label is stringified and looked up in *labels*
    (e.g. {"1": "negative", "2": "positive"}).
    """
    text = x[1]
    label_name = labels[str(x[0])]
    return text, label_name


def apply_prefix(task, x):
    """Prepend '<task>: ' to the text of a (text, label) pair; label passes through."""
    prefix = f"{task}: "
    return prefix + x[0], x[1]

#
# imdb_datapipe = imdb_datapipe.map(partial(process_labels, labels))
# imdb_datapipe = imdb_datapipe.map(partial(apply_prefix, task))
# imdb_datapipe = imdb_datapipe.batch(imdb_batch_size)
# imdb_datapipe = imdb_datapipe.rows2columnar(["text", "label"])
# imdb_dataloader = DataLoader(imdb_datapipe, batch_size=None)
#
# for x, y in imdb_dataloader:
#     print(x, y)


# Dump the raw contents of one review file, prefixing each line with its
# 1-based line number.  The original called enumerate() but never used the
# index, printing the literal 1 on every line — almost certainly a typo for
# `i`.  Also iterate the file handle directly instead of materializing all
# lines with readlines().
with open(test_pos_file, encoding='utf-8') as f:
    for i, line in enumerate(f, start=1):
        print(i, line, end='')