import torch
import numpy as np
from PIL import Image
from tokenizer import encode

def load_data(path: str):
    """Load a paired caption/image dataset from the directory *path*.

    Expects ``path`` to contain ``text.txt`` (one caption per line) and
    images named ``0.png``, ``1.png``, ... — one per caption line.

    Returns:
        ids: list of 1-D ``torch.LongTensor`` token-id sequences (ragged
            lengths, so they are not stacked).
        images: ``torch.FloatTensor`` of stacked images scaled to [0, 1].
    """
    # endswith is safe on an empty string, unlike indexing path[-1].
    if not path.endswith('/'):
        path += '/'

    # Context manager closes the handle; explicit encoding keeps decoding
    # independent of the host locale.
    with open(path + 'text.txt', encoding='utf-8') as f:
        text = f.read()
    # NOTE(review): text[1:] drops the file's very first character —
    # presumably a leading newline in text.txt; confirm against the data.
    ids = [torch.tensor(encode(s)) for s in text[1:].split('\n')]

    images = []
    for i in range(len(ids)):
        # Image files are numbered to match caption line order.
        with Image.open(path + str(i) + '.png') as image:
            arr = np.array(image)  # forces the lazy PIL load before close
        tensor = torch.tensor(arr, dtype=torch.float) / 255.0
        # NOTE(review): permute([2, 1, 0]) yields (C, W, H); conventional
        # CHW would be permute([2, 0, 1]) — confirm what downstream expects.
        images.append(tensor.permute([2, 1, 0]))
    images = torch.stack(images)

    return ids, images

# Eagerly load both dataset splits at import time so other modules can simply
# import train_ids/train_imgs/test_ids/test_imgs by name.
# NOTE(review): this performs file I/O on import and assumes the relative
# paths below resolve from the process working directory — confirm.
train_ids, train_imgs = load_data("../dataset/files/train")
test_ids, test_imgs = load_data("../dataset/files/test")

