import torch
import torch.nn.functional as F
from PIL import Image
import numpy as np

from model import Model, text_l
from tokenizer import encode, decode

# Training hyperparameters (read as module-level globals by the code below).
epoch = 2  # number of full passes over the training set
batch_size = 4  # samples per optimization step (also the split size in load_data)
lr = 1e-4  # AdamW learning rate used in train()

def load_data(path: str):
    """Load a captioned-image dataset from *path*.

    Expects ``text.txt`` (one caption per line) plus images named
    ``0.png``, ``1.png``, ... whose index matches the caption order.

    Returns:
        A pair ``(ids, images)``, each a tuple of batches produced by
        ``Tensor.split(batch_size)``: token ids of shape (B, text_l) and
        float images scaled to [0, 1] with channels moved to the front.
    """
    if path[-1] != '/':
        path += '/'

    # Read the captions, closing the file handle deterministically.
    # The first character is skipped — presumably a leading newline in the
    # dataset file; TODO confirm against the dataset generator.
    with open(path + 'text.txt', encoding='utf-8') as f:
        text = f.read()
    text = text[1:].split('\n')

    # Encode each caption and right-pad with 0 up to the model's fixed
    # text length text_l.
    ids = []
    for s in text:
        tensor = torch.tensor(encode(s))
        tensor = F.pad(tensor, (0, text_l - len(tensor)))
        ids.append(tensor)
    ids = torch.stack(ids)

    # Load one image per caption; the with-block closes the underlying
    # file once the pixel data has been copied into the numpy array.
    # NOTE(review): permute([2, 1, 0]) yields (C, W, H) — it transposes
    # width and height as well as moving channels first; the usual
    # (C, H, W) layout would be permute([2, 0, 1]). Confirm the model
    # expects this layout before changing it.
    images = []
    for i in range(len(ids)):
        with Image.open(path + str(i) + '.png') as image:
            tensor = torch.tensor(np.array(image), dtype=torch.float)
        tensor = tensor / 255.0
        tensor = tensor.permute([2, 1, 0])
        images.append(tensor)
    images = torch.stack(images)

    return ids.split(batch_size), images.split(batch_size)

# Eagerly load and batch both splits at import time.
# NOTE(review): paths are relative to the current working directory —
# confirm the script is always launched from its own folder.
train_ids, train_imgs = load_data("../dataset/files/train")
test_ids, test_imgs = load_data("../dataset/files/test")


def train(model, imgs, ids, lr=1e-4, optimizer=None):
    """Run one optimization pass over the batched training data.

    Args:
        model: callable as ``model(img, idx)`` returning logits of shape
            (B, T, C).
        imgs: sequence of image batch tensors.
        ids: sequence of token-id batch tensors of shape (B, T).
        lr: AdamW learning rate, used only when *optimizer* is None
            (matches the module-level default the original read globally).
        optimizer: optional optimizer to reuse across calls. By default a
            fresh AdamW is built on every call, which resets its moment
            estimates each epoch — pass one in to preserve state.
    """
    if optimizer is None:
        optimizer = torch.optim.AdamW(model.parameters(), lr)

    for i in range(len(ids)):
        idx = ids[i]
        # Next-token targets: shift the sequence left by one and pad the
        # tail with token 0.
        tgt = F.pad(idx[:, 1:], (0, 1))
        img = imgs[i]

        logits = model(img, idx)

        # Flatten (B, T, C) -> (B*T, C) for cross entropy.
        B, T, C = logits.shape
        logits = logits.view(B * T, C)
        tgt = tgt.view(B * T)

        # Token-level accuracy of the greedy prediction.
        pred = logits.argmax(-1)
        accuracy = torch.eq(pred, tgt).float().mean()

        loss = F.cross_entropy(logits, tgt)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # .item() prints plain floats instead of tensor reprs.
        print("loss: {} accuracy: {}".format(loss.item(), accuracy.item()), end="\r")

    print("")


def test(model, imgs, ids):
    """Print the mean cross-entropy loss over the batched test data.

    Mirrors train()'s target construction (left shift, zero pad). Runs
    under ``torch.no_grad()`` so no autograd graph is built, and
    accumulates Python floats via ``.item()`` so per-batch tensors (and
    the graphs the original version kept alive) can be freed.
    """
    avg_loss = 0.0
    with torch.no_grad():
        for i in range(len(ids)):
            idx = ids[i]
            # Next-token targets: shift left by one, pad the tail with 0.
            tgt = F.pad(idx[:, 1:], (0, 1))
            img = imgs[i]

            logits = model(img, idx)

            # Flatten (B, T, C) -> (B*T, C) for cross entropy.
            B, T, C = logits.shape
            logits = logits.view(B * T, C)
            tgt = tgt.view(B * T)

            # .item() detaches to a plain float immediately.
            avg_loss += F.cross_entropy(logits, tgt).item()

    avg_loss /= len(ids)
    print("test_loss: {}".format(avg_loss))


# Instantiate the model and report its size in millions of parameters.
model = Model()
param_count = sum(p.numel() for p in model.parameters())
print(param_count / 1e6, 'M parameters')

# Resume from the previously saved checkpoint.
model.load_state_dict(torch.load("model.pt"))

# Train for the configured number of epochs, evaluating after each one.
for epoch_idx in range(epoch):
    print("epoch: {}".format(epoch_idx))
    train(model, train_imgs, train_ids)
    test(model, test_imgs, test_ids)

# Persist the updated weights back to the same checkpoint file.
torch.save(model.state_dict(), "model.pt")