import os
import sys

# Make the sibling chapter packages importable when this file is run as a
# script: ch04 provides `transformer`, ch02 provides `tokenizer.gpt2`.
# Must run before the project imports below.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
sys.path.append(os.path.join(parent_dir, 'ch04'))
sys.path.append(os.path.join(parent_dir, 'ch02'))

import torch
from tokenizer.gpt2 import GPT2Tokenizer
from transformer import generate_text_simple, GPTModel
from train import load_data, train, save_model, load_model, plot_loss


def text_to_token_ids(text, tokenizer):
    """Encode *text* with *tokenizer* and return a (1, seq_len) tensor.

    The leading dimension is the batch axis expected by the model.
    """
    token_ids = tokenizer.encode(text)
    return torch.tensor(token_ids).unsqueeze(0)


def tokens_to_text(tokens, tokenizer):
    """Decode a (1, seq_len) token-ID tensor back into a string.

    Inverse of `text_to_token_ids`: drops the batch axis, then decodes.
    """
    flat_ids = tokens.squeeze(0).tolist()
    return tokenizer.decode(flat_ids)


# Architecture and training hyper-parameters for the GPT-2 124M-parameter
# configuration. Keys are read by GPTModel and the train/load_data helpers.
GPT_CONFIG_124M = {
    "vocab_size": 50257,    # GPT-2 BPE vocabulary size
    "context_length": 256,  # max sequence length (GPT-2 uses 1024; shortened here)
    "emb_dim": 768,         # embedding / hidden dimension
    "n_heads": 12,          # attention heads per transformer block
    "n_layers": 12,         # number of transformer blocks
    "drop_rate": 0.1,       # dropout probability
    "qkv_bias": False,      # no bias terms in the query/key/value projections
    "learning_rate": 0.0001,
    "epochs": 10,
    "batch_size": 2,
}
# Training corpus; expected one directory above this script.
DATA_PATH = os.path.join(parent_dir, "the-verdict.txt")


def main():
    """Demo driver: encode a prompt, sample from the (untrained) model,
    then train it on the corpus at DATA_PATH and plot the loss curves."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    tokenizer = GPT2Tokenizer()

    # Encode a fixed prompt and show its token IDs.
    prompt = "Hello, World!"
    input_tokens = text_to_token_ids(prompt, tokenizer)
    print(f"input_tokens: {input_tokens}")

    # Build the model and generate 10 tokens from the freshly-initialized
    # (untrained) weights — output is expected to be gibberish at this point.
    model = GPTModel(GPT_CONFIG_124M)
    model.to(device)
    # NOTE(review): eval mode is set for generation; assumes train() switches
    # the model back to train mode itself — confirm in ch04/train.
    model.eval()
    input_tokens = input_tokens.to(device)
    output_tokens = generate_text_simple(
        model, input_tokens, 10, GPT_CONFIG_124M["context_length"]
    )
    print(f"output_tokens: {output_tokens}")

    output_text = tokens_to_text(output_tokens, tokenizer)
    print(f"output_text: {output_text}")

    # Train on a 10% sample of the corpus and visualize the loss history.
    train_loader, val_loader = load_data(DATA_PATH, GPT_CONFIG_124M, sample_ratio=0.1)
    stats = train(model, train_loader, val_loader, GPT_CONFIG_124M)
    plot_loss(stats)


# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
