import os
import sys

current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
sys.path.append(os.path.join(parent_dir, "ch02"))

import torch
from torch import nn
from torch.utils.data import DataLoader
from matplotlib import pyplot as plt
from dataset import create_gpt2_dataloader_v1

def load_data(path: str, config: dict, train_ratio: float = 0.9, sample_ratio: float = 1.0):
    """Read a plain-text corpus and build train/validation dataloaders.

    Args:
        path: Path to the text file.
        config: Must contain "batch_size" and "context_length".
        train_ratio: Fraction of the (sampled) text used for training.
        sample_ratio: Fraction of the file to use at all (handy for quick runs).

    Returns:
        Tuple of (train_loader, val_loader).
    """
    with open(path, "r") as f:
        text = f.read()
    # Down-sample the corpus first, then split it at a single index so the
    # two splits are contiguous and non-overlapping.  The previous slicing
    # (val_text = text[len_text - train_size:]) overlapped the training
    # text whenever train_ratio > 0.5, leaking train data into validation.
    sampled = text[: int(len(text) * sample_ratio)]
    split_idx = int(len(sampled) * train_ratio)
    train_text = sampled[:split_idx]
    val_text = sampled[split_idx:]
    train_loader = create_gpt2_dataloader_v1(
        train_text,
        batch_size=config["batch_size"],
        max_length=config["context_length"],
        num_workers=0,
        shuffle=True,
        drop_last=True,
    )
    val_loader = create_gpt2_dataloader_v1(
        val_text,
        batch_size=config["batch_size"],
        max_length=config["context_length"],
        num_workers=0,
        shuffle=False,
        drop_last=False,
    )
    return train_loader, val_loader

def train(model, train_loader: DataLoader, val_loader: DataLoader, config: dict):
    """Train `model` for config["epochs"] epochs, validating after each one.

    Args:
        model: A module mapping token ids [batch, seq] to logits
            [batch, seq, vocab_size].
        train_loader: Yields (input_ids, targets) batches for training.
        val_loader: Yields (input_ids, targets) batches for validation.
        config: Must contain "learning_rate" and "epochs".

    Returns:
        dict: Per-epoch lists of per-batch samples under the keys
        "loss", "val_loss", "accuracy", "val_accuracy".
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=config["learning_rate"])
    criterion = nn.CrossEntropyLoss()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    stats = {
        "loss": [],
        "val_loss": [],
        "accuracy": [],
        "val_accuracy": [],
    }
    for epoch in range(config["epochs"]):
        loss_samples = []
        val_loss_samples = []
        accuracy_samples = []
        val_accuracy_samples = []

        # Re-enable train mode every epoch since validation switches to eval.
        model.train()
        for input_ids, targets in train_loader:
            input_ids = input_ids.to(device)
            targets = targets.to(device)
            optimizer.zero_grad()
            logits = model(input_ids)
            # CrossEntropyLoss expects flat [N, vocab] logits vs [N] targets.
            loss = criterion(logits.view(-1, logits.size(-1)), targets.view(-1))
            loss.backward()
            optimizer.step()
            loss_samples.append(loss.item())
            predicted_ids = logits.argmax(dim=-1)
            accuracy_samples.append(accuracy(predicted_ids, targets))
            print(f"Epoch {epoch:03d}, Loss: {loss.item():.4f}, Accuracy: {accuracy_samples[-1]:.4f}")

        # Validation: eval mode (disables dropout etc.) and no gradient
        # tracking — the original loop built autograd graphs for nothing.
        model.eval()
        with torch.no_grad():
            for input_ids, targets in val_loader:
                input_ids = input_ids.to(device)
                targets = targets.to(device)
                logits = model(input_ids)
                loss = criterion(logits.view(-1, logits.size(-1)), targets.view(-1))
                val_loss_samples.append(loss.item())
                predicted_ids = logits.argmax(dim=-1)
                val_accuracy_samples.append(accuracy(predicted_ids, targets))
                print(f"Epoch {epoch:03d}, Val Loss: {loss.item():.4f}, Val Accuracy: {val_accuracy_samples[-1]:.4f}")

        stats["loss"].append(loss_samples)
        stats["val_loss"].append(val_loss_samples)
        stats["accuracy"].append(accuracy_samples)
        stats["val_accuracy"].append(val_accuracy_samples)

    return stats

def accuracy(outputs: torch.Tensor, targets: torch.Tensor) -> float:
    """Fraction of positions where the predicted token equals the target.

    Args:
        outputs: Predicted token ids with shape [batch_size, seq_len]
        targets: Ground-truth token ids with shape [batch_size, seq_len]

    Returns:
        float: Mean match rate, between 0 and 1
    """
    matches = torch.eq(outputs, targets)
    return matches.float().mean().item()

def save_model(model, path: str):
    """Serialize the model's parameters (its state_dict) to `path`."""
    state_dict = model.state_dict()
    torch.save(state_dict, path)

def load_model(model, path: str):
    """Restore parameters saved by `save_model` into `model`; return `model`."""
    state_dict = torch.load(path)
    model.load_state_dict(state_dict)
    return model

def plot_loss(stats: dict):
    """Plot per-epoch mean loss and accuracy for train and validation.

    Args:
        stats: Dict with keys "loss", "val_loss", "accuracy",
            "val_accuracy"; each value is a list with one entry per epoch,
            where each entry is a list of per-batch samples (the format
            produced by `train`).
    """
    # `train` records a *list of per-batch values* for each epoch, so the
    # raw nested lists cannot be plotted directly: ragged rows make
    # matplotlib raise, and rectangular ones draw one line per batch index
    # instead of one curve per metric.  Reduce each epoch to its mean.
    def _epoch_means(per_epoch_samples):
        # Guard against an epoch with zero recorded batches.
        return [sum(s) / len(s) if s else 0.0 for s in per_epoch_samples]

    fig, ax1 = plt.subplots()

    # Plot loss on the first y-axis
    ax1.plot(_epoch_means(stats["loss"]), label="train loss", color="blue")
    ax1.plot(_epoch_means(stats["val_loss"]), label="val loss", color="red")
    ax1.set_xlabel("Epoch")
    ax1.set_ylabel("Loss", color="blue")
    ax1.tick_params(axis="y", labelcolor="blue")

    # Create second y-axis for accuracy
    ax2 = ax1.twinx()
    ax2.plot(_epoch_means(stats["accuracy"]), label="train accuracy", color="green", linestyle="--")
    ax2.plot(_epoch_means(stats["val_accuracy"]), label="val accuracy", color="orange", linestyle="--")
    ax2.set_ylabel("Accuracy", color="green")
    ax2.tick_params(axis="y", labelcolor="green")

    # Combine legends from both axes
    lines1, labels1 = ax1.get_legend_handles_labels()
    lines2, labels2 = ax2.get_legend_handles_labels()
    ax1.legend(lines1 + lines2, labels1 + labels2, loc="upper right")

    plt.title("Training Metrics")
    plt.show()