import json
import os
from typing import Dict, Tuple, List, Optional

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from torch.optim.lr_scheduler import CosineAnnealingLR
import matplotlib.pyplot as plt

from model import MLP


def set_seed(seed: int) -> None:
    """Seed the NumPy and PyTorch (CPU + all CUDA devices) RNGs for reproducibility."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # no-op on CPU-only machines
    np.random.seed(seed)


def load_processed(data_dir: str) -> Tuple[pd.DataFrame, pd.DataFrame, Dict]:
    """Load the preprocessed splits and feature metadata from ``data_dir``.

    Expects ``train.csv``, ``valid.csv`` and ``feature_columns.json`` inside
    the directory and returns them as ``(train_df, valid_df, feature_meta)``.
    """
    train_path = os.path.join(data_dir, "train.csv")
    valid_path = os.path.join(data_dir, "valid.csv")
    meta_path = os.path.join(data_dir, "feature_columns.json")

    train_df = pd.read_csv(train_path)
    valid_df = pd.read_csv(valid_path)
    with open(meta_path, "r", encoding="utf-8") as fh:
        feature_meta = json.load(fh)
    return train_df, valid_df, feature_meta


def build_dataloaders(
    train_df: pd.DataFrame,
    valid_df: pd.DataFrame,
    feature_meta: Dict,
    batch_size: int,
) -> Tuple[DataLoader, DataLoader, int, int]:
    """Turn the train/valid frames into tensor DataLoaders.

    Feature columns are coerced to numeric (unparsable values become 0.0) and
    cast to float32; labels come from the ``label`` column as int64. Returns
    ``(train_loader, valid_loader, num_features, num_classes)`` where
    ``num_classes`` is 1 + the largest label seen in either split.
    """
    cols = feature_meta["feature_columns"]

    def to_matrix(df: pd.DataFrame) -> np.ndarray:
        # Coerce to numeric with a uniform dtype so torch never sees an object array.
        numeric = df[cols].apply(pd.to_numeric, errors="coerce").fillna(0.0)
        return numeric.to_numpy(dtype=np.float32)

    X_train = torch.tensor(to_matrix(train_df), dtype=torch.float32)
    X_valid = torch.tensor(to_matrix(valid_df), dtype=torch.float32)
    y_train = torch.tensor(train_df["label"].values, dtype=torch.long)
    y_valid = torch.tensor(valid_df["label"].values, dtype=torch.long)

    num_features = X_train.shape[1]
    # Labels are assumed to be 0..K-1, so K = max label + 1 across both splits.
    num_classes = int(max(y_train.max().item(), y_valid.max().item()) + 1)

    train_loader = DataLoader(
        TensorDataset(X_train, y_train), batch_size=batch_size, shuffle=True, drop_last=False
    )
    valid_loader = DataLoader(
        TensorDataset(X_valid, y_valid), batch_size=batch_size, shuffle=False, drop_last=False
    )
    return train_loader, valid_loader, num_features, num_classes


def evaluate(model: nn.Module, loader: DataLoader, device: torch.device, loss_fn: nn.Module) -> Tuple[float, float]:
    """Run ``model`` over ``loader`` and return ``(mean loss, accuracy)``.

    The loss is the sample-weighted average of per-batch losses; both metrics
    fall back to 0.0 on an empty loader. The model is put in eval mode and no
    gradients are tracked.
    """
    model.eval()
    loss_sum = 0.0
    hits = 0
    seen = 0
    with torch.no_grad():
        for features, targets in loader:
            features = features.to(device)
            targets = targets.to(device)
            outputs = model(features)
            batch = targets.size(0)
            loss_sum += loss_fn(outputs, targets).item() * batch
            hits += (outputs.argmax(dim=1) == targets).sum().item()
            seen += batch
    denom = max(seen, 1)  # guard against an empty loader
    return loss_sum / denom, hits / denom


def _total_grad_norm(parameters, norm_type: float = 2.0) -> float:
    # Compute global grad norm without modifying grads
    grads: List[torch.Tensor] = []
    for p in parameters:
        if p.grad is not None:
            grads.append(p.grad.detach())
    if not grads:
        return 0.0
    device = grads[0].device
    if norm_type == float("inf"):
        norms = [g.abs().max().to(device) for g in grads]
        total_norm = torch.stack(norms).max()
    else:
        norms = torch.stack([torch.norm(g, norm_type) for g in grads])
        total_norm = torch.norm(norms, norm_type)
    return float(total_norm.item())


def _plot_training_curves(history: Dict[str, List[float]], out_dir: str, filename: str = "training_curves.png") -> None:
    """Plot train/valid loss (left axis) and average grad norm (right axis) per epoch.

    The figure is saved as ``filename`` inside ``out_dir``.
    """
    xs = history["epoch"]
    fig, loss_ax = plt.subplots(figsize=(9, 5))

    loss_ax.plot(xs, history["train_loss"], label="Train Loss", color="tab:blue")
    loss_ax.plot(xs, history["valid_loss"], label="Valid Loss", color="tab:orange")
    loss_ax.set_xlabel("Epoch")
    loss_ax.set_ylabel("Loss")
    loss_ax.grid(True, linestyle="--", alpha=0.3)

    # Gradient norm shares the x-axis but lives on its own right-hand scale.
    norm_ax = loss_ax.twinx()
    norm_ax.plot(xs, history["avg_grad_norm"], label="Grad Norm", color="tab:green")
    norm_ax.set_ylabel("Gradient Norm")

    # A twinx axis keeps a separate legend, so merge both into one box.
    handles_a, labels_a = loss_ax.get_legend_handles_labels()
    handles_b, labels_b = norm_ax.get_legend_handles_labels()
    loss_ax.legend(handles_a + handles_b, labels_a + labels_b, loc="best")

    plt.tight_layout()
    plt.savefig(os.path.join(out_dir, filename), dpi=160)
    plt.close(fig)


def _make_run_name(
    hidden_dims: Tuple[int, ...],
    activation: str,
    dropout: float,
    batch_size: int,
    epochs: int,
    lr_max: float,
    lr_min: Optional[float],
    max_grad_norm: float,
    seed: int,
) -> str:
    hid = "-".join(str(h) for h in hidden_dims) if hidden_dims else "none"
    parts = [
        f"h{hid}",
        f"act-{activation}",
        f"do-{dropout}",
        f"bs-{batch_size}",
        f"ep-{epochs}",
        f"lrmax-{lr_max}",
        f"lrmin-{lr_min}" if lr_min is not None else f"lr-{lr_max}",
        f"clip-{max_grad_norm}",
        f"seed-{seed}",
    ]
    # Keep filename concise and safe
    safe = "_".join(parts).replace("/", "-")
    return safe


def train(
    data_dir: str,
    hidden_dims: Tuple[int, ...],
    activation: str,
    dropout: float,
    epochs: int,
    batch_size: int,
    lr: float,
    max_grad_norm: float,
    seed: int,
    device_str: str,
    out_dir: str,
    lr_min: Optional[float] = None,  # None -> constant LR; otherwise cosine annealing between [lr_min, lr]
) -> None:
    """Train an MLP classifier on the processed splits and checkpoint the best model.

    Loads train/valid CSVs plus feature metadata from ``data_dir``, trains for
    ``epochs`` epochs with Adam (optionally cosine-annealed between ``lr_min``
    and ``lr``), clips gradients to ``max_grad_norm`` when positive, and writes:

    - ``best_model_<run_name>.pt`` in ``out_dir`` whenever validation accuracy
      improves (weights, hyper-parameters, and the history recorded so far);
    - ``training_curves.png`` and ``history.json`` in ``out_dir`` at the end.

    Args:
        data_dir: Directory with train.csv, valid.csv, feature_columns.json.
        hidden_dims: Hidden layer widths passed to the project ``MLP``.
        activation: Activation name passed to ``MLP``.
        dropout: Dropout probability passed to ``MLP``.
        epochs: Number of training epochs.
        batch_size: Mini-batch size for both loaders.
        lr: Adam learning rate (acts as lr_max under cosine annealing).
        max_grad_norm: Global grad-norm clip threshold; <= 0 disables clipping.
        seed: RNG seed for NumPy/PyTorch.
        device_str: Preferred device (e.g. "cuda"); falls back to CPU when
            CUDA is unavailable.
        out_dir: Output directory for checkpoints, curves, and history.
        lr_min: Cosine-annealing floor; None keeps the LR constant.
    """
    set_seed(seed)
    # Prefer GPU by default; fall back to CPU when CUDA is unavailable
    preferred = device_str if device_str else "cuda"
    device = torch.device(preferred if torch.cuda.is_available() else "cpu")

    train_df, valid_df, feature_meta = load_processed(data_dir)
    train_loader, valid_loader, input_dim, num_classes = build_dataloaders(
        train_df, valid_df, feature_meta, batch_size
    )

    model = MLP(
        input_dim=input_dim,
        hidden_dims=hidden_dims,
        num_classes=num_classes,
        activation=activation,
        dropout=dropout,
    ).to(device)

    # Optimizer and (optional) cosine annealing scheduler. Cosine is enabled
    # only when a non-negative floor strictly below lr is supplied.
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    use_cosine = lr_min is not None and lr_min >= 0.0 and lr > lr_min
    scheduler = CosineAnnealingLR(optimizer, T_max=max(epochs, 1), eta_min=lr_min) if use_cosine else None

    loss_fn = nn.CrossEntropyLoss()

    # Book-keeping
    best_valid_acc = 0.0
    os.makedirs(out_dir, exist_ok=True)
    run_name = _make_run_name(
        hidden_dims=hidden_dims,
        activation=activation,
        dropout=dropout,
        batch_size=batch_size,
        epochs=epochs,
        lr_max=lr,
        lr_min=lr_min,
        max_grad_norm=max_grad_norm,
        seed=seed,
    )
    best_ckpt_path = os.path.join(out_dir, f"best_model_{run_name}.pt")

    history: Dict[str, List[float]] = {
        "epoch": [],
        "train_loss": [],
        "train_acc": [],
        "valid_loss": [],
        "valid_acc": [],
        "avg_grad_norm": [],
        "max_grad_norm": [],
        "lr": [],
    }

    for epoch in range(1, epochs + 1):
        model.train()
        total_loss = 0.0
        total = 0
        correct = 0

        grad_norm_sum = 0.0
        grad_norm_max = 0.0
        num_steps = 0

        for xb, yb in train_loader:
            xb = xb.to(device)
            yb = yb.to(device)

            optimizer.zero_grad(set_to_none=True)
            logits = model(xb)
            loss = loss_fn(logits, yb)
            loss.backward()

            # Gradient norm (unclipped) for logging
            step_grad_norm = _total_grad_norm(model.parameters(), norm_type=2.0)
            grad_norm_sum += step_grad_norm
            grad_norm_max = max(grad_norm_max, step_grad_norm)
            num_steps += 1

            # Clip if requested
            if max_grad_norm and max_grad_norm > 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            optimizer.step()

            total_loss += loss.item() * yb.size(0)
            preds = logits.argmax(dim=1)
            correct += (preds == yb).sum().item()
            total += yb.size(0)

        # End of epoch metrics
        train_loss = total_loss / max(total, 1)
        train_acc = correct / max(total, 1)
        valid_loss, valid_acc = evaluate(model, valid_loader, device, loss_fn)

        # Record the LR that was actually used for this epoch's updates.
        # BUGFIX: this must be read BEFORE scheduler.step(); stepping first
        # logged the *next* epoch's LR against this epoch's metrics.
        current_lr = optimizer.param_groups[0]["lr"]

        if scheduler is not None:
            scheduler.step()

        avg_grad_norm = (grad_norm_sum / num_steps) if num_steps > 0 else 0.0

        history["epoch"].append(epoch)
        history["train_loss"].append(train_loss)
        history["train_acc"].append(train_acc)
        history["valid_loss"].append(valid_loss)
        history["valid_acc"].append(valid_acc)
        history["avg_grad_norm"].append(avg_grad_norm)
        history["max_grad_norm"].append(grad_norm_max)
        history["lr"].append(current_lr)

        print(
            f"Epoch {epoch:03d} | "
            f"Train loss {train_loss:.4f} acc {train_acc:.4f} | "
            f"Valid loss {valid_loss:.4f} acc {valid_acc:.4f} | "
            f"GradNorm avg {avg_grad_norm:.4f} | LR {current_lr:.6f}"
        )

        # Save best checkpoint with parameters in filename
        if valid_acc > best_valid_acc:
            best_valid_acc = valid_acc
            torch.save(
                {
                    "model_state": model.state_dict(),
                    "input_dim": input_dim,
                    "hidden_dims": list(hidden_dims),
                    "num_classes": num_classes,
                    "activation": activation,
                    "dropout": dropout,
                    "feature_columns": feature_meta["feature_columns"],
                    "lr_max": lr,
                    "lr_min": lr_min,
                    "epochs": epochs,
                    "batch_size": batch_size,
                    "max_grad_norm": max_grad_norm,
                    "seed": seed,
                    # Serialized at save time, so the stored curves end at the
                    # epoch this checkpoint was written.
                    "history": history,
                },
                best_ckpt_path,
            )

    # Save curves (best-effort: plotting problems must not lose the run)
    try:
        _plot_training_curves(history, out_dir=out_dir, filename="training_curves.png")
    except Exception as e:
        print(f"Plotting failed: {e}")

    # Persist raw history as JSON for further custom plotting
    try:
        with open(os.path.join(out_dir, "history.json"), "w", encoding="utf-8") as f:
            json.dump(history, f, indent=2)
    except Exception as e:
        print(f"Failed to save history.json: {e}")

    print(f"Best valid acc: {best_valid_acc:.4f}. Saved to {best_ckpt_path}")
    print(f"Curves saved to {os.path.join(out_dir, 'training_curves.png')}")


def main() -> None:
    """Entry point: fixed hyper-parameter configuration, then launch training."""
    config = dict(
        data_dir="./processed",
        hidden_dims=(256, 128, 64),
        activation="relu",   # relu | leakyrelu | gelu
        dropout=0.3,
        epochs=15,
        batch_size=1024,
        lr=1e-3,             # used as lr_max when cosine annealing is enabled
        lr_min=1e-5,         # set to None to disable cosine annealing (keep constant LR)
        max_grad_norm=1.0,
        seed=42,
        device_str="cuda",   # default to GPU (fallback to CPU if unavailable)
        out_dir="./checkpoints",
    )
    train(**config)


# Run the fixed-configuration training when executed as a script.
if __name__ == "__main__":
    main()
