#!/usr/bin/env python3
"""
Train a small MLP as Learned Write Gate (LWG) from labeled CSV built by build_dataset.py.
Supports new SMD_DanceTrack dataset structure.
"""
import os
import csv
import argparse
import random
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import math


class CsvDataset(Dataset):
    """Dataset of (feature_tensor, label) pairs loaded from a labeled CSV.

    Expected header:
        seq,t,idx,id_label,track_id,s_det,sim1,margin,area_ratio,aspect_ratio,y
    Only the five numeric features (columns 5-9) and the integer label
    (column 10) are kept; malformed or too-short rows are skipped with a
    warning instead of aborting the load.
    """

    def __init__(self, path):
        self.rows = []
        if not os.path.exists(path):
            raise FileNotFoundError(f"CSV file not found: {path}")
        # newline="" is the documented way to open files for the csv module
        # (prevents newline translation from corrupting quoted fields).
        with open(path, "r", newline="") as f:
            reader = csv.reader(f)
            header = next(reader, None)
            if header is None:
                raise ValueError(f"CSV is empty: {path}")
            for row in reader:
                if not row:
                    continue
                try:
                    # Columns 5..9 are float features, column 10 is the label.
                    # A short row raises ValueError (unpack) or IndexError.
                    s_det, sim1, margin, area_ratio, aspect_ratio = (
                        float(v) for v in row[5:10]
                    )
                    y = int(row[10])
                except (ValueError, IndexError) as e:
                    # Narrow catch: only malformed rows are skipped, real
                    # programming errors still propagate.
                    print(f"[WARN] Skipping row due to error: {e}")
                    continue
                self.rows.append((
                    torch.tensor([s_det, sim1, margin, area_ratio, aspect_ratio], dtype=torch.float32),
                    y,
                ))

    def __len__(self):
        return len(self.rows)

    def __getitem__(self, idx):
        return self.rows[idx]


class MLP(nn.Module):
    """Small feed-forward binary classifier: Linear+ReLU stack, sigmoid head.

    Produces a 1-D tensor of probabilities in (0, 1), one per input row.
    """

    def __init__(self, in_dim=5, hidden=(64, 16)):
        super().__init__()
        dims = [in_dim, *hidden]
        blocks = []
        # One Linear+ReLU pair per consecutive dim pair, then a scalar head.
        for d_in, d_out in zip(dims[:-1], dims[1:]):
            blocks.append(nn.Linear(d_in, d_out))
            blocks.append(nn.ReLU(inplace=True))
        blocks.append(nn.Linear(dims[-1], 1))
        self.net = nn.Sequential(*blocks)

    def forward(self, x):
        # Squeeze the trailing singleton output dim to a flat probability vector.
        logits = self.net(x)
        return torch.sigmoid(logits).view(-1)


def split_dataset(ds, val_ratio=0.1, seed=None):
    """Randomly split `ds` into (train, val) torch Subsets.

    Args:
        ds: any sized, indexable dataset.
        val_ratio: fraction of samples assigned to the validation subset
            (floor of n * val_ratio samples).
        seed: optional int for a reproducible split. Default None keeps the
            original behavior of using the global `random` state.

    Returns:
        (train_subset, val_subset) covering `ds` exactly once, disjointly.
    """
    n = len(ds)
    idxs = list(range(n))
    # A dedicated Random instance avoids perturbing global random state
    # when a seed is requested.
    rng = random.Random(seed) if seed is not None else random
    rng.shuffle(idxs)
    n_val = int(n * val_ratio)
    val_idx = idxs[:n_val]
    tr_idx = idxs[n_val:]
    return torch.utils.data.Subset(ds, tr_idx), torch.utils.data.Subset(ds, val_idx)


def main():
    """Train the LWG MLP on a labeled CSV and save a model checkpoint.

    CLI args: --train_csv (required), --out_model, --epochs, --batch_size,
    --lr, --val_ratio. Prints per-epoch loss and validation metrics, then
    saves {state_dict, hidden, in_dim} to --out_model.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--train_csv", type=str, required=True, help="Path to labeled CSV from build_dataset.py")
    ap.add_argument("--out_model", type=str, default="./pretrains/lwg_model.pt", help="Path to save LWG model")
    ap.add_argument("--epochs", type=int, default=20, help="Number of training epochs")
    ap.add_argument("--batch_size", type=int, default=1024, help="Batch size for training")
    ap.add_argument("--lr", type=float, default=1e-3, help="Learning rate")
    ap.add_argument("--val_ratio", type=float, default=0.1, help="Validation set ratio")
    args = ap.parse_args()

    print(f"[INFO] Loading dataset from {args.train_csv}")
    ds = CsvDataset(args.train_csv)
    tr, va = split_dataset(ds, val_ratio=args.val_ratio)

    tr_loader = DataLoader(tr, batch_size=args.batch_size, shuffle=True, num_workers=2, pin_memory=True)
    va_loader = DataLoader(va, batch_size=4096, shuffle=False, num_workers=2, pin_memory=True)

    model = MLP()
    opt = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=1e-4)
    # NOTE(review): MLP.forward already applies sigmoid, so plain BCELoss is
    # required here. BCEWithLogitsLoss would be more numerically stable but
    # would need MLP.forward to return raw logits (interface change).
    bce = nn.BCELoss()

    def evaluate_metrics(loader):
        """Return accuracy, PR-AUC and best-F1 threshold on `loader`."""
        model.eval()
        probs, labels = [], []
        with torch.no_grad():
            for x, y in loader:
                probs.append(model(x).detach().cpu())
                labels.append(y.detach().cpu())
        model.train()

        if not probs:
            # Empty validation split (tiny dataset / val_ratio=0): neutral metrics.
            return {"acc": 0.0, "pr_auc": 0.0, "best_f1": 0.0, "best_thr": 0.5}

        probs = torch.cat(probs).float()
        labels = torch.cat(labels).long()

        pred = (probs >= 0.5).long()
        acc = float((pred == labels).sum().item() / max(1, labels.numel()))

        # Precision-recall curve: sweep thresholds from highest score down.
        vals, idxs = torch.sort(probs, descending=True)
        y_sorted = labels[idxs]
        tp, fp = 0, 0
        fn = int((labels == 1).sum().item())
        precisions, recalls = [], []
        best_f1, best_thr = 0.0, 0.5
        n = len(vals)

        for i in range(n):
            if y_sorted[i].item() == 1:
                tp += 1
                fn -= 1
            else:
                fp += 1
            v = float(vals[i].item())
            # Emit one PR point per unique score, AFTER the whole tie group is
            # counted. (The previous version emitted at the first tied sample,
            # under-counting tp/fp for tied probabilities.)
            if i + 1 == n or float(vals[i + 1].item()) != v:
                prec = tp / max(1, tp + fp)
                rec = tp / max(1, tp + fn)
                f1 = 0.0 if (prec + rec) == 0 else 2 * prec * rec / (prec + rec)
                precisions.append(prec)
                recalls.append(rec)
                if f1 > best_f1:
                    best_f1 = f1
                    best_thr = v

        # Anchor the curve at recall 0 and recall 1 for trapezoidal AUC.
        if len(recalls) == 0 or recalls[0] != 0.0:
            recalls = [0.0] + recalls
            precisions = [precisions[0] if precisions else 1.0] + precisions
        if recalls[-1] != 1.0:
            recalls.append(1.0)
            precisions.append(precisions[-1] if precisions else 0.0)

        # Trapezoidal integration of precision over recall.
        pr_auc = 0.0
        for i in range(1, len(recalls)):
            pr_auc += 0.5 * (precisions[i] + precisions[i - 1]) * (recalls[i] - recalls[i - 1])

        return {"acc": acc, "pr_auc": pr_auc, "best_f1": best_f1, "best_thr": best_thr}

    print("[INFO] Starting training...")
    for ep in range(args.epochs):
        model.train()
        total_loss = 0.0
        for x, y in tr_loader:
            p = model(x)
            loss = bce(p, y.float())
            opt.zero_grad()
            loss.backward()
            opt.step()
            total_loss += loss.item()
        metrics = evaluate_metrics(va_loader)
        print(
            f"Epoch {ep+1}/{args.epochs} | Loss: {total_loss:.4f} | "
            f"Val Acc: {metrics['acc']:.4f} | PR-AUC: {metrics['pr_auc']:.4f} | "
            f"Best F1: {metrics['best_f1']:.4f} @ Thr: {metrics['best_thr']:.3f}"
        )

    # os.makedirs("") raises FileNotFoundError when out_model has no directory
    # component (e.g. "model.pt"), so only create the directory when present.
    out_dir = os.path.dirname(args.out_model)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    payload = {
        "state_dict": model.state_dict(),
        "hidden": [64, 16],  # must mirror MLP()'s default hidden sizes
        "in_dim": 5
    }
    torch.save(payload, args.out_model)
    print(f"[INFO] Saved LWG model to {args.out_model}")


if __name__ == "__main__":
    main()
