#!/usr/bin/env python3
"""
Train the buffer re-evaluation (Buffer Gate) MLP from aggregated buffer features.
"""

import argparse
import csv
import math
import os
import random
from typing import List, Tuple

import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader


# Ordered list of feature columns read from the aggregated buffer CSV.
# The MLP input dimension is len(FEATURE_COLS), so order and count must match
# the CSV producer. Names suggest per-buffer summary statistics
# (mean/min/max/last) over probability (p_*), IoU, delta, margin, detection
# score (s_det_*) and box geometry signals — confirm against the aggregator.
FEATURE_COLS = [
    "len",
    "p_mean",
    "p_min",
    "p_max",
    "p_last",
    "iou_mean",
    "iou_min",
    "iou_max",
    "iou_last",
    "delta_mean",
    "delta_min",
    "delta_max",
    "delta_last",
    "margin_mean",
    "s_det_mean",
    "s_det_last",
    "area_mean",
    "aspect_mean",
]
# CSV column holding the supervision target (treated as binary: >= 0.5 is positive).
LABEL_COL = "label"


class BufferDataset(Dataset):
    """Wraps pre-extracted (feature-vector, label) pairs for a DataLoader."""

    def __init__(self, rows: List[Tuple[List[float], float]]):
        # Each entry is (feature list, scalar label).
        self.rows = rows

    def __len__(self):
        return len(self.rows)

    def __getitem__(self, idx):
        features, label = self.rows[idx]
        x = torch.tensor(features, dtype=torch.float32)
        y = torch.tensor([label], dtype=torch.float32)
        return x, y


class BufferGateMLP(nn.Module):
    """Small fully connected gate mapping a feature vector to a score in (0, 1)."""

    def __init__(self, in_dim: int = len(FEATURE_COLS), hidden: List[int] | Tuple[int, ...] = (64, 16)):
        super().__init__()
        # Build Linear+ReLU pairs between consecutive widths, then a
        # Linear(…, 1) + Sigmoid head.
        widths = [in_dim, *hidden]
        blocks: List[nn.Module] = []
        for prev, cur in zip(widths[:-1], widths[1:]):
            blocks.append(nn.Linear(prev, cur))
            blocks.append(nn.ReLU(inplace=True))
        blocks.append(nn.Linear(widths[-1], 1))
        blocks.append(nn.Sigmoid())
        self.net = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Normalize the output to shape (N, 1) regardless of input layout.
        return self.net(x).view(-1, 1)


def set_seed(seed: int):
    """Seed Python's and PyTorch's RNGs (CPU and all CUDA devices) for reproducibility."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # deferred no-op when CUDA is unavailable
    random.seed(seed)


def load_rows(csv_path: str, pos_weight: float = 1.0) -> List[Tuple[List[float], float]]:
    """Load (features, label) rows from the aggregated buffer-gate CSV.

    Missing/empty cells default to 0.0 and non-finite values (NaN/inf) are
    zeroed. Rows with unparseable numeric fields are skipped (best-effort).

    Args:
        csv_path: Path to a CSV whose header contains FEATURE_COLS and LABEL_COL.
        pos_weight: Quick class-balancing knob — positives (label >= 0.5) are
            duplicated max(1, int(pos_weight)) times; 1.0 leaves rows untouched.

    Returns:
        List of (feature_list, label) tuples, shuffled when rebalancing occurred.
    """
    rows: List[Tuple[List[float], float]] = []
    # newline="" is the csv-module-recommended way to open the file.
    with open(csv_path, newline="") as f:
        reader = csv.DictReader(f)
        for row in reader:
            try:
                features = []
                for key in FEATURE_COLS:
                    v = float(row.get(key, "0") or 0.0)
                    # math.isfinite avoids allocating a tensor per cell.
                    if not math.isfinite(v):
                        v = 0.0
                    features.append(v)
                y = float(row.get(LABEL_COL, "0") or 0.0)
                rows.append((features, y))
            except Exception:
                # Deliberate best-effort parsing: skip malformed rows.
                continue
    if pos_weight != 1.0 and rows:
        # Reweight positives by duplication for quick balancing.
        positives = [r for r in rows if r[1] >= 0.5]
        negatives = [r for r in rows if r[1] < 0.5]
        if positives and negatives:
            factor = max(1, int(pos_weight))
            rows = negatives + positives * factor
            random.shuffle(rows)
    return rows


def train_one(
    csv_path: str,
    out_path: str,
    epochs: int = 10,
    batch_size: int = 2048,
    lr: float = 1e-3,
    device: str = "cuda",
    val_ratio: float = 0.1,
    patience: int = 5,
    seed: int = 42,
    pos_weight: float = 1.0,
):
    """Train the buffer-gate MLP on rows from ``csv_path`` and save it to ``out_path``.

    Args:
        csv_path: Aggregated buffer-gate CSV (see FEATURE_COLS / LABEL_COL).
        out_path: Destination ``.pt`` file for the saved payload.
        epochs: Maximum number of training epochs.
        batch_size: Mini-batch size for train and validation loaders.
        lr: AdamW learning rate.
        device: Torch device string; falls back to "cpu" when CUDA is
            requested but unavailable.
        val_ratio: Fraction held out for validation (clamped to [0, 0.5]);
            skipped entirely if fewer than 64 rows would remain for training.
        patience: Early-stopping patience in epochs without val improvement.
        seed: RNG seed for shuffling, splitting, and initialization.
        pos_weight: Positive-row duplication factor forwarded to load_rows().

    Raises:
        ValueError: If no rows could be loaded from ``csv_path``.
    """
    set_seed(seed)
    rows = load_rows(csv_path, pos_weight=pos_weight)
    # Explicit raise instead of `assert`: asserts are stripped under `python -O`.
    if not rows:
        raise ValueError(f"No data loaded from {csv_path}")

    # Degrade gracefully to CPU instead of crashing on .to(device) when CUDA
    # was requested but is not available.
    if device.startswith("cuda") and not torch.cuda.is_available():
        device = "cpu"

    # Hold out a validation split unless it would leave too little train data.
    val_size = int(len(rows) * max(0.0, min(val_ratio, 0.5)))
    if val_size > 0 and len(rows) - val_size < 64:
        val_size = 0
    generator = torch.Generator().manual_seed(seed)
    dataset = BufferDataset(rows)
    if val_size > 0:
        train_size = len(rows) - val_size
        train_ds, val_ds = torch.utils.data.random_split(dataset, [train_size, val_size], generator=generator)
    else:
        train_ds = dataset
        val_ds = None

    train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True, drop_last=False)
    val_loader = DataLoader(val_ds, batch_size=batch_size, shuffle=False, drop_last=False) if val_ds else None

    model = BufferGateMLP(in_dim=len(FEATURE_COLS), hidden=[64, 16]).to(device)
    opt = torch.optim.AdamW(model.parameters(), lr=lr)
    loss_fn = nn.BCELoss()  # the model already ends in Sigmoid, so plain BCE

    best_val = float("inf")
    best_state = None
    wait = 0

    for epoch in range(epochs):
        model.train()
        total_loss = 0.0
        total_samples = 0
        for x, y in train_loader:
            x = x.to(device)
            y = y.to(device)
            pred = model(x)
            loss = loss_fn(pred, y)
            opt.zero_grad()
            loss.backward()
            opt.step()
            # Sample-weighted accumulation so an uneven final batch averages correctly.
            total_loss += float(loss.item()) * x.shape[0]
            total_samples += x.shape[0]
        train_loss = total_loss / max(1, total_samples)

        if val_loader is not None:
            model.eval()
            val_loss = 0.0
            val_samples = 0
            with torch.no_grad():
                for x, y in val_loader:
                    x = x.to(device)
                    y = y.to(device)
                    pred = model(x)
                    loss = loss_fn(pred, y)
                    val_loss += float(loss.item()) * x.shape[0]
                    val_samples += x.shape[0]
            val_loss /= max(1, val_samples)
            print(f"[epoch {epoch+1}/{epochs}] train={train_loss:.6f} val={val_loss:.6f} Nt={total_samples} Nv={val_samples}")
            # Track the best (CPU-copied) weights; stop after `patience` stale epochs.
            if val_loss < best_val - 1e-6:
                best_val = val_loss
                wait = 0
                best_state = {k: v.cpu() for k, v in model.state_dict().items()}
            else:
                wait += 1
                if wait >= max(1, patience):
                    print(f"[early stop] patience reached at epoch {epoch+1}")
                    break
        else:
            print(f"[epoch {epoch+1}/{epochs}] train={train_loss:.6f} Nt={total_samples}")

    if best_state is not None:
        model.load_state_dict(best_state)

    payload = {
        "in_dim": len(FEATURE_COLS),
        "hidden": [64, 16],
        # BufferGateMLP keys already carry the 'net.' prefix; the guard is kept
        # in case the module layout ever changes.
        "state_dict": {('net.' + k if not k.startswith('net.') else k): v.cpu() for k, v in model.state_dict().items()},
    }
    # os.makedirs("") raises FileNotFoundError, so only create the directory
    # when out_path actually contains one (e.g. not for a bare "model.pt").
    out_dir = os.path.dirname(out_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    torch.save(payload, out_path)
    print(f"[OK] Saved buffer gate model to {out_path}")


def main():
    """Parse CLI arguments and launch a single buffer-gate training run."""
    parser = argparse.ArgumentParser("Train buffer gate MLP.")
    parser.add_argument("--csv", type=str, required=True, help="Path to aggregated buffer gate CSV.")
    parser.add_argument("--out", type=str, default="./pretrains/buffer_gate.pt")
    parser.add_argument("--epochs", type=int, default=10)
    parser.add_argument("--batch-size", type=int, default=2048)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--val-ratio", type=float, default=0.1)
    parser.add_argument("--patience", type=int, default=5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--pos-weight", type=float, default=1.0, help="Optional duplication factor for positives.")
    opts = parser.parse_args()
    train_one(
        csv_path=opts.csv,
        out_path=opts.out,
        epochs=opts.epochs,
        batch_size=opts.batch_size,
        lr=opts.lr,
        device=opts.device,
        val_ratio=opts.val_ratio,
        patience=opts.patience,
        seed=opts.seed,
        pos_weight=opts.pos_weight,
    )


# Script entry point; importing this module has no side effects beyond definitions.
if __name__ == "__main__":
    main()
