#!/usr/bin/env python3
"""
Clean and rebalance exported LWG training samples.

Operations:
  1. Drop rows with missing / invalid numeric values.
  2. Clip to reasonable bounds for area_ratio & aspect_ratio.
  3. Optionally downsample negative samples to control imbalance.
"""

from __future__ import annotations

import argparse
import csv
import math
import random
from pathlib import Path
from typing import List, Tuple

COLUMNS = [
    "seq",
    "t",
    "idx",
    "id_label",
    "track_id",
    "s_det",
    "sim1",
    "margin",
    "area_ratio",
    "aspect_ratio",
    "best_iou",
    "delta_sim1",
    "y",
]

# One converter per column, aligned with COLUMNS: "seq" stays a string,
# counters/ids/labels are ints, the feature columns are floats.
_CONVERTERS = [str, int, int, int, int, float, float, float, float, float, float, float, int]


def parse_row(row: List[str]) -> Tuple[dict | None, str | None]:
    """Parse a CSV row, returning (record, error_message).

    Exactly one element of the pair is None: on success the typed record is
    returned with error None; on failure the record is None and the error is
    a short machine-readable code ("invalid_column_count", "parse_error",
    "invalid_float_<column>", "invalid_label").
    """
    if len(row) != len(COLUMNS):
        return None, "invalid_column_count"
    record = {}
    try:
        for name, convert, raw in zip(COLUMNS, _CONVERTERS, row):
            record[name] = convert(raw)
    except ValueError:  # non-numeric text in a numeric column
        return None, "parse_error"

    # Reject any non-finite float feature (NaN / +/-inf). Because this runs
    # before any use of the floats, no later non-finite handling is needed.
    for name, value in record.items():
        if isinstance(value, float) and not math.isfinite(value):
            return None, f"invalid_float_{name}"
    if record["y"] not in (0, 1):
        return None, "invalid_label"
    # IoU is a ratio by definition; clamp tiny numeric excursions into [0, 1].
    record["best_iou"] = max(0.0, min(1.0, record["best_iou"]))
    return record, None


def clean_records(
    records: List[dict],
    min_area: float,
    max_area: float,
    min_aspect: float,
    max_aspect: float,
) -> Tuple[List[dict], dict]:
    """Drop records whose area_ratio / aspect_ratio fall outside the bounds.

    Returns (kept_records, stats) where stats counts drops per reason.
    A record failing both checks is counted only under "dropped_area",
    since the area bound is tested first.
    """
    kept: List[dict] = []
    stats = {"dropped_area": 0, "dropped_aspect": 0}
    for record in records:
        if not (min_area <= record["area_ratio"] <= max_area):
            stats["dropped_area"] += 1
        elif not (min_aspect <= record["aspect_ratio"] <= max_aspect):
            stats["dropped_aspect"] += 1
        else:
            kept.append(record)
    return kept, stats


def downsample_negatives(records: List[dict], max_ratio: float, seed: int) -> Tuple[List[dict], dict]:
    """Cap the negative count at ``max_ratio`` times the positive count.

    If no downsampling is needed (no positives, no negatives, or already
    within the ratio), the input list is returned untouched. Otherwise the
    negatives are shuffled with a seeded RNG, truncated, recombined with the
    positives, and the result is shuffled again for a mixed ordering.
    Returns (records, stats) with "neg_kept" / "neg_dropped" counts.
    """
    stats = {"neg_kept": 0, "neg_dropped": 0}
    pos: List[dict] = []
    neg: List[dict] = []
    for rec in records:
        (pos if rec["y"] == 1 else neg).append(rec)

    limit = int(len(pos) * max_ratio)
    # No rebalancing possible/needed: pass everything through unchanged.
    if not pos or not neg or limit <= 0 or len(neg) <= limit:
        stats["neg_kept"] = len(neg)
        return records, stats

    random.Random(seed).shuffle(neg)
    survivors = neg[:limit]
    stats["neg_kept"] = len(survivors)
    stats["neg_dropped"] = len(neg) - len(survivors)
    merged = pos + survivors
    random.Random(seed).shuffle(merged)
    return merged, stats


def write_csv(path: Path, records: List[dict]) -> None:
    """Write records to ``path`` as CSV, creating parent directories.

    Floats are rendered with six decimal places; the header row is COLUMNS.
    """
    _FLOAT_KEYS = ("s_det", "sim1", "margin", "area_ratio", "aspect_ratio", "best_iou", "delta_sim1")
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("w", newline="") as handle:
        writer = csv.writer(handle)
        writer.writerow(COLUMNS)
        for rec in records:
            out_row = [rec["seq"], rec["t"], rec["idx"], rec["id_label"], rec["track_id"]]
            out_row.extend(f"{rec[key]:.6f}" for key in _FLOAT_KEYS)
            out_row.append(rec["y"])
            writer.writerow(out_row)


def main() -> None:
    """CLI entry point: load, validate, clean, rebalance, and write the CSV."""
    ap = argparse.ArgumentParser("Clean and rebalance LWG training CSV.")
    ap.add_argument("--input", required=True, help="Input CSV path.")
    ap.add_argument("--output", required=True, help="Output CSV path.")
    ap.add_argument("--min-area-ratio", type=float, default=5e-5, help="Minimum allowed area_ratio.")
    ap.add_argument("--max-area-ratio", type=float, default=0.98, help="Maximum allowed area_ratio.")
    ap.add_argument("--min-aspect-ratio", type=float, default=0.05, help="Minimum allowed aspect_ratio.")
    ap.add_argument("--max-aspect-ratio", type=float, default=20.0, help="Maximum allowed aspect_ratio.")
    ap.add_argument("--max-neg-pos-ratio", type=float, default=4.0, help="Downsample negatives to this ratio.")
    ap.add_argument("--seed", type=int, default=42)
    args = ap.parse_args()

    input_path = Path(args.input)
    records: List[dict] = []
    # "invalid_rows" is the total; per-reason codes from parse_row are added lazily.
    dropped_counts = {
        "invalid_rows": 0,
        "parse_error": 0,
    }
    # newline="" is required by the csv module for correct newline handling.
    with input_path.open(newline="") as f:
        reader = csv.reader(f)
        header = next(reader, None)
        if header is None:
            # An empty file has no header at all — report that precisely.
            raise ValueError(f"Empty input CSV: {input_path}")
        if len(header) != len(COLUMNS):
            raise ValueError(f"Unexpected header columns in {input_path}")
        for row in reader:
            rec, err = parse_row(row)
            if rec is None:
                dropped_counts["invalid_rows"] += 1
                if err:
                    dropped_counts[err] = dropped_counts.get(err, 0) + 1
                continue
            records.append(rec)

    # Bound the geometric features to plausible ranges.
    cleaned_records, clean_stats = clean_records(
        records,
        min_area=args.min_area_ratio,
        max_area=args.max_area_ratio,
        min_aspect=args.min_aspect_ratio,
        max_aspect=args.max_aspect_ratio,
    )

    # Control class imbalance before writing.
    balanced_records, balance_stats = downsample_negatives(
        cleaned_records,
        max_ratio=args.max_neg_pos_ratio,
        seed=args.seed,
    )

    write_csv(Path(args.output), balanced_records)

    num_pos = sum(1 for r in balanced_records if r["y"] == 1)
    num_neg = sum(1 for r in balanced_records if r["y"] == 0)
    print(f"[clean_dataset] loaded={len(records)} kept={len(balanced_records)} pos={num_pos} neg={num_neg}")
    print(f"[clean_dataset] drop_invalid={dropped_counts['invalid_rows']} "
          f"drop_area={clean_stats['dropped_area']} drop_aspect={clean_stats['dropped_aspect']} "
          f"neg_dropped={balance_stats['neg_dropped']}")


if __name__ == "__main__":
    main()
