#!/usr/bin/env python3
"""Convert all CSV files in `data/` to Parquet files with the same basename.

Behavior:
- Scans the repository `data/` directory for files ending with .csv (case-insensitive).
- For each CSV, writes a same-named .parquet file next to it using Snappy compression.
- Uses polars for speed; falls back to pandas+pyarrow if polars isn't available or fails.

The generated Parquet files are not added to git (the repo `.gitignore` already ignores `data/`).
"""
from __future__ import annotations

import sys
from pathlib import Path
import traceback


# Repository data/ directory. NOTE(review): parents[1] assumes this script
# lives exactly one directory below the repo root (e.g. scripts/convert.py)
# — confirm if the file is ever moved.
DATA_DIR = Path(__file__).resolve().parents[1] / "data"


def convert_with_polars(csv_path: Path, parquet_path: Path) -> None:
    """Read *csv_path* with polars and write it to *parquet_path* (snappy)."""
    # Imported lazily so the pandas fallback still works when polars
    # is not installed.
    import polars as pl

    print(f"polars: reading {csv_path}")
    frame = pl.read_csv(str(csv_path), try_parse_dates=True)
    print(f"polars: writing {parquet_path}")
    frame.write_parquet(str(parquet_path), compression="snappy")


def convert_with_pandas(csv_path: Path, parquet_path: Path) -> None:
    """Read *csv_path* with pandas and write it to *parquet_path* (snappy).

    Fallback path used when polars is unavailable or fails on a file.
    """
    # Imported lazily: only needed when the polars path does not succeed.
    import pandas as pd

    print(f"pandas: reading {csv_path}")
    # Arrow-backed dtypes keep memory down and round-trip cleanly to Parquet;
    # dtype inference is otherwise left to pandas.
    frame = pd.read_csv(csv_path, dtype_backend="pyarrow")
    print(f"pandas: writing {parquet_path}")
    frame.to_parquet(parquet_path, engine="pyarrow", compression="snappy", index=False)


def main() -> int:
    """Convert every CSV under DATA_DIR to a sibling .parquet file.

    Returns a process exit code: 1 when the data directory is missing or
    any file fails to convert, 0 otherwise.
    """
    if not DATA_DIR.exists():
        print(f"data directory not found: {DATA_DIR}")
        return 1

    # Match the extension case-insensitively (.csv, .CSV, .Csv, ...) as the
    # module docstring promises; a plain rglob("*.csv") is case-sensitive on
    # case-sensitive filesystems and would silently skip .CSV files.
    csv_files = sorted(
        p for p in DATA_DIR.rglob("*") if p.is_file() and p.suffix.lower() == ".csv"
    )
    if not csv_files:
        print("no CSV files found under data/")
        return 0

    print(f"found {len(csv_files)} csv file(s) under {DATA_DIR}")

    converted = 0
    for csv in csv_files:
        parquet = csv.with_suffix(".parquet")
        try:
            try:
                convert_with_polars(csv, parquet)
            except Exception:
                # Best-effort fallback: polars may be missing or fail on an
                # unusual CSV dialect; retry with pandas before giving up.
                print("polars conversion failed, falling back to pandas+pyarrow")
                traceback.print_exc()
                convert_with_pandas(csv, parquet)
            converted += 1
        except Exception:
            # Keep going so one bad file doesn't block the rest; the
            # failure is reflected in the exit code below.
            print(f"failed to convert {csv} -> {parquet}")
            traceback.print_exc()

    print(f"converted {converted}/{len(csv_files)} files")
    # Previously this always returned 0, hiding failures from callers/CI.
    return 0 if converted == len(csv_files) else 1


if __name__ == "__main__":
    raise SystemExit(main())
