#!/usr/bin/env python3

"""Analyze BEND label distribution across position and cycle_phrase."""

import argparse
import ast
from collections import Counter
from pathlib import Path
from typing import Iterable

import pandas as pd


def parse_bend(cell: object) -> list[str]:
    """Return the list of BEND labels stored in a dataframe cell.

    Accepts missing values (empty result), plain comma-separated label
    strings, and cells holding a Python-literal representation such as a
    list, tuple, or scalar.
    """
    if pd.isna(cell):
        return []
    raw = str(cell).strip()
    if not raw:
        return []
    try:
        parsed = ast.literal_eval(raw)
    except (ValueError, SyntaxError):
        # Not a Python literal: fall back to a comma-separated label string.
        pieces = (part.strip() for part in raw.split(','))
        return [part for part in pieces if part]
    if isinstance(parsed, str):
        cleaned = parsed.strip()
        return [cleaned] if cleaned else []
    try:
        labels: list[str] = []
        for item in parsed:
            text = str(item).strip()
            if text:
                labels.append(text)
        return labels
    except TypeError:
        # Scalar literal (int, float, ...) -> a single label.
        return [str(parsed).strip()]


def format_cycle_label(cycle: object) -> str:
    """Render a cycle_phrase value for display.

    Missing values become "Unknown", whole numbers drop the trailing ".0",
    and anything non-numeric is shown verbatim.
    """
    if pd.isna(cycle):
        return "Unknown"
    try:
        numeric = float(cycle)
    except (TypeError, ValueError):
        return str(cycle)
    return str(int(numeric)) if numeric.is_integer() else str(numeric)


def build_report(df: pd.DataFrame, top_n: int | None) -> str:
    """Build a plain-text report of BEND label counts per (position, cycle_phrase).

    Groups the dataframe (keeping NaN keys), counts the labels parsed from
    the BEND column of each group, and emits one section per group, sorted
    by position (case-insensitive) and then by numeric cycle value.

    Args:
        df: Dataframe with "position", "cycle_phrase" and "BEND" columns.
        top_n: Maximum labels to list per group; None or <= 0 shows all.

    Returns:
        The report as a single newline-joined string without trailing whitespace.
    """
    result_lines: list[str] = []
    grouped = df.groupby(["position", "cycle_phrase"], dropna=False)

    def sort_key(key: tuple[object, object]) -> tuple[str, float]:
        position, cycle = key
        pos_key = "" if pd.isna(position) else str(position)
        # The isna check must run BEFORE float(): float(nan) succeeds, and a
        # NaN sort key makes the ordering undefined because every comparison
        # against NaN is False. Missing and non-numeric cycles both sort last.
        if pd.isna(cycle):
            return (pos_key.lower(), float("inf"))
        try:
            cycle_key = float(cycle)
        except (TypeError, ValueError):
            cycle_key = float("inf")
        return (pos_key.lower(), cycle_key)

    for (position, cycle), group in sorted(grouped, key=lambda item: sort_key(item[0])):
        record_count = len(group)
        counter: Counter[str] = Counter()
        for cell in group["BEND"]:
            counter.update(parse_bend(cell))

        total_labels = sum(counter.values())
        position_label = "Unknown" if pd.isna(position) else str(position)
        cycle_label = format_cycle_label(cycle)
        result_lines.append(
            f"Position={position_label}, cycle_phrase={cycle_label} (records: {record_count})"
        )

        if total_labels == 0:
            result_lines.append("  No BEND labels available.")
            result_lines.append("")
            continue

        # Normalize top_n: None or a non-positive value means "show everything".
        limit = None if top_n is None or top_n <= 0 else top_n
        items = counter.most_common(limit)
        for label, count in items:
            pct = (count / total_labels) * 100
            result_lines.append(f"  - {label}: {count} ({pct:.1f}% of {total_labels})")
        if limit is not None and limit < len(counter):
            # Summarize the labels hidden by the limit.
            remaining = total_labels - sum(count for _, count in items)
            result_lines.append(
                f"  + {len(counter) - limit} more labels accounting for {remaining} occurrences"
            )
        result_lines.append("")

    return "\n".join(result_lines).rstrip()


def discover_input_files(requested: Iterable[Path] | None) -> list[Path]:
    if requested:
        files: list[Path] = []
        for path in requested:
            expanded = path.expanduser()
            if expanded.is_dir():
                files.extend(sorted(expanded.glob("*.csv")))
            else:
                files.append(expanded)
        return files
    return sorted(Path.cwd().glob("*.csv"))


def ensure_columns(df: pd.DataFrame) -> None:
    """Add any missing required columns to ``df`` in place, filled with <NA>."""
    for name in ("position", "cycle_phrase", "BEND"):
        if name not in df.columns:
            df[name] = pd.NA


def process_file(input_path: Path, encoding: str, top_n: int | None) -> Path:
    """Analyze one CSV file and write its BEND report next to it.

    Args:
        input_path: CSV file to read.
        encoding: Encoding used to read the CSV (the report is written utf-8).
        top_n: Per-group label limit forwarded to build_report.

    Returns:
        Path of the report file that was written.
    """
    df = pd.read_csv(input_path, encoding=encoding)
    ensure_columns(df)
    # ensure_columns guarantees the column exists, so coerce unconditionally;
    # non-numeric cycle values become NaN and are reported as "Unknown".
    df["cycle_phrase"] = pd.to_numeric(df["cycle_phrase"], errors="coerce")
    report = build_report(df, top_n=top_n)
    output_path = input_path.with_name(f"{input_path.stem}_bend_distribution_report.txt")
    output_path.write_text(report, encoding="utf-8")
    print(f"=== Report for {input_path.name} ===")
    print(report)
    print(f"Report saved to {output_path}")
    print()
    return output_path


def main() -> None:
    """Parse CLI arguments and run the BEND analysis over each input file."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--input",
        action="append",
        type=Path,
        help=(
            "Path to a CSV dataset. Repeat the option to process multiple files. "
            "Default: all CSV files in the current directory."
        ),
    )
    parser.add_argument(
        "--encoding",
        default="gb18030",
        # NOTE: the report output is always written utf-8; this encoding only
        # applies to reading the CSV, so the help text must not claim otherwise.
        help="File encoding for reading the CSV dataset.",
    )
    parser.add_argument(
        "--top",
        type=int,
        default=0,
        help="Limit the number of BEND labels to show per group (0 for all).",
    )
    args = parser.parse_args()

    input_files = discover_input_files(args.input)
    if not input_files:
        raise SystemExit("No CSV files found to process.")

    for input_path in input_files:
        # Skip (rather than abort) on missing or directory paths so one bad
        # argument does not prevent the remaining files from being processed.
        if not input_path.exists():
            print(f"Skipping {input_path}: file not found.")
            continue
        if input_path.is_dir():
            print(f"Skipping {input_path}: directory provided; pass files or rely on automatic discovery.")
            continue
        process_file(input_path, encoding=args.encoding, top_n=args.top)


if __name__ == "__main__":
    main()
