#!/usr/bin/env python3

"""Clean opinion-monitoring CSV exports prior to lifecycle analysis.

The script removes rows where the ``position`` column is blank or belongs to a
user-specified exclusion list (defaults: empty string, ``其他``, ``other``,
``未知``, ``媒体``), normalizes the column by trimming whitespace, and writes
the result to a new CSV file.
"""

from __future__ import annotations

import argparse
from pathlib import Path
from typing import Iterable

import pandas as pd


DEFAULT_EXCLUSIONS = {"", "其他", "other", "未知", "媒体"}


def resolve_position_column(
    df: pd.DataFrame, candidates: list[str], target: str
) -> tuple[pd.DataFrame, str]:
    """Find the first candidate column present in *df* and rename it to *target*.

    Empty and duplicate candidate names are skipped. Raises ``SystemExit``
    when none of the candidates exists in the frame.
    """
    tried: list[str] = []
    for candidate in candidates:
        if not candidate or candidate in tried:
            continue
        tried.append(candidate)
        if candidate not in df.columns:
            continue
        # Only pay for the rename when the source column differs from target.
        renamed = df if candidate == target else df.rename(columns={candidate: target})
        return renamed, target
    raise SystemExit(
        f"Unable to locate a stance column. Tried: {', '.join(tried)}. "
        f"Available columns: {', '.join(df.columns)}"
    )


def normalize_position(value: object) -> str:
    """Coerce *value* to a whitespace-trimmed string; NaN/None becomes ""."""
    return "" if pd.isna(value) else str(value).strip()


def build_exclusion_set(values: Iterable[str]) -> set[str]:
    """Lower-case and trim *values*, dropping entries that trim to empty."""
    return {stripped.lower() for value in values if (stripped := value.strip())}


def drop_invalid_positions(
    df: pd.DataFrame, column: str, exclusions: set[str]
) -> pd.DataFrame:
    """Return a copy of *df* with *column* normalized and unwanted rows removed.

    Values are trimmed (NaN becomes ""), then rows whose lower-cased value is
    empty or appears in *exclusions* are dropped.
    """
    result = df.copy()
    result[column] = result[column].apply(normalize_position)
    keep = result[column].apply(
        lambda text: bool(text) and text.lower() not in exclusions
    )
    return result[keep]


def process_file(
    input_path: Path,
    output_path: Path,
    position_column: str,
    exclusions: set[str],
    encoding: str,
    position_aliases: list[str],
) -> None:
    """Read *input_path*, drop excluded stance rows, and write *output_path*.

    The stance column is located by trying *position_column*, then each of
    *position_aliases*, then the built-in fallbacks "position" and "立场";
    whichever matches is renamed to "position" in the output. A summary of
    removed/retained rows is printed to stdout.

    Raises SystemExit (via resolve_position_column) when no stance column
    can be found.
    """
    df = pd.read_csv(input_path, encoding=encoding)
    # Preference order: explicit flag first, then user aliases, then defaults.
    candidates = [position_column, *position_aliases, "position", "立场"]
    df, resolved_column = resolve_position_column(df, candidates, target="position")
    original_count = len(df)
    # Use the resolved name rather than re-hard-coding "position" so the two
    # cannot fall out of sync if the rename target ever changes.
    cleaned = drop_invalid_positions(df, column=resolved_column, exclusions=exclusions)
    cleaned.to_csv(output_path, index=False, encoding=encoding)
    removed = original_count - len(cleaned)
    print(f"Processed {input_path.name}:")
    print(f"- Removed {removed} rows due to excluded positions")
    print(f"- Retained {len(cleaned)} rows")
    print(f"- Output written to {output_path.name}")


def parse_args() -> argparse.Namespace:
    """Build and evaluate the command-line interface for the cleaner."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--input",
        type=Path,
        required=True,
        help="Path to the source CSV file.",
    )
    parser.add_argument(
        "--output",
        type=Path,
        help="Path for the cleaned CSV (defaults to '<stem>_clean.csv').",
    )
    parser.add_argument(
        "--encoding",
        default="gb18030",
        help="Encoding for reading and writing CSV files.",
    )
    parser.add_argument(
        "--position-column",
        default="position",
        help="Name of the stance column to clean.",
    )
    parser.add_argument(
        "--position-alias",
        action="append",
        dest="position_aliases",
        help=(
            "Additional column names to consider for stance detection. "
            "Useful when the source uses labels like '立场'."
        ),
    )
    parser.add_argument(
        "--drop-position",
        action="append",
        dest="drops",
        help=(
            "Position labels to remove (case-insensitive). "
            "Repeat for multiple values. "
            # Keep this list in sync with DEFAULT_EXCLUSIONS.
            "Defaults: '', '其他', 'other', '未知', '媒体'."
        ),
    )
    return parser.parse_args()


def main() -> None:
    """CLI entry point: validate arguments and run the cleaning pipeline."""
    args = parse_args()

    # Start from the built-in exclusions and fold in any user-supplied labels.
    drop_labels = DEFAULT_EXCLUSIONS.copy()
    for raw in args.drops or []:
        if raw:
            drop_labels.add(raw.strip().lower())

    source: Path = args.input
    if not source.exists():
        raise SystemExit(f"Input file not found: {source}")
    destination = args.output or source.with_name(f"{source.stem}_clean.csv")

    # Fall back to "position" when the flag is missing or whitespace-only.
    column = (args.position_column or "").strip() or "position"
    aliases = [alias.strip() for alias in args.position_aliases or [] if alias]

    process_file(
        input_path=source,
        output_path=destination,
        position_column=column,
        position_aliases=aliases,
        exclusions=build_exclusion_set(drop_labels),
        encoding=args.encoding,
    )


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
