#!/usr/bin/env python3

"""Derive lifecycle phase labels (cycle_phrase) from daily volume trends.

The script reads one or more CSV files, computes daily message counts from the
``time`` column, and assigns each calendar day to one of four phases:

1. Emergence  - low-volume baseline before the surge begins.
2. Growth     - build-up to the peak once the surge threshold is crossed.
3. Peak       - high-volume plateau around the maximum activity.
4. Decline    - post-peak taper back toward baseline.

The heuristic is configurable via CLI arguments. By default it uses a centered
3-day rolling average to smooth daily volume, identifies a growth start once
smoothed volume exceeds ``baseline * growth_multiplier`` or ``baseline +
growth_offset``, and defines the peak window as days with smoothed counts above
``peak_share`` of the maximum. The decline stage covers the remainder after the
peak window.
"""

from __future__ import annotations

import argparse
from dataclasses import dataclass
from pathlib import Path

import pandas as pd

# Column names tried, in order, when locating the datetime field.
DEFAULT_TIME_ALIASES = ["time", "publish_time", "发布时间", "時間", "时间"]
# Numeric magnitudes at or above these thresholds are treated as Unix epochs.
EPOCH_SECOND_THRESHOLD = 1_000_000_000  # ~2001-09-09
EPOCH_MILLISECOND_THRESHOLD = 1_000_000_000_000  # ~2001-09-09 with ms precision


@dataclass
class LifecycleThresholds:
    """Tunable thresholds for the lifecycle phase heuristic."""

    # Minimum multiple of the baseline smoothed volume marking growth onset.
    growth_multiplier: float
    # Absolute increase above the baseline also required for growth onset.
    growth_offset: float
    # Fraction (0-1) of the maximum volume defining the peak window.
    peak_share: float


def resolve_time_column(
    df: pd.DataFrame, requested: str, aliases: list[str]
) -> str:
    """Return the first candidate datetime column present in *df*.

    Candidates are tried in order: the explicitly requested name, user-supplied
    aliases, then the built-in defaults. Falsy and duplicate names are skipped.

    Raises SystemExit when none of the candidates exist in the frame.
    """
    tried: list[str] = []
    for candidate in [requested, *aliases, *DEFAULT_TIME_ALIASES]:
        if not candidate or candidate in tried:
            continue
        tried.append(candidate)
        if candidate in df.columns:
            return candidate
    raise SystemExit(
        "Unable to locate a datetime column. Tried: "
        f"{', '.join(tried)}. Available columns: {', '.join(df.columns)}"
    )


def _next_available_column(df: pd.DataFrame, base: str) -> str:
    candidate = base
    counter = 1
    while candidate in df.columns:
        candidate = f"{base}_{counter}"
        counter += 1
    return candidate


def _infer_epoch_unit(values: pd.Series) -> str | None:
    """Guess whether *values* look like Unix epochs; return 'ms', 's', or None.

    The guess is based on the largest absolute numeric magnitude in the
    series; entries that fail numeric coercion are ignored entirely.
    """
    numeric = pd.to_numeric(values, errors="coerce").dropna()
    if numeric.empty:
        return None
    peak = numeric.abs().max()
    if peak >= EPOCH_MILLISECOND_THRESHOLD:
        return "ms"
    return "s" if peak >= EPOCH_SECOND_THRESHOLD else None


def _finalize_datetime_column(
    df: pd.DataFrame,
    source_column: str,
    values: pd.Series,
    message: str | None,
    *,
    add_time_alias: bool,
) -> tuple[str, str | None]:
    df[source_column] = values
    target_column = source_column
    if add_time_alias:
        alias_created = False
        if "time" not in df.columns:
            df["time"] = values
            alias_created = True
        if alias_created and source_column != "time":
            target_column = "time"
            if message:
                message = message + " Added 'time' column for downstream scripts."
            else:
                message = "Added 'time' column for downstream scripts."
    return target_column, message


def ensure_datetime_column(df: pd.DataFrame, column_name: str) -> tuple[str, str | None]:
    """Guarantee that the returned column contains real datetimes.

    Returns the column name to use along with an optional log message when a
    new column is created (e.g., when converting Unix epochs).

    Raises ValueError when the column neither parses as datetimes nor matches
    a Unix epoch pattern.
    """
    unit = _infer_epoch_unit(df[column_name])
    if unit:
        as_numbers = pd.to_numeric(df[column_name], errors="coerce")
        as_datetimes = pd.to_datetime(as_numbers, unit=unit, errors="coerce")
        if not as_datetimes.isna().all():
            note = (
                f"Detected Unix epoch values in '{column_name}' (unit={unit}) "
                "and converted them to true datetimes."
            )
            # Epoch sources get a 'time' alias so downstream scripts find it.
            return _finalize_datetime_column(
                df, column_name, as_datetimes, note, add_time_alias=True
            )

    as_datetimes = pd.to_datetime(df[column_name], errors="coerce")
    if as_datetimes.isna().all():
        raise ValueError(
            f"Column '{column_name}' could not be parsed as datetime "
            "and no Unix epoch pattern was detected."
        )
    return _finalize_datetime_column(
        df, column_name, as_datetimes, None, add_time_alias=False
    )


def compute_daily_volume(
    df: pd.DataFrame, time_column: str, window: int = 3
) -> pd.DataFrame:
    """Aggregate *df* into one row per calendar day with raw and smoothed counts.

    Parameters
    ----------
    df : source frame; only *time_column* is read.
    time_column : column parsed via ``pd.to_datetime``; unparseable rows are
        dropped from the aggregation.
    window : size of the centered rolling mean used for smoothing. Defaults to
        3, matching the module docstring's "centered 3-day rolling average".

    Returns a frame with columns ``date`` (datetime64), ``volume`` (row count
    per day) and ``volume_ma`` (centered rolling mean), sorted by date with a
    fresh RangeIndex (``detect_phases`` relies on contiguous integer indices).

    Raises ValueError when no row of *time_column* parses as a datetime.
    """
    date_series = pd.to_datetime(df[time_column], errors="coerce").dt.date
    if date_series.isna().all():
        raise ValueError(
            f"Column '{time_column}' could not be parsed as datetime for any row."
        )
    df = df.copy()
    df["__date__"] = date_series
    daily = (
        df.dropna(subset=["__date__"])
        .groupby("__date__", as_index=False)
        .size()
        .rename(columns={"__date__": "date", "size": "volume"})
    )
    daily["date"] = pd.to_datetime(daily["date"])
    daily = daily.sort_values("date").reset_index(drop=True)
    # center=True keeps the smoothed curve aligned with the raw peak; the
    # previous trailing mean lagged the peak by a day, contradicting the
    # module docstring's "centered 3-day rolling average" heuristic.
    daily["volume_ma"] = (
        daily["volume"].rolling(window=window, min_periods=1, center=True).mean()
    )
    return daily


def detect_phases(
    daily: pd.DataFrame, thresholds: LifecycleThresholds
) -> pd.Series:
    """Assign each row of *daily* a lifecycle phase code (1-4).

    Phase codes: 1 = Emergence, 2 = Growth, 3 = Peak, 4 = Decline.
    Expects *daily* to carry ``volume`` and ``volume_ma`` columns with a
    contiguous integer index (``compute_daily_volume`` produces this via
    ``reset_index``); the index arithmetic below relies on it.

    Raises ValueError when *daily* is empty.
    """
    if daily.empty:
        raise ValueError("No daily volume available to detect phases.")

    # Baseline = lower-quartile smoothed volume, floored at 1.0 so the
    # growth threshold cannot collapse to zero on sparse data.
    baseline = max(1.0, daily["volume_ma"].quantile(0.25))
    growth_threshold = max(
        baseline * thresholds.growth_multiplier, baseline + thresholds.growth_offset
    )
    peak_value_ma = daily["volume_ma"].max()
    peak_value_raw = daily["volume"].max()
    if peak_value_ma <= 0:
        # No activity at all: everything is Emergence.
        return pd.Series([1] * len(daily), index=daily.index)

    # Peak-window membership is tested against both smoothed and raw counts.
    peak_threshold_ma = max(1.0, peak_value_ma * thresholds.peak_share)
    peak_threshold_raw = max(1.0, peak_value_raw * thresholds.peak_share)

    # First day whose smoothed volume crosses the growth threshold; falls
    # back to index 0 when the threshold is never reached.
    surge_start_idx = next(
        (idx for idx, value in daily["volume_ma"].items() if value >= growth_threshold),
        0,
    )
    peak_indices = [
        idx
        for idx in daily.index
        if daily.loc[idx, "volume_ma"] >= peak_threshold_ma
        or daily.loc[idx, "volume"] >= peak_threshold_raw
    ]
    if not peak_indices:
        # Degenerate case: fall back to the single smoothed maximum.
        peak_indices = [daily["volume_ma"].idxmax()]
    peak_indices = sorted(set(peak_indices))

    # Group qualifying days into runs of consecutive indices.
    peak_max_idx = daily["volume_ma"].idxmax()
    contiguous: list[list[int]] = []
    current_cluster: list[int] = [peak_indices[0]]
    for idx in peak_indices[1:]:
        if idx == current_cluster[-1] + 1:
            current_cluster.append(idx)
        else:
            contiguous.append(current_cluster)
            current_cluster = [idx]
    contiguous.append(current_cluster)

    # Keep the run containing the global smoothed maximum, or the first run.
    selected_cluster = next(
        (cluster for cluster in contiguous if peak_max_idx in cluster), contiguous[0]
    )
    peak_start_idx, peak_end_idx = selected_cluster[0], selected_cluster[-1]

    # Pull the surge start before the peak window so Growth precedes Peak
    # (when the peak does not begin on the very first day).
    if surge_start_idx >= peak_start_idx:
        surge_start_idx = max(0, peak_start_idx - 1)

    def classify(idx: int) -> int:
        # Map a positional index to its phase code using the boundaries above.
        if idx < surge_start_idx:
            return 1
        if idx < peak_start_idx:
            return 2
        if idx <= peak_end_idx:
            # Days inside the peak window (after its first day) whose raw
            # volume dips below the raw threshold are labeled Decline.
            if (
                idx > peak_start_idx
                and daily.loc[idx, "volume"] < peak_threshold_raw
            ):
                return 4
            return 3
        return 4

    return daily.index.to_series().map(classify)


def assign_cycle_phrases(
    df: pd.DataFrame,
    time_column: str,
    thresholds: LifecycleThresholds,
    lifecycle_column: str,
) -> tuple[pd.DataFrame, pd.DataFrame]:
    """Attach a per-row lifecycle phase column derived from daily volume.

    Returns a copy of *df* with *lifecycle_column* added (nullable Int64;
    rows whose timestamps do not parse get <NA>) together with the per-day
    summary frame that produced the labels.
    """
    daily = compute_daily_volume(df, time_column=time_column)
    daily["cycle_phrase"] = detect_phases(daily, thresholds=thresholds)

    # Each row is labeled via its midnight-normalized timestamp.
    phase_by_date = daily.set_index("date")["cycle_phrase"]
    result = df.copy()
    timestamps = pd.to_datetime(result[time_column], errors="coerce")
    result[lifecycle_column] = (
        timestamps.dt.normalize().map(phase_by_date).astype("Int64")
    )
    return result, daily


def process_file(
    input_path: Path,
    output_path: Path,
    thresholds: LifecycleThresholds,
    time_column: str,
    lifecycle_column: str,
    encoding: str,
    time_aliases: list[str],
) -> None:
    """Read one CSV, annotate it with lifecycle phases, and write the result.

    Also prints a short per-day phase summary to stdout for operator review.
    """
    frame = pd.read_csv(input_path, encoding=encoding)
    resolved = resolve_time_column(frame, time_column, time_aliases)
    resolved, conversion_note = ensure_datetime_column(frame, resolved)
    annotated, daily = assign_cycle_phrases(
        frame,
        time_column=resolved,
        thresholds=thresholds,
        lifecycle_column=lifecycle_column,
    )
    annotated.to_csv(output_path, index=False, encoding=encoding)

    print(f"Processed {input_path.name}:")
    print(f"- Output written to {output_path.name}")
    if conversion_note:
        print(f"- {conversion_note}")
    print(f"- Volume range: {int(daily['volume'].min())} - {int(daily['volume'].max())}")
    print("- Lifecycle summary (by date):")
    summary = daily.copy()
    summary["date"] = summary["date"].dt.date
    print(summary[["date", "volume", "volume_ma", "cycle_phrase"]].to_string(index=False))
    print()


def parse_args() -> argparse.Namespace:
    """Build and evaluate the command-line interface for this script."""
    ap = argparse.ArgumentParser(description=__doc__)
    ap.add_argument(
        "--input", type=Path, required=True, help="Path to the source CSV file."
    )
    ap.add_argument(
        "--output",
        type=Path,
        help="Path for the output CSV (defaults to '<stem>_with_cycle.csv').",
    )
    ap.add_argument(
        "--time-column",
        default="time",
        help="Name of the datetime column used to derive daily volume.",
    )
    ap.add_argument(
        "--time-alias",
        action="append",
        dest="time_aliases",
        help=(
            "Additional column names to try when locating the datetime field "
            "(e.g., 'publish_time', '发布时间')."
        ),
    )
    ap.add_argument(
        "--lifecycle-column",
        default="cycle_phrase",
        help="Name of the lifecycle phase column to create.",
    )
    ap.add_argument(
        "--encoding",
        default="gb18030",
        help="File encoding for reading and writing CSV files.",
    )
    # Heuristic thresholds feeding LifecycleThresholds.
    ap.add_argument(
        "--growth-multiplier",
        type=float,
        default=2.0,
        help="Minimum multiple of the baseline needed to enter the growth phase.",
    )
    ap.add_argument(
        "--growth-offset",
        type=float,
        default=5.0,
        help="Additional absolute increase above the baseline required for growth.",
    )
    ap.add_argument(
        "--peak-share",
        type=float,
        default=0.65,
        help="Fraction of the peak (0-1) that defines the peak window.",
    )
    return ap.parse_args()


def main() -> None:
    """CLI entry point: validate the input path and process the file."""
    args = parse_args()
    source: Path = args.input
    if not source.exists():
        raise SystemExit(f"Input file not found: {source}")
    destination = args.output or source.with_name(f"{source.stem}_with_cycle.csv")
    # Drop empty aliases and trim surrounding whitespace before lookup.
    cleaned_aliases = [alias.strip() for alias in args.time_aliases or [] if alias]
    process_file(
        input_path=source,
        output_path=destination,
        thresholds=LifecycleThresholds(
            growth_multiplier=args.growth_multiplier,
            growth_offset=args.growth_offset,
            peak_share=args.peak_share,
        ),
        time_column=args.time_column,
        time_aliases=cleaned_aliases,
        lifecycle_column=args.lifecycle_column,
        encoding=args.encoding,
    )


# Allow importing this module without triggering CLI execution.
if __name__ == "__main__":
    main()
