﻿#!/usr/bin/env python3



"""End-to-end analytics generator for the TW Outage opinion dataset.



The script expects a cleaned CSV that already includes lifecycle labels

(`cycle_phrase`). It produces every derived artifact needed for reporting and

creates a comprehensive Markdown summary of the metrics.



Artifacts written to the chosen output directory (defaults to `analysis_data/`):



* Cleaned dataset (pass-through if already generated upstream)

* `daily_volume_summary.csv`

* `stance_mix_summary.csv`

* `lifecycle_stance_participation.csv`

* `lifecycle_stance_bend_summary.csv` (or `lifecycle_bend_summary.csv` when stance is ignored)

* `representative_posts_top10_per_phase.csv`

* `ANALYSIS_RESULTS_SUMMARY.md`

"""



from __future__ import annotations



import argparse

import ast

from collections import defaultdict

from dataclasses import dataclass

from pathlib import Path

from textwrap import dedent

from typing import Iterable



import pandas as pd





# Chinese stance labels (支持/中立/反对) mapped to English display names.
STANCE_MAP = {"\u652f\u6301": "Support", "\u4e2d\u7acb": "Neutral", "\u53cd\u5bf9": "Oppose"}

# Numeric lifecycle codes produced by the upstream labeling step -> phase names.
PHASE_MAP = {1: "Emergence", 2: "Growth", 3: "Peak", 4: "Decline"}

# Canonical chronological ordering of phases, used for reindexing and sorting.
PHASE_ORDER = ["Emergence", "Growth", "Peak", "Decline"]





@dataclass
class DatasetArtifacts:
    """Configuration bundle for one analytics pipeline run.

    Carries the input/output locations, CSV encoding, and the ordered
    column-name candidates used to normalize heterogeneous input schemas
    into the canonical columns the pipeline expects.
    """

    # Path to the cleaned CSV (expected to include `cycle_phrase` labels).
    input_path: Path
    # Directory where every derived artifact is written.
    output_dir: Path
    # Encoding used to read the input CSV (e.g. 'gb18030').
    encoding: str
    # Number of high-interaction posts captured per lifecycle phase.
    top_posts_per_phase: int
    # Candidate column names, tried in order, for each canonical column.
    time_candidates: tuple[str, ...]
    position_candidates: tuple[str, ...]
    like_candidates: tuple[str, ...]
    comment_candidates: tuple[str, ...]
    forward_candidates: tuple[str, ...]
    viewpoint_candidates: tuple[str, ...]
    # Either 'lifecycle-stance-bend' (keep stance) or 'lifecycle-bend'.
    group_mode: str





def ensure_dir(path: Path) -> None:
    """Create *path* (including any missing parents) if it does not exist."""
    path.mkdir(parents=True, exist_ok=True)





def build_candidates(
    primary: str | None, extras: Iterable[str], defaults: Iterable[str]
) -> tuple[str, ...]:
    """Merge candidate column names into a deduplicated, order-preserving tuple.

    *primary* (if truthy) comes first, then *extras*, then *defaults*;
    falsy entries are dropped and later duplicates are ignored.
    """
    candidates = (name for name in (primary, *extras, *defaults) if name)
    # dict.fromkeys keeps first-seen order while removing duplicates.
    return tuple(dict.fromkeys(candidates))





def ensure_column(
    df: pd.DataFrame,
    target: str,
    candidates: Iterable[str],
    *,
    required: bool,
    fill_value: object | None = None,
) -> pd.DataFrame:
    """Ensure *df* exposes a column named *target*, sourced from *candidates*.

    The first candidate present in *df* wins:
    * candidate name equals *target*: the frame already conforms;
    * *target* already exists alongside the candidate: missing target values
      are filled from the candidate column, which is then dropped;
    * otherwise the candidate column is renamed to *target*.

    Raises ValueError when *required* is True and no candidate matches.
    When not required and nothing matched, a constant *fill_value* column
    is created instead.
    """
    seen: list[str] = []
    for name in candidates:
        if not name or name in seen:
            continue
        seen.append(name)
        if name not in df.columns:
            continue
        if name != target:
            if target in df.columns:
                # Merge: keep existing target values, fill gaps from the
                # candidate, then drop the now-redundant source column.
                # NOTE: the original code branched on the target's dtype here,
                # but both branches executed the identical fillna, so the
                # dead dtype check has been removed.
                df[target] = df[target].fillna(df[name])
                df = df.drop(columns=[name])
            else:
                df = df.rename(columns={name: target})
        return df
    if required:
        tried = ", ".join(seen)
        available = ", ".join(df.columns)
        raise ValueError(
            f"Required column '{target}' not found. Tried: {tried}. "
            f"Available columns: {available}"
        )
    if target not in df.columns:
        df[target] = fill_value
    return df





def normalize_input_columns(df: pd.DataFrame, artifacts: DatasetArtifacts) -> pd.DataFrame:
    """Normalize a raw input frame into the canonical column schema.

    Resolves each canonical column ('time', 'position', engagement counts,
    'Viewpoint', and the BEND fields) from the configured candidate names,
    filling optional columns with a default when absent.
    """
    result = df.copy()
    # (target column, candidate names, required?, fill value when optional)
    column_specs: list[tuple[str, Iterable[str], bool, object]] = [
        ("time", artifacts.time_candidates, True, None),
        ("position", artifacts.position_candidates, True, None),
        ("Like", artifacts.like_candidates, False, 0),
        ("Comment", artifacts.comment_candidates, False, 0),
        ("Forward", artifacts.forward_candidates, False, 0),
        ("Viewpoint", artifacts.viewpoint_candidates, False, ""),
        ("BEND", ["BEND"], False, ""),
        ("BEND_Reasoning", ["BEND_Reasoning"], False, ""),
    ]
    for target, candidates, required, fill in column_specs:
        result = ensure_column(
            result, target, candidates, required=required, fill_value=fill
        )
    return result





def parse_bend(cell: object) -> list[str]:
    """Decode a single BEND cell into a list of non-empty tag strings.

    Cells may contain a Python-literal collection (e.g. "['b1', 'e2']"),
    a plain comma-separated string, or a scalar value. Blank and NaN
    cells yield an empty list.
    """
    if pd.isna(cell):
        return []
    raw = str(cell).strip()
    if not raw:
        return []
    try:
        parsed = ast.literal_eval(raw)
    except (SyntaxError, ValueError):
        # Not a valid Python literal: treat as a comma-separated tag list.
        return [piece.strip() for piece in raw.split(",") if piece.strip()]
    if isinstance(parsed, (list, tuple, set)):
        return [str(entry).strip() for entry in parsed if str(entry).strip()]
    if isinstance(parsed, str):
        cleaned = parsed.strip()
        return [cleaned] if cleaned else []
    # Scalar literal (int, float, ...): keep its string form as one tag.
    return [str(parsed).strip()]





def normalize_stance(series: pd.Series) -> pd.Series:
    """Translate stance labels via STANCE_MAP, keeping unmapped values as-is."""
    translated = series.map(STANCE_MAP)
    # Positions the map could not translate fall back to the original value.
    return translated.where(translated.notna(), series)





def normalize_phase(series: pd.Series) -> pd.Series:
    """Translate lifecycle codes via PHASE_MAP, keeping unmapped values as-is."""
    translated = series.map(PHASE_MAP)
    # Codes outside PHASE_MAP fall back to the original value.
    return translated.where(translated.notna(), series)





def to_numeric(series: pd.Series) -> pd.Series:
    """Coerce *series* to numeric, mapping unparseable or missing entries to 0."""
    coerced = pd.to_numeric(series, errors="coerce")
    return coerced.fillna(0)





def compute_daily_volume(df: pd.DataFrame, artifacts: DatasetArtifacts) -> pd.DataFrame:
    """Count messages per calendar day and lifecycle phase.

    Writes `daily_volume_summary.csv` to the output directory and returns
    the summary frame (date, cycle_phrase, cycle_label, messages, share_pct).

    Raises ValueError when the 'time' column is absent or entirely unparseable.
    """
    if "time" not in df.columns:
        raise ValueError("Column 'time' is required for daily volume calculations.")
    day = pd.to_datetime(df["time"], errors="coerce").dt.date
    if day.isna().all():
        raise ValueError("Unable to parse any timestamps from the 'time' column.")
    daily = (
        df.assign(date=day)
        .dropna(subset=["date"])
        .groupby(["date", "cycle_phrase"], dropna=False)
        .size()
        .reset_index(name="messages")
    )
    daily["date"] = pd.to_datetime(daily["date"])
    daily = daily.sort_values("date")
    # Share is relative to all parseable messages across the full timeline.
    daily["share_pct"] = (daily["messages"] / daily["messages"].sum() * 100).round(2)
    daily["cycle_label"] = normalize_phase(daily["cycle_phrase"])
    result = daily[["date", "cycle_phrase", "cycle_label", "messages", "share_pct"]]
    result.to_csv(artifacts.output_dir / "daily_volume_summary.csv", index=False, encoding="utf-8")
    return result





def compute_stance_mix(
    df: pd.DataFrame, artifacts: DatasetArtifacts
) -> pd.DataFrame | None:
    """Tally the overall stance distribution and write `stance_mix_summary.csv`.

    Returns None when stance grouping is disabled (group mode 'lifecycle-bend').
    Raises ValueError when the 'position' column is absent.
    """
    if artifacts.group_mode != "lifecycle-stance-bend":
        return None
    if "position" not in df.columns:
        raise ValueError("Column 'position' is required for stance calculations.")

    stances = normalize_stance(df["position"])
    mix = (
        stances.value_counts()
        .rename_axis("stance")
        .reset_index(name="messages")
    )
    mix["share_pct"] = (mix["messages"] / mix["messages"].sum() * 100).round(2)
    mix.to_csv(artifacts.output_dir / "stance_mix_summary.csv", index=False, encoding="utf-8")
    return mix.sort_values("messages", ascending=False)





def compute_lifecycle_stance(
    df: pd.DataFrame, artifacts: DatasetArtifacts
) -> pd.DataFrame | None:
    """Build the lifecycle-by-stance participation table.

    Writes `lifecycle_stance_participation.csv` (counts, per-phase shares,
    phase totals, and each phase's share of all messages) and returns the
    table with the phase index reset to a column.

    Returns None when stance grouping is disabled (group mode 'lifecycle-bend').
    Raises ValueError when the 'cycle_phrase' column is absent.
    """
    if artifacts.group_mode != "lifecycle-stance-bend":
        return None
    if "cycle_phrase" not in df.columns:
        raise ValueError("Column 'cycle_phrase' required for lifecycle analysis.")

    stance = normalize_stance(df["position"])
    phase = normalize_phase(df["cycle_phrase"])
    augmented = df.assign(stance=stance, phase=phase)
    if "id" in df.columns:
        # Counting the 'id' column counts non-null ids per (phase, stance) cell.
        counts = augmented.pivot_table(
            index="phase",
            columns="stance",
            values="id",
            aggfunc="count",
            fill_value=0,
        )
    else:
        counts = (
            augmented.groupby(["phase", "stance"])
            .size()
            .unstack(fill_value=0)
        )
    # fill_value=0 keeps phases with no posts as zero rows instead of NaN,
    # so the CSV stays integer-valued even when a phase is missing from the
    # data (the previous reindex left NaN rows in that case).
    counts = counts.reindex(PHASE_ORDER, fill_value=0)

    phase_totals = counts.sum(axis=1)
    overall_total = phase_totals.sum()
    # Per-phase stance shares; an empty phase yields NaN shares (0/0).
    share = (counts.div(phase_totals, axis=0) * 100).round(2)
    share = share.add_suffix(" (%)")
    combined = pd.concat([counts, share], axis=1)
    combined["phase_total"] = phase_totals.astype(int)
    combined["phase_share_pct"] = (phase_totals / overall_total * 100).round(2)
    combined.to_csv(
        artifacts.output_dir / "lifecycle_stance_participation.csv",
        encoding="utf-8",
    )
    # The index is normally already named 'phase'; the rename is a no-op
    # safeguard in case the name was lost along the way.
    return combined.reset_index().rename(columns={"index": "phase"})





def compute_bend_outputs(
    df: pd.DataFrame, artifacts: DatasetArtifacts
) -> tuple[pd.DataFrame, list[str]]:
    """Aggregate BEND tag frequencies per lifecycle phase (and optionally stance).

    Writes `lifecycle_stance_bend_summary.csv` when stance grouping is active,
    otherwise `lifecycle_bend_summary.csv`. Returns the summary frame (sorted
    by phase order when non-empty) together with human-readable highlight
    lines for the Markdown report.

    Raises ValueError when the 'BEND' column is absent.
    """
    if "BEND" not in df.columns:
        raise ValueError("Column 'BEND' required for narrative summaries.")

    use_stance = artifacts.group_mode == "lifecycle-stance-bend"
    group_fields = ["position", "cycle_phrase"] if use_stance else ["cycle_phrase"]
    rows: list[dict[str, object]] = []
    groups = df.groupby(group_fields, dropna=False)
    for key, group in groups:
        if use_stance:
            # Two group fields -> key is a (stance, cycle) tuple.
            stance_raw, cycle = key  # type: ignore[misc]
            stance_label = STANCE_MAP.get(stance_raw, stance_raw)
        else:
            # Single group field: depending on the pandas version the key may
            # arrive as a 1-tuple or a bare scalar, so handle both.
            cycle = key[0] if isinstance(key, tuple) else key
            stance_label = None
        cycle_label = PHASE_MAP.get(cycle, cycle)
        # Flatten every BEND cell in the group into one tag list.
        tags: list[str] = []
        for cell in group["BEND"]:
            tags.extend(parse_bend(cell))
        if not tags:
            continue
        counts = pd.Series(tags).value_counts()
        total_tags = int(counts.sum())
        # Keep the ten most frequent tags per group; percentages are
        # relative to all tags in the group, not just the top ten.
        top_counts = counts.head(10)
        pct = (top_counts / total_tags * 100).round(1)
        record: dict[str, object] = {
            "cycle_label": cycle_label,
            "cycle_phrase": cycle,
            "records": len(group),
            "total_tags": total_tags,
        }
        if use_stance:
            record["stance_label"] = stance_label
        # Expand the top tags into flat topN_label/count/pct columns.
        for idx, (label, count) in enumerate(top_counts.items(), start=1):
            record[f"top{idx}_label"] = label
            record[f"top{idx}_count"] = int(count)
            record[f"top{idx}_pct"] = float(pct[label])
        rows.append(record)
    if rows:
        summary = pd.DataFrame(rows)
    else:
        # No tags anywhere: still emit a frame with the base schema so the
        # CSV has headers.
        base_columns = ["cycle_label", "cycle_phrase"]
        if use_stance:
            base_columns.append("stance_label")
        base_columns.extend(["records", "total_tags"])
        summary = pd.DataFrame(columns=base_columns)
    filename = (
        "lifecycle_stance_bend_summary.csv"
        if use_stance
        else "lifecycle_bend_summary.csv"
    )
    # NOTE(review): the CSV is written before the sort below, so the file
    # keeps groupby order while the returned frame is phase-ordered —
    # presumably intentional, but worth confirming.
    summary.to_csv(
        artifacts.output_dir / filename,
        index=False,
        encoding="utf-8",
    )
    if not summary.empty:
        # Sort by canonical phase order (and stance within a phase) using a
        # temporary ordered categorical, then restore plain strings.
        summary["cycle_label"] = pd.Categorical(
            summary["cycle_label"], categories=PHASE_ORDER, ordered=True
        )
        sort_columns = ["cycle_label", "stance_label"] if use_stance else ["cycle_label"]
        summary = summary.sort_values(sort_columns)
        summary["cycle_label"] = summary["cycle_label"].astype(str)
    # Build one "Phase | stance | N tags | tag pct, ..." line per group,
    # showing at most the top three tags.
    highlight_lines: list[str] = []
    for _, row in summary.iterrows():
        bullets: list[str] = []
        for i in range(1, 4):
            label = row.get(f"top{i}_label")
            pct_value = row.get(f"top{i}_pct")
            if pd.notna(label) and pd.notna(pct_value):
                bullets.append(f"{label} {pct_value:.1f}%")
        if bullets:
            prefix_parts = [row["cycle_label"]]
            if use_stance and pd.notna(row.get("stance_label")):
                prefix_parts.append(row["stance_label"])
            prefix = " | ".join(prefix_parts)
            highlight_lines.append(
                f"{prefix} | {int(row['total_tags'])} tags | " + ", ".join(bullets)
            )
    return summary, highlight_lines









def compute_representative_posts(
    df: pd.DataFrame, artifacts: DatasetArtifacts
) -> pd.DataFrame:
    """Extract the highest-interaction posts for each lifecycle phase.

    Interactions are the sum of likes, comments, and forwards. Writes the
    result to `representative_posts_top10_per_phase.csv` and returns it.

    Raises ValueError when any engagement/content column is missing.
    """
    required = {"Like", "Comment", "Forward", "content", "Viewpoint"}
    missing = [col for col in required if col not in df.columns]
    if missing:
        raise ValueError(
            f"Missing columns for representative post extraction: {', '.join(missing)}"
        )

    def flatten(value: object) -> str:
        # Collapse newlines so snippets stay on one CSV/Markdown line.
        return str(value).replace("\n", " ").strip()

    working = df.copy()
    working["phase"] = normalize_phase(working["cycle_phrase"])
    working["stance"] = normalize_stance(working["position"])
    for col in ("Like", "Comment", "Forward"):
        working[col] = to_numeric(working[col])
    working["interactions"] = working["Like"] + working["Comment"] + working["Forward"]

    rows: list[dict[str, object]] = []
    for phase, group in working.groupby("phase"):
        ranked = group.sort_values("interactions", ascending=False)
        for _, row in ranked.head(artifacts.top_posts_per_phase).iterrows():
            rows.append(
                {
                    "phase": phase,
                    "stance": row.get("stance"),
                    "time": row.get("time"),
                    "interactions": int(row.get("interactions", 0)),
                    "like": int(row.get("Like", 0)),
                    "comment": int(row.get("Comment", 0)),
                    "forward": int(row.get("Forward", 0)),
                    "content_snippet": flatten(row.get("content", "")),
                    "viewpoint_snippet": flatten(row.get("Viewpoint", "")),
                    "bend": flatten(row.get("BEND", "")),
                    "bend_reasoning": flatten(row.get("BEND_Reasoning", "")),
                }
            )

    reps = pd.DataFrame(rows)
    reps.to_csv(
        artifacts.output_dir / "representative_posts_top10_per_phase.csv",
        index=False,
        encoding="utf-8",
    )
    return reps





def build_markdown(
    artifacts: DatasetArtifacts,
    daily: pd.DataFrame,
    stance_mix: pd.DataFrame | None,
    lifecycle: pd.DataFrame | None,
    bend_summary: pd.DataFrame,
    bend_highlights: list[str],
    representative_posts: pd.DataFrame,
) -> str:
    """Assemble the full ANALYSIS_RESULTS_SUMMARY.md report as one string.

    Combines the timeline, stance, lifecycle, BEND-highlight, representative
    content, and file-inventory sections from the previously computed frames.
    `stance_mix` and `lifecycle` may be None (lifecycle-bend mode); a short
    explanatory note is rendered in place of the corresponding table.
    """
    total_messages = int(daily["messages"].sum())
    # Messages per phase, in canonical phase order; phases absent from the
    # data are dropped rather than shown as zero.
    phase_totals = (
        daily.groupby("cycle_label")["messages"]
        .sum()
        .reindex(PHASE_ORDER)
        .dropna()
    )

    # Top five busiest days, plus a display copy with formatted columns.
    top_days_raw = daily.sort_values("messages", ascending=False).head(5)
    top_days_display = top_days_raw.copy()
    top_days_display["Date"] = top_days_display["date"].dt.strftime("%Y-%m-%d")
    top_days_display.rename(
        columns={
            "cycle_label": "Phase",
            "messages": "Messages",
            "share_pct": "Share %",
        },
        inplace=True,
    )
    top_days_display["Messages"] = top_days_display["Messages"].astype(int)
    top_days_display["Share %"] = top_days_display["Share %"].map(lambda x: f"{x:.2f}%")

    # Key timeline events; each frame has at most one row and may be empty.
    # NOTE(review): the surge threshold (>20 msgs/day) is hard-coded.
    surge_day = daily.loc[daily["messages"] > 20].sort_values("date").head(1)
    peak_day = top_days_raw.head(1)
    decline_day = (
        daily[daily["cycle_label"] == "Decline"].sort_values("date").head(1)
    )

    def md_table(df: pd.DataFrame, columns: list[str]) -> str:
        # Render the selected columns of *df* as a GitHub-style Markdown table.
        if df.empty:
            return "_No data available._"
        header = "| " + " | ".join(columns) + " |"
        divider = "| " + " | ".join(["---"] * len(columns)) + " |"
        body_lines = []
        for _, row in df[columns].iterrows():
            body_lines.append("| " + " | ".join(str(item) for item in row) + " |")
        return "\n".join([header, divider, *body_lines])

    # Stance distribution table, or a note explaining why it is absent.
    if stance_mix is not None and not stance_mix.empty:
        stance_table = stance_mix.assign(
            share_pct=stance_mix["share_pct"].map(lambda x: f"{x:.2f}%")
        ).rename(
            columns={"stance": "Stance", "messages": "Messages", "share_pct": "Share %"}
        )
        stance_section = [
            "## Stance Distribution",
            md_table(stance_table, list(stance_table.columns)),
        ]
    else:
        note = (
            "_Skipped because group mode = lifecycle-bend._"
            if artifacts.group_mode == "lifecycle-bend"
            else "_Stance data unavailable._"
        )
        stance_section = [
            "## Stance Distribution",
            note,
        ]
    # Lifecycle-by-stance participation table, or a note when unavailable.
    if lifecycle is not None and not lifecycle.empty:
        lifecycle_table = lifecycle.copy().rename(
            columns={
                "phase": "Phase",
                "Support": "Support",
                "Support (%)": "Support %",
                "Neutral": "Neutral",
                "Neutral (%)": "Neutral %",
                "Oppose": "Oppose",
                "Oppose (%)": "Oppose %",
                "phase_total": "Total",
                "phase_share_pct": "Phase %",
            }
        )
        percentage_cols = [col for col in lifecycle_table.columns if col.endswith("%")]
        # Format percentage columns; values already formatted as strings
        # ending in '%' pass through, NaN becomes an empty cell.
        for col in percentage_cols:
            lifecycle_table[col] = lifecycle_table[col].map(lambda x: f"{x}" if isinstance(x, str) and x.endswith("%") else (f"{x:.2f}%" if pd.notna(x) else ""))
        lifecycle_section = [
            "## Lifecycle x Stance Participation",
            md_table(
                lifecycle_table[
                    ["Phase", "Support", "Support %", "Neutral", "Neutral %", "Oppose", "Oppose %", "Total", "Phase %"]
                ],
                ["Phase", "Support", "Support %", "Neutral", "Neutral %", "Oppose", "Oppose %", "Total", "Phase %"],
            ),
        ]
    else:
        note = (
            "_Lifecycle x stance table skipped (group mode = lifecycle-bend)._"
            if artifacts.group_mode == "lifecycle-bend"
            else "_Lifecycle data unavailable._"
        )
        lifecycle_section = [
            "## Lifecycle Participation",
            note,
        ]

    # Bucket representative posts by phase for the content section below.
    reps_by_phase = defaultdict(list)
    for _, row in representative_posts.iterrows():
        reps_by_phase[row["phase"]].append(row)

    timeline_section = [
        "## Timeline Overview",
        f"- Total cleaned messages: {total_messages}.",
        "- Phase totals: " + ", ".join(
            f"{phase} {int(count)} msgs ({count/total_messages*100:.2f}%)"
            for phase, count in phase_totals.items()
        )
    ]
    def format_event(df: pd.DataFrame, label: str) -> str:
        # Render a one-row event frame as a bullet line; empty means "not detected".
        if df.empty:
            return f"- {label}: not detected."
        r = df.iloc[0]
        return (
            f"- {label}: {r['date'].date()} "
            f"({int(r['messages'])} msgs, {r['cycle_label']}, {r['share_pct']:.2f}% share)."
        )

    timeline_section.extend(
        [
            format_event(surge_day, "First notable surge (>20 msgs)"),
            format_event(peak_day, "Absolute peak"),
            format_event(decline_day, "Initial decline day"),
        ]
    )
    timeline_section.append("\nTop 5 volume days:")
    top_table = top_days_display[["Date", "Phase", "Messages", "Share %"]]
    timeline_section.append(md_table(top_table, list(top_table.columns)))

    # BEND narrative highlights produced by compute_bend_outputs.
    bend_section = ["## Narrative Highlights (BEND)"]
    if artifacts.group_mode == "lifecycle-bend":
        bend_section.append(
            "_Lifecycle-only aggregation active; see `lifecycle_bend_summary.csv` for full tag statistics._"
        )
    if bend_highlights:
        bend_section.extend(f"- {line}" for line in bend_highlights)
    else:
        bend_section.append("_No BEND data available._")

    # Up to two representative posts per phase, in canonical phase order.
    reps_section = ["## Representative Content"]
    if reps_by_phase:
        for phase in PHASE_ORDER:
            rows = reps_by_phase.get(phase, [])
            if not rows:
                continue
            reps_section.append(f"- **{phase}**:")
            for row in rows[:2]:
                snippet = row["content_snippet"] or "(content unavailable)"
                bend_info = row.get("bend") or ""
                reasoning_info = row.get("bend_reasoning") or ""
                extras: list[str] = []
                if bend_info:
                    extras.append(f"BEND: {bend_info}")
                if reasoning_info:
                    extras.append(f"Reasoning: {reasoning_info}")
                extra_text = f" [{'; '.join(extras)}]" if extras else ""
                reps_section.append(
                    f"  - {row['time']} | {row['stance']} | {row['interactions']} interactions "
                    f"(Likes {row['like']}, Comments {row['comment']}, Forwards {row['forward']}): {snippet}{extra_text}"
                )
    else:
        reps_section.append("_No representative posts extracted (missing interaction columns)._")

    # Inventory of generated artifacts, varying with the group mode.
    files_section = [
        "## Files Generated",
        "- daily_volume_summary.csv",
    ]
    if artifacts.group_mode == "lifecycle-stance-bend":
        files_section.append("- stance_mix_summary.csv")
        files_section.append("- lifecycle_stance_participation.csv")
        files_section.append("- lifecycle_stance_bend_summary.csv")
    else:
        files_section.append("- lifecycle_bend_summary.csv")
    files_section.extend(
        [
            "- representative_posts_top10_per_phase.csv",
            "- ANALYSIS_RESULTS_SUMMARY.md (this file)",
        ]
    )

    # Join all sections with blank lines between Markdown blocks.
    sections = [
        "# Analysis Summary",
        "All metrics were generated offline using `analysis_scripts/build_metrics.py`.",
        *timeline_section,
        *stance_section,
        *lifecycle_section,
        *bend_section,
        *reps_section,
        *files_section,
    ]
    return "\n\n".join(sections).strip() + "\n"





def parse_args(args: Iterable[str] | None = None) -> argparse.Namespace:
    """Parse command-line arguments for the analytics pipeline.

    *args* defaults to None, letting argparse read sys.argv; pass an explicit
    sequence in tests. Alias flags (`--time-alias`, `--position-alias`) are
    repeatable and collected into lists.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--input",
        required=True,
        type=Path,
        help="Cleaned dataset with lifecycle labels (e.g., TW_Outage_Opinion_clean_with_cycle.csv).",
    )
    parser.add_argument(
        "--output-dir",
        type=Path,
        default=Path("analysis_data"),
        help="Directory for generated artifacts (default: analysis_data/).",
    )
    parser.add_argument(
        "--encoding",
        default="gb18030",
        help="File encoding used to read the dataset.",
    )
    parser.add_argument(
        "--time-column",
        default="time",
        help="Primary timestamp column name (e.g., 'time', 'publish_time').",
    )
    parser.add_argument(
        "--time-alias",
        action="append",
        dest="time_aliases",
        help="Additional timestamp column names to try.",
    )
    parser.add_argument(
        "--position-column",
        default="position",
        help="Primary stance column name (e.g., 'position', '立场').",
    )
    parser.add_argument(
        "--position-alias",
        action="append",
        dest="position_aliases",
        help="Additional stance column names to try.",
    )
    parser.add_argument(
        "--like-column",
        help="Column storing like counts (fallbacks include 'Like', 'likes_count').",
    )
    parser.add_argument(
        "--comment-column",
        help="Column storing comment counts (fallbacks include 'Comment', 'comments_count').",
    )
    parser.add_argument(
        "--forward-column",
        help="Column storing share/forward counts (fallbacks include 'Forward', 'share_count').",
    )
    parser.add_argument(
        "--viewpoint-column",
        help="Column storing viewpoint or summary text (fallbacks include 'Viewpoint', '观点').",
    )
    parser.add_argument(
        "--top-posts",
        type=int,
        default=10,
        help="Number of high-interaction posts to capture per phase (default: 10).",
    )
    parser.add_argument(
        "--group-mode",
        choices=("lifecycle-stance-bend", "lifecycle-bend"),
        default="lifecycle-stance-bend",
        help=(
            "Grouping granularity for lifecycle/BEND outputs. "
            "Choose 'lifecycle-stance-bend' (default) to keep stance dimension "
            "or 'lifecycle-bend' to ignore stance."
        ),
    )
    return parser.parse_args(args=args)





def run_pipeline(artifacts: DatasetArtifacts) -> None:
    """Run every analysis stage and write all artifacts plus the Markdown report."""
    ensure_dir(artifacts.output_dir)
    raw = pd.read_csv(artifacts.input_path, encoding=artifacts.encoding)
    data = normalize_input_columns(raw, artifacts)

    # Each compute_* step writes its own CSV and returns the frame used below.
    daily = compute_daily_volume(data, artifacts)
    stance_mix = compute_stance_mix(data, artifacts)
    lifecycle = compute_lifecycle_stance(data, artifacts)
    bend_summary, bend_highlights = compute_bend_outputs(data, artifacts)
    reps = compute_representative_posts(data, artifacts)

    report = build_markdown(
        artifacts,
        daily,
        stance_mix,
        lifecycle,
        bend_summary,
        bend_highlights,
        reps,
    )
    report_path = artifacts.output_dir / "ANALYSIS_RESULTS_SUMMARY.md"
    report_path.write_text(report, encoding="utf-8")





def main(args: Iterable[str] | None = None) -> None:
    """CLI entry point: resolve column candidates from flags and run the pipeline."""
    opts = parse_args(args)
    time_aliases = [alias.strip() for alias in (opts.time_aliases or []) if alias]
    position_aliases = [alias.strip() for alias in (opts.position_aliases or []) if alias]
    artifacts = DatasetArtifacts(
        input_path=opts.input,
        output_dir=opts.output_dir,
        encoding=opts.encoding,
        top_posts_per_phase=opts.top_posts,
        # Each candidate tuple: explicit flag first, then aliases, then the
        # built-in fallback names (English and Chinese variants).
        time_candidates=build_candidates(
            opts.time_column,
            time_aliases,
            ["time", "publish_time", "发布时间", "時間", "时间"],
        ),
        position_candidates=build_candidates(
            opts.position_column,
            position_aliases,
            ["position", "立场", "stance"],
        ),
        like_candidates=build_candidates(
            opts.like_column,
            [],
            ["Like", "likes_count", "点赞数", "點讚數"],
        ),
        comment_candidates=build_candidates(
            opts.comment_column,
            [],
            ["Comment", "comments_count", "评论数", "留言数"],
        ),
        forward_candidates=build_candidates(
            opts.forward_column,
            [],
            ["Forward", "share_count", "reposts_count", "转发数", "分享数"],
        ),
        viewpoint_candidates=build_candidates(
            opts.viewpoint_column,
            [],
            ["Viewpoint", "观点", "Viewpoint_CN"],
        ),
        group_mode=opts.group_mode,
    )
    run_pipeline(artifacts)





if __name__ == "__main__":

    main()

