#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from __future__ import annotations
from pathlib import Path
from datetime import datetime
import json
import os
import re
from typing import Dict, List, Tuple
import pandas as pd

# ========== 路径 ==========
def project_root() -> Path:
    """Return the project root directory as an absolute Path.

    This file lives under utils/flipcrawler/, so the root is two levels
    above this file's directory:
    .../FlipCast/utils/flipcrawler/save.py -> parents[2] = .../FlipCast
    """
    here = Path(__file__).resolve()
    return here.parents[2]

def data_root() -> Path:
    """Return <project_root>/data, creating the directory if needed."""
    path = project_root() / "data"
    path.mkdir(parents=True, exist_ok=True)
    return path

def cumulative_dir() -> Path:
    """Return <data_root>/cumulative, creating the directory if needed."""
    path = data_root() / "cumulative"
    path.mkdir(parents=True, exist_ok=True)
    return path

def _next_run_dir_name(digits: int = 5) -> str:
    """Return the next zero-padded run-directory name under data/.

    Scans data/ for directories whose names look like 00001, 00002, ...
    (exactly *digits* digits) and returns the successor of the highest
    one; returns '00001' when none exist yet.
    """
    rx = re.compile(rf"^\d{{{digits}}}$")
    numbers = [
        int(entry.name)
        for entry in data_root().iterdir()
        if entry.is_dir() and rx.match(entry.name)
    ]
    next_number = max(numbers) + 1 if numbers else 1
    return f"{next_number:0{digits}d}"

def make_run_dir(digits: int = 5) -> Path:
    """Create the next numbered run directory under data/ and return it.

    Also refreshes a 'latest' symlink in data/ pointing at the new run
    directory. Symlink maintenance is best-effort: on platforms or
    filesystems without symlink support (e.g. Windows without the
    required privilege) the OSError is swallowed and only the run
    directory is returned.
    """
    name = _next_run_dir_name(digits=digits)
    run = data_root() / name
    run.mkdir(parents=True, exist_ok=True)

    latest = data_root() / "latest"
    try:
        # Remove a previous link (or stale file). A broken symlink reports
        # exists() == False, so is_symlink() must be checked as well.
        if latest.exists() or latest.is_symlink():
            latest.unlink()
        latest.symlink_to(name)  # relative link: data/latest -> data/<name>
    except OSError:
        # Only filesystem/symlink failures are expected here; narrowing
        # from `except Exception` keeps programming errors visible.
        pass
    return run

# ========== 写入单次抓取 ==========
def write_category_csv(df: pd.DataFrame, category: str, run_dir: Path) -> Path:
    """Write one category's DataFrame to <run_dir>/B站TOP100-<category>.csv.

    Uses utf_8_sig (UTF-8 with BOM) so spreadsheet apps open the file with
    Chinese text intact. Returns the written path.
    """
    target = run_dir / f"B站TOP100-{category}.csv"
    df.to_csv(target, index=False, encoding="utf_8_sig")
    return target

# ========== 累计/递增 ==========
def cumulative_path(category: str) -> Path:
    """Return the cumulative CSV path for *category* under data/cumulative/."""
    filename = f"B站TOP100-{category}.csv"
    return cumulative_dir() / filename

def _norm_columns(df: pd.DataFrame) -> pd.DataFrame:
    cols = [
        "bvid", "视频标题", "视频地址", "作者",
        "播放数", "弹幕数", "投币数", "点赞数", "分享数", "收藏数",
        "抓取时间",
    ]
    out = df.copy()
    for c in cols:
        if c not in out.columns:
            out[c] = pd.NA if c != "抓取时间" else datetime.now().isoformat(timespec="seconds")
    return out[cols]

def incremental_update(category: str, current_df: pd.DataFrame) -> Tuple[int, int, Path]:
    """Merge the current crawl into the category's cumulative CSV.

    Deduplicates by bvid: rows whose bvid already exists in the cumulative
    file, or that repeat within *current_df*, are skipped. Rows with a
    missing bvid are never counted as new — previously they were appended
    on every run (the cumulative bvid set is built with dropna(), so an NA
    bvid can never match) and accumulated without bound.

    Returns (newly added row count, total rows after merge, cumulative path).
    """
    current_df = _norm_columns(current_df)
    # Keep only the first occurrence of each bvid within this crawl.
    current_df = current_df.drop_duplicates(subset="bvid", keep="first")
    cum_path = cumulative_path(category)

    if not cum_path.exists():
        current_df.to_csv(cum_path, index=False, encoding="utf_8_sig")
        return (len(current_df), len(current_df), cum_path)

    prev = pd.read_csv(cum_path)
    if "bvid" not in prev.columns:
        # Legacy files lack a bvid column: recover it from the video URL.
        if "视频地址" in prev.columns:
            prev["bvid"] = prev["视频地址"].str.extract(r"/video/([A-Za-z0-9]+)", expand=False)
        else:
            prev["bvid"] = pd.NA

    prev = _norm_columns(prev)

    prev_bvids = set(prev["bvid"].dropna().astype(str))
    # Only rows with a real, previously-unseen bvid count as new; NA bvids
    # are excluded so they are not re-appended on every run.
    new_mask = current_df["bvid"].notna() & ~current_df["bvid"].astype(str).isin(prev_bvids)
    new_rows = current_df.loc[new_mask]

    updated = pd.concat([prev, new_rows], ignore_index=True)
    sort_cols = [c for c in ["抓取时间", "点赞数", "播放数"] if c in updated.columns]
    if sort_cols:
        # A scalar False applies descending order to every sort column.
        updated = updated.sort_values(by=sort_cols, ascending=False)

    updated.to_csv(cum_path, index=False, encoding="utf_8_sig")
    return (len(new_rows), len(updated), cum_path)

# ========== manifest ==========
def write_manifest(run_dir: Path, items: List[Dict]) -> Path:
    """Serialize a manifest.json describing this run into *run_dir*.

    The manifest records the run directory, a second-precision creation
    timestamp, and the caller-supplied per-category items. Returns the
    path of the written manifest.
    """
    payload = {
        "run_dir": str(run_dir),
        "created_at": datetime.now().isoformat(timespec="seconds"),
        "items": items,
    }
    out = run_dir / "manifest.json"
    text = json.dumps(payload, ensure_ascii=False, indent=2)
    out.write_text(text, encoding="utf-8")
    return out
