﻿"""Excel to VideoMetadata mapping and field cleaning utilities.

  publish time formatting, duration parsing and categorization.
"""
from __future__ import annotations

import logging
import re
import warnings
import time
from dataclasses import asdict
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

# Silence openpyxl UserWarnings globally before pandas/openpyxl read any
# .xlsx file below (openpyxl emits noisy warnings on some workbooks).
warnings.filterwarnings(
    "ignore",
    category=UserWarning,
    module="openpyxl",
)

from src.models.video_metadata import (
    VideoMetadata,
    Engagement,
    normalize_cn_unit,
    parse_duration_str,
    categorize_duration,
    make_copy_name,
    make_copy_name_author_date,
    build_author_date_key,
)

import pandas as pd
import csv
from pathlib import Path

from src.utils.config_loader import load_config
from src.utils.path_utils import ensure_directories_from_config
from src.utils.text_cleaner import clean_zh_text
from src.processors.asr_processor import get_asr_text_for_vm, batch_get_asr_texts
from src.processors import text_analyzer, visual_analyzer

# Module-level logger; handler/level configuration is left to the application.
LOGGER = logging.getLogger(__name__)

# Column name mapping (internal field name -> Chinese Excel header).
# Values are the exact column headers expected in the source spreadsheet.
COLS = {
    "author": "博主名称",          # blogger / author name
    "original_title": "视频标题",  # video title
    "video_url": "视频链接",       # video link
    "likes": "视频点赞数",         # like count
    "publish_time": "发布时间",    # publish time
    "duration_str": "视频时长",    # video duration string
    "comments": "评论数",          # comment count
    "favorites": "收藏数",         # favorite count
    "shares": "转发数",            # share / forward count
}


def extract_topics(title: str) -> List[str]:
    """Pull '#'-prefixed topic tags out of *title*, deduplicated in order.

    Empty tokens and a lone '#' are ignored; each returned tag keeps its
    leading '#'.
    """
    if not title:
        return []
    # A dict preserves insertion order, giving order-stable deduplication.
    ordered: Dict[str, None] = {}
    for token in re.findall(r"#([^\s#]+)", title):
        token = token.strip()
        if token:
            ordered.setdefault(f"#{token}", None)
    return list(ordered)


def clean_processed_title(title: str) -> str:
    """Strip topic tags and stray '#' characters, collapsing whitespace.

    Returns the natural-language portion of *title* only.
    """
    if not title:
        return ""
    # Drop whole '#topic' segments first, then any leftover '#'.
    without_topics = re.sub(r"#\S+", " ", title)
    without_hashes = without_topics.replace("#", " ")
    # split()/join collapses any whitespace run and trims both ends.
    return " ".join(without_hashes.split())


def clean_video_url(url: str) -> str:
    """Return *url* trimmed of surrounding spaces, backticks and quotes."""
    if url is None:
        return ""
    text = str(url).strip()
    # Same peel order as before: backticks, double quotes, single quotes.
    for wrapper in ("`", '"', "'"):
        text = text.strip(wrapper)
    return text


def parse_video_id(url: str) -> Optional[str]:
    """Extract the numeric Douyin video ID from *url*, or None.

    Expects the form 'https://www.douyin.com/video/{id}' ('www.' optional).
    """
    if not url:
        return None
    match = re.search(r"https?://(?:www\.)?douyin\.com/video/([0-9]+)", url)
    return match.group(1) if match else None


def format_publish_time(s: Any) -> str:
    """Standardize publish time to 'YYYY-MM-DD HH:MM:SS'.

    Date-only inputs are normalized to 'YYYY-MM-DD' (no time portion is
    invented). Returns 'NA' for missing or unparseable values.
    """
    if s is None:
        return "NA"
    raw = str(s).strip()
    if not raw:
        return "NA"
    # Try time-included formats first.
    fmts_with_time = [
        "%Y/%m/%d %H:%M:%S",
        "%Y-%m-%d %H:%M:%S",
        "%Y.%m.%d %H:%M:%S",
    ]
    for fmt in fmts_with_time:
        try:
            return datetime.strptime(raw, fmt).strftime("%Y-%m-%d %H:%M:%S")
        except ValueError:  # strptime signals a non-matching format this way
            pass
    # Date-only formats. BUGFIX: '%Y-%m-%d' was previously mistyped as
    # '%Y-%m/%d', so ISO dates without a time fell through to 'NA'.
    fmts_date_only = ["%Y/%m/%d", "%Y-%m-%d", "%Y.%m.%d"]
    for fmt in fmts_date_only:
        try:
            return datetime.strptime(raw, fmt).strftime("%Y-%m-%d")
        except ValueError:
            pass
    LOGGER.warning("invalid_format: publish_time '%s'", raw)
    return "NA"


def derive_video_type_and_clean_topics(topics: List[str]) -> Tuple[str, List[str]]:
    """Derive `video_type` from the topic tags and drop the mapped tag.

    Any tag equal to '#vlog' (case-insensitive, ignoring leading '#')
    sets video_type to 'vlog' and is removed from the returned list;
    otherwise video_type is 'NA' and the topics are returned unchanged.
    """
    remaining = [tag for tag in topics if tag.lstrip("#").lower() != "vlog"]
    # A shorter list means at least one '#vlog' tag was filtered out.
    video_type = "vlog" if len(remaining) != len(topics) else "NA"
    return video_type, remaining


def map_row_to_metadata(row: Dict[str, Any]) -> VideoMetadata:
    """Map one Excel row (Chinese headers) into a VideoMetadata.

    Missing or invalid fields fall back to the project's 'NA' placeholders.
    """
    author = str(row.get(COLS["author"], "")).strip() or "NA"
    raw_title = str(row.get(COLS["original_title"], "")).strip()

    # Derive the video type from the title's topics; '#vlog' is consumed.
    video_type, topics = derive_video_type_and_clean_topics(extract_topics(raw_title))
    natural_title = clean_processed_title(raw_title)

    # The URL cell is passed through verbatim; no cleaning or ID parsing here.
    url_cell = row.get(COLS["video_url"], "")
    video_url = "NA" if url_cell is None else str(url_cell)

    publish_time = format_publish_time(row.get(COLS["publish_time"]))

    duration_text = str(row.get(COLS["duration_str"], "")).strip()
    duration_seconds = parse_duration_str(duration_text)

    vm = VideoMetadata(
        author=author,
        publish_time=publish_time,
        original_title=raw_title or "NA",
        processed_title=natural_title or "NA",
        topics=topics,
        video_url=video_url,
        video_id="NA",
        duration_seconds=duration_seconds,
        duration_category=categorize_duration(duration_seconds),
        engagement=Engagement(
            likes=normalize_cn_unit(row.get(COLS["likes"])),
            comments=normalize_cn_unit(row.get(COLS["comments"])),
            favorites=normalize_cn_unit(row.get(COLS["favorites"])),
            shares=normalize_cn_unit(row.get(COLS["shares"])),
        ),
        copy_name=make_copy_name_author_date(author, publish_time),
    )
    vm.video_type = video_type
    LOGGER.debug("Mapped row to metadata: %s", asdict(vm))
    return vm


def map_dataframe_to_metadata(df: pd.DataFrame, max_rows: Optional[int] = None) -> List[VideoMetadata]:
    """Map a DataFrame with Chinese headers to a list of VideoMetadata.

    A row that fails to map is logged and skipped (error isolation).
    `max_rows` caps how many rows are processed, for sampling/tests.
    """
    rows = df.to_dict(orient="records")
    if max_rows is not None:
        rows = rows[:max_rows]

    result: List[VideoMetadata] = []
    for index, raw_row in enumerate(rows):
        try:
            result.append(map_row_to_metadata(raw_row))
        except Exception as exc:
            LOGGER.error("processing_failed: row %d error: %s", index, exc)
    return result


def read_excel_and_map(excel_path: str, sheet_name: Optional[str] = None, max_rows: Optional[int] = None) -> List[VideoMetadata]:
    """Read an Excel file and map up to `max_rows` rows to VideoMetadata.

    Reads via the openpyxl engine (its warnings are suppressed at module
    import time). Read/parse failures are logged and yield an empty list.
    """
    try:
        frame = pd.read_excel(excel_path, sheet_name=sheet_name, engine="openpyxl")
    except Exception as exc:
        LOGGER.error("invalid_format: excel read failed for %s: %s", excel_path, exc)
        return []

    # pandas returns a dict of DataFrames when sheet_name is None.
    if isinstance(frame, dict):
        try:
            frame = next(iter(frame.values()))
        except Exception as exc:
            LOGGER.error("invalid_format: excel content not a DataFrame: %s", exc)
            return []

    return map_dataframe_to_metadata(frame, max_rows=max_rows)


# === Batch processing entry returning (metas, errors) ===

def process_excel_to_metadata(excel_path: str, sheet_name: Optional[str] = None, max_rows: Optional[int] = None) -> Tuple[List[VideoMetadata], List[Dict[str, Any]]]:
    """Read an Excel file and return (metas, errors).

    Errors cover both file-level read failures and per-row mapping
    exceptions; a bad row never stops the batch (error isolation).
    """
    errors: List[Dict[str, Any]] = []
    try:
        frame = pd.read_excel(excel_path, sheet_name=sheet_name, engine="openpyxl")
    except Exception as exc:
        LOGGER.error("invalid_format: excel read failed for %s: %s", excel_path, exc)
        errors.append({"stage": "read_excel", "error": "invalid_format", "detail": str(exc)})
        return [], errors

    # pandas returns a dict of DataFrames when sheet_name is None.
    if isinstance(frame, dict):
        try:
            frame = next(iter(frame.values()))
        except Exception as exc:
            LOGGER.error("invalid_format: excel content not a DataFrame: %s", exc)
            errors.append({"stage": "read_excel", "error": "invalid_format", "detail": str(exc)})
            return [], errors

    rows = frame.to_dict(orient="records")
    if max_rows is not None:
        rows = rows[:max_rows]

    metas: List[VideoMetadata] = []
    for index, raw_row in enumerate(rows):
        try:
            metas.append(map_row_to_metadata(raw_row))
        except Exception as exc:
            LOGGER.error("processing_failed: row %d error: %s", index, exc)
            errors.append({"stage": "map_row", "row_index": index, "error": "processing_failed", "detail": str(exc)})

    return metas, errors


# === CSV export with integrity checks and TXT generation ===

def _sanitize_filename_component(s: str) -> str:
    """Sanitize a component for safe Windows filename usage."""
    if not s:
        return "NA"
    # Replace forbidden chars \/:*?"<>| with underscore
    return re.sub(r"[\\/:*?\"<>|]", "_", s)


def build_author_time(vm: VideoMetadata) -> str:
    """Compose the 'author+publish_time' stem used for TXT filenames."""
    fields = (vm.author or "NA", vm.publish_time or "NA")
    return "+".join(_sanitize_filename_component(field) for field in fields)


def _get_retry_conf(cfg: Dict[str, Any]) -> Dict[str, Any]:
    r = cfg.get("processing", {}).get("retry", {})
    return {
        "max_retries": int(r.get("max_retries", 3)),
        "backoff_initial_seconds": float(r.get("backoff_initial_seconds", 1)),
        "backoff_multiplier": float(r.get("backoff_multiplier", 2)),
        "backoff_max_seconds": float(r.get("backoff_max_seconds", 30)),
    }


def _retry(op, *, max_retries: int, initial: float, multiplier: float, max_delay: float):
    attempt = 0
    delay = initial
    last_exc = None
    while attempt <= max_retries:
        try:
            return op()
        except Exception as exc:
            last_exc = exc
            if attempt == max_retries:
                break
            time.sleep(min(delay, max_delay))
            delay = min(delay * multiplier, max_delay)
            attempt += 1
    raise last_exc


def write_metadata_txt_with_text(vm: VideoMetadata, output_dir: Path, cleaned_text: str, analysis: Dict[str, str]) -> Path:
    """Write the per-video TXT report using a precomputed ASR text.

    The filename stem is 'author+publish_time' (sanitized). Returns the
    path of the written file.
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    stem = build_author_time(vm)
    path = output_dir / f"{stem}.txt"
    seconds = vm.duration_seconds if vm.duration_seconds is not None else "NA"
    lines = [
        "## 核心元数据摘要\n",
        f"作者: {vm.author}\n",
        f"发布时间: {vm.publish_time}\n",
        f"原始标题: {vm.original_title}\n",
        f"处理后标题: {vm.processed_title}\n",
        f"话题: {', '.join(vm.topics)}\n",
        f"视频链接: {vm.video_url}\n",
        f"视频ID: {vm.video_id}\n",
        f"时长秒数: {seconds}\n",
        f"时长分类: {vm.duration_category}\n",
        (
            "互动: "
            f"点赞数:{vm.engagement.likes};"
            f"评论数:{vm.engagement.comments};"
            f"收藏数:{vm.engagement.favorites};"
            f"转发数:{vm.engagement.shares}\n"
        ),
        f"副本名: {stem}\n",
        # Model-analysis fields follow.
        f"主题: {analysis.get('theme', 'NA')}\n",
        f"感情色彩: {analysis.get('sentiment', 'NA')}\n",
        f"语言风格: {analysis.get('style', 'NA')}\n",
        f"标题情感表达: {analysis.get('title_emotion', 'NA')}\n",
        f"标题平台话题: {analysis.get('title_topic', 'NA')}\n",
        f"出境人物: {analysis.get('actor', 'NA')}\n",
        f"视听符号: {analysis.get('symbols', 'NA')}\n",
        "\n## 视频脚本\n",
        cleaned_text or "NA",
    ]
    with path.open("w", encoding="utf-8") as handle:
        handle.writelines(lines)
    LOGGER.info("Wrote TXT: %s", path)
    return path


def export_excel_to_csv_and_txt(
    excel_path: str,
    config_path: str = "config/config.yaml",
    sheet_name: Optional[str] = None,
    max_rows: Optional[int] = None,
) -> Tuple[List[VideoMetadata], List[Dict[str, Any]], Dict[str, int], Path, List[Path]]:
    """Export pipeline: Excel -> VideoMetadata -> CSV + TXT.

    Returns (metas, errors, error_summary, csv_path, txt_paths).
    - Performs data integrity checks before the CSV write (an empty
      video_id becomes 'NA' and is logged).
    - Output locations (result CSV, TXT dir, mapping report) come from the
      config file, defaulting to data/result/*.
    """
    # Load config and make sure all configured output directories exist.
    cfg = load_config(config_path)
    ensure_directories_from_config(config_path)

    metas, errors = process_excel_to_metadata(excel_path, sheet_name=sheet_name, max_rows=max_rows)

    # Resolve video_id from the URL when it is still NA, to enable ASR/VLM.
    for vm in metas:
        try:
            if not vm.video_id or str(vm.video_id).strip().lower() == "na":
                url = clean_video_url(vm.video_url or "")
                vid = parse_video_id(url or "")
                if vid:
                    vm.video_id = vid
                    # BUGFIX: the author+date copy_name override was dead code
                    # (it had been fused onto a comment line), leaving a
                    # video_id-based copy_name. Uniformly use author+date so it
                    # matches the local MP4 naming scheme.
                    vm.copy_name = make_copy_name_author_date(vm.author, vm.publish_time)
        except Exception as exc:
            LOGGER.warning("video_id_resolve_from_url_failed: %s", exc)

    # Topic derivation/cleaning already happened in `map_row_to_metadata`.
    # Integrity checks before the CSV write.
    for vm in metas:
        if not vm.video_id or str(vm.video_id).strip().lower() == "na":
            LOGGER.warning("data_integrity: video_id empty for author=%s, marking NA", vm.author)
            vm.video_id = "NA"
        # Ensure copy_name is present for rows that do have a video_id.
        if (not vm.copy_name) and vm.video_id and str(vm.video_id).strip().lower() != "na":
            vm.copy_name = make_copy_name(vm.video_id)

    # === Batch ASR for TXT content (before CSV to enable analysis fields) ===
    cleaned_texts: List[str] = []
    try:
        cleaned_texts = batch_get_asr_texts(metas, cfg)
    except Exception as exc:
        LOGGER.warning("asr_batch_fallback: batch_get_asr_texts failed: %s", exc)
        # Fall back to the cleaned titles so downstream analysis still runs.
        cleaned_texts = [clean_zh_text(vm.processed_title or "") for vm in metas]

    # === Text & Visual Analysis (M8/M9) ===
    # Prepare per-item analysis dicts to merge into CSV rows.
    analysis_results: List[Dict[str, str]] = []
    mappings: List[Dict[str, str]] = []
    for vm, txt in zip(metas, cleaned_texts):
        # Text analysis; the analyzer handles its own fallback internally.
        ta_res = {"theme": "NA", "sentiment": "NA", "style": "NA", "title_emotion": "NA", "title_topic": "NA"}
        try:
            ta_res = text_analyzer.analyze_video_script(txt or "", cfg)
        except Exception as exc:
            LOGGER.warning("text_analyzer_fallback: %s", exc)
        # Visual analysis: stays at NA when no usable local video is found.
        vlm_res = {"actor": "NA", "symbols": "NA"}
        try:
            # Local MP4 lookup is keyed on author + publish date (aligned with
            # the raw naming under paths.input_videos); video_id is not used.
            input_root = cfg.get("paths", {}).get("input_videos", "S:/")
            candidate_path = None
            author = vm.author or "NA"
            publish_date = (vm.publish_time or "NA").split(" ")[0]
            from src.utils.media_utils import find_mp4_for_vm
            mp4 = find_mp4_for_vm(author, vm.publish_time or "", input_root)
            if mp4:
                candidate_path = str(mp4)
            mappings.append({
                "author": author,
                "publish_date": publish_date,
                "author_date_key": build_author_date_key(vm.author, vm.publish_time or "NA"),
                "matched_mp4_path": candidate_path or "NA",
            })
            if candidate_path:
                vlm_res = visual_analyzer.analyze_video_content(candidate_path, cfg)
        except Exception as exc:
            LOGGER.warning("visual_analyzer_fallback_NA: %s", exc)
        merged = {
            "theme": ta_res.get("theme", "NA") or "NA",
            "sentiment": ta_res.get("sentiment", "NA") or "NA",
            "style": ta_res.get("style", "NA") or "NA",
            "title_emotion": ta_res.get("title_emotion", "NA") or "NA",
            "title_topic": ta_res.get("title_topic", "NA") or "NA",
            "actor": vlm_res.get("actor", "NA") or "NA",
            "symbols": vlm_res.get("symbols", "NA") or "NA",
        }
        analysis_results.append(merged)

    # Resolve configured CSV column order and output paths.
    csv_columns: List[str] = cfg.get("csv", {}).get("columns", [])
    output_csv = Path(cfg.get("paths", {}).get("output_csv", "data/result/result.csv"))
    output_txt_dir = Path(cfg.get("paths", {}).get("output_txt", "data/result/txt_files"))

    # Write the CSV with retry (analysis fields merged into each row).
    retry_conf = _get_retry_conf(cfg)

    def _write_csv_once():
        with output_csv.open("w", encoding="utf-8", newline="") as f:
            writer = csv.DictWriter(f, fieldnames=csv_columns)
            writer.writeheader()
            for vm, res in zip(metas, analysis_results):
                row = vm.to_csv_row()
                row.update(res)
                if not row.get("video_id") or str(row.get("video_id")).strip().lower() == "na":
                    row["video_id"] = "NA"
                writer.writerow(row)
        return True

    try:
        _retry(
            _write_csv_once,
            max_retries=retry_conf["max_retries"],
            initial=retry_conf["backoff_initial_seconds"],
            multiplier=retry_conf["backoff_multiplier"],
            max_delay=retry_conf["backoff_max_seconds"],
        )
        LOGGER.info("Wrote CSV: %s (rows=%d)", output_csv, len(metas))
    except Exception as exc:
        LOGGER.error("processing_failed: write csv failed: %s", exc)
        errors.append({"stage": "write_csv", "error": "processing_failed", "detail": str(exc)})

    # === Mapping report (author+date -> matched local MP4 path) ===
    output_mapping = Path(cfg.get("paths", {}).get("output_mapping", "data/result/author_times.csv"))

    def _write_mapping_once():
        with output_mapping.open("w", encoding="utf-8", newline="") as f:
            writer = csv.DictWriter(f, fieldnames=["author", "publish_date", "author_date_key", "matched_mp4_path"])
            writer.writeheader()
            for m in mappings:
                writer.writerow(m)
        return True

    try:
        _retry(
            _write_mapping_once,
            max_retries=retry_conf["max_retries"],
            initial=retry_conf["backoff_initial_seconds"],
            multiplier=retry_conf["backoff_multiplier"],
            max_delay=retry_conf["backoff_max_seconds"],
        )
        LOGGER.info("Wrote mapping report: %s (rows=%d)", output_mapping, len(mappings))
    except Exception as exc:
        LOGGER.error("processing_failed: write mapping failed: %s", exc)
        errors.append({"stage": "write_mapping", "error": "processing_failed", "detail": str(exc)})

    # Write one TXT per metadata entry with retry, reusing precomputed texts.
    txt_paths: List[Path] = []
    for vm, txt, res in zip(metas, cleaned_texts, analysis_results):
        try:
            path = _retry(
                lambda: write_metadata_txt_with_text(vm, output_txt_dir, txt, res),
                max_retries=retry_conf["max_retries"],
                initial=retry_conf["backoff_initial_seconds"],
                multiplier=retry_conf["backoff_multiplier"],
                max_delay=retry_conf["backoff_max_seconds"],
            )
            txt_paths.append(path)
        except Exception as exc:
            LOGGER.error("processing_failed: write txt failed for %s: %s", vm.author, exc)
            errors.append({"stage": "write_txt", "error": "processing_failed", "detail": str(exc), "author": vm.author})

    # Aggregate an error-kind -> count summary.
    error_summary: Dict[str, int] = {}
    for e in errors:
        key = e.get("error", "unknown")
        error_summary[key] = error_summary.get(key, 0) + 1

    return metas, errors, error_summary, output_csv, txt_paths


def get_asr_clean_text(vm: VideoMetadata, cfg: Dict[str, Any]) -> str:
    """Return ASR-cleaned text for *vm*, retrying per processing.retry.

    Falls back to the cleaned processed title when every attempt fails.
    """
    rc = _get_retry_conf(cfg)
    fetch = lambda: get_asr_text_for_vm(vm, cfg)
    try:
        return _retry(
            fetch,
            max_retries=rc["max_retries"],
            initial=rc["backoff_initial_seconds"],
            multiplier=rc["backoff_multiplier"],
            max_delay=rc["backoff_max_seconds"],
        )
    except Exception as exc:
        LOGGER.error("processing_failed: asr_clean_text failed for %s: %s", vm.author, exc)
        return clean_zh_text(vm.processed_title or "")