"""ASR processor: locate video → extract audio → Whisper → clean text.

- Single responsibility: given VideoMetadata and config, return cleaned ASR text.
- Follows project rules: PEP8, type annotations, logging, error isolation, retry optional.
- Safe fallbacks: on any failure, return cleaned processed_title.
"""
from __future__ import annotations

import logging
from pathlib import Path
from typing import Any, Dict, Optional, List, Callable

from src.models.video_metadata import VideoMetadata
from src.utils import media_utils
from src.utils.text_cleaner import clean_zh_text
from src.utils.media_utils import find_mp4_for_vm
from concurrent.futures import ThreadPoolExecutor, TimeoutError
import time

LOGGER = logging.getLogger(__name__)


def _transcribe_with_timeout(wav_path: str, cfg: Dict[str, Any]) -> str:
    """Run Whisper transcribe with timeout from config.asr.timeout_seconds.

    Raises TimeoutError if transcribe exceeds timeout.
    """
    try:
        import whisper  # type: ignore
    except Exception as exc:
        raise RuntimeError(f"whisper_import_failed: {exc}")

    model_size = cfg.get("asr", {}).get("model_size", "small")
    device = cfg.get("asr", {}).get("device", "cuda")
    language = cfg.get("asr", {}).get("language", "zh")
    timeout = int(cfg.get("asr", {}).get("timeout_seconds", 180))

    def _do_transcribe() -> str:
        model = whisper.load_model(model_size, device=device)
        result = model.transcribe(str(wav_path), language=language)
        return result.get("text", "")

    with ThreadPoolExecutor(max_workers=1) as ex:
        future = ex.submit(_do_transcribe)
        return future.result(timeout=timeout)


def get_asr_text_for_vm(vm: VideoMetadata, cfg: Dict[str, Any]) -> str:
    """Return cleaned transcript for *vm* via Whisper, falling back to the title.

    Pipeline: locate the source .mp4 under ``paths.input_videos`` (matched by
    author + publish date), extract a temporary WAV into ``paths.temp_audio``,
    transcribe with a timeout, then normalize via ``clean_zh_text``.  Any
    failure degrades to the cleaned ``processed_title``.
    """
    try:
        paths_cfg = cfg.get("paths", {})
        input_root = paths_cfg.get("input_videos", "S:/")
        audio_dir = Path(paths_cfg.get("temp_audio", "data/result/audio_temp"))
        audio_dir.mkdir(parents=True, exist_ok=True)

        # Match on the author + publish-date naming convention (video_id is
        # deliberately not the primary key here).
        video_file = find_mp4_for_vm(vm.author or "", vm.publish_time or "", input_root)
        if video_file is None:
            return clean_zh_text(vm.processed_title or "")

        date_token = (vm.publish_time or "NA").split(" ")[0]
        wav_target = audio_dir / f"VID_{(vm.author or 'NA')}+{date_token}.wav"
        wav_path = media_utils.extract_audio_from_video(str(video_file), str(wav_target))

        try:
            transcript = _transcribe_with_timeout(str(wav_path), cfg)
            if not transcript:
                return clean_zh_text(vm.processed_title or "")
            return clean_zh_text(transcript)
        except TimeoutError:
            LOGGER.warning("api_timeout: whisper transcribe timeout for %s", vm.author)
            return clean_zh_text(vm.processed_title or "")
        except Exception as exc:
            LOGGER.warning("asr_fallback: whisper failed for %s: %s", vm.author, exc)
            return clean_zh_text(vm.processed_title or "")
    except Exception as exc:
        LOGGER.error("processing_failed: get_asr_text_for_vm error: %s", exc)
        return clean_zh_text(vm.processed_title or "")


def _get_retry_conf(cfg: Dict[str, Any]) -> Dict[str, Any]:
    p = cfg.get("processing", {})
    r = p.get("retry", {})
    return {
        "max_retries": int(r.get("max_retries", 3)),
        "initial_backoff": float(r.get("initial_backoff_seconds", 0.5)),
        "max_backoff": float(r.get("max_backoff_seconds", 4.0)),
        "factor": float(r.get("backoff_factor", 2.0)),
    }


def _retry(op: Callable[[], str], retry_conf: Dict[str, Any], fallback: str) -> str:
    tries = retry_conf["max_retries"]
    backoff = retry_conf["initial_backoff"]
    max_backoff = retry_conf["max_backoff"]
    factor = retry_conf["factor"]
    for attempt in range(tries):
        try:
            res = op()
            return res if res else fallback
        except TimeoutError:
            LOGGER.warning("api_timeout: attempt %d timed out", attempt + 1)
        except Exception as exc:
            LOGGER.warning("asr_fallback: attempt %d failed: %s", attempt + 1, exc)
        time.sleep(backoff)
        backoff = min(max_backoff, backoff * factor)
    return fallback


def batch_get_asr_texts(items: List[VideoMetadata], cfg: Dict[str, Any]) -> List[str]:
    """Batch ASR for a list of VideoMetadata with concurrency and retry.

    - Concurrency: ``config.processing.concurrency.max_workers`` (default 4).
    - Per-future timeout: ``config.processing.concurrency.timeout_per_video_seconds``
      (default 180).  Futures are awaited in submission order, so later items
      may effectively get extra wall-clock time while earlier ones are waited on.
    - Retry: ``config.processing.retry``.
    - Always returns a list aligned with input order; every failed or
      timed-out slot holds the cleaned processed_title fallback.
    """
    retry_conf = _get_retry_conf(cfg)
    concurrency = cfg.get("processing", {}).get("concurrency", {})
    max_workers = int(concurrency.get("max_workers", 4))
    per_timeout = float(concurrency.get("timeout_per_video_seconds", 180))

    results: List[str] = []
    # Fix: do not use `with ThreadPoolExecutor(...)`.  Its implicit
    # shutdown(wait=True) blocked the whole batch on workers that had
    # already exceeded per_timeout; shut down without waiting instead.
    ex = ThreadPoolExecutor(max_workers=max_workers)
    try:
        futures = []
        for vm in items:
            fallback = clean_zh_text(vm.processed_title or "")
            # Bind vm/fallback as lambda defaults so each task captures its
            # own values (avoids the late-binding closure pitfall).
            futures.append(
                ex.submit(lambda vm=vm, fb=fallback: _retry(lambda: get_asr_text_for_vm(vm, cfg), retry_conf, fb))
            )
        for vm, fut in zip(items, futures):
            try:
                txt = fut.result(timeout=per_timeout)
                results.append(clean_zh_text(txt))
            except TimeoutError:
                LOGGER.warning("api_timeout: batch future timeout for %s", vm.author)
                results.append(clean_zh_text(vm.processed_title or ""))
            except Exception as exc:
                LOGGER.warning("processing_failed: batch future error for %s: %s", vm.author, exc)
                results.append(clean_zh_text(vm.processed_title or ""))
    finally:
        ex.shutdown(wait=False)
    return results


def transcribe_audio(audio_path: str, cfg: Dict[str, Any]) -> str:
    """Transcribe a single audio file with retry; return cleaned text or "NA".

    Thin wrapper composing ``_get_retry_conf`` + ``_retry`` +
    ``_transcribe_with_timeout``; "NA" marks audio that could not be
    transcribed after all attempts.
    """
    fallback = "NA"
    raw = _retry(
        lambda: _transcribe_with_timeout(str(audio_path), cfg),
        _get_retry_conf(cfg),
        fallback,
    )
    return clean_zh_text(raw)


def batch_transcribe(audio_paths: List[str], cfg: Dict[str, Any]) -> Dict[str, str]:
    """Transcribe many audio files concurrently; map path -> cleaned text.

    Concurrency, per-future timeout and retry follow the same config keys as
    ``batch_get_asr_texts``.  Failed or timed-out paths map to the cleaned
    "NA" fallback.  Annotation normalized to ``List[str]`` for consistency
    with the rest of the module.
    """
    concurrency = cfg.get("processing", {}).get("concurrency", {})
    max_workers = int(concurrency.get("max_workers", 4))
    per_timeout = float(concurrency.get("timeout_per_video_seconds", 180))
    retry_conf = _get_retry_conf(cfg)

    out: Dict[str, str] = {}
    # Fix: avoid `with ThreadPoolExecutor(...)` -- its implicit
    # shutdown(wait=True) blocked on workers that had already exceeded
    # per_timeout.  Shut down without waiting instead.
    ex = ThreadPoolExecutor(max_workers=max_workers)
    try:
        futures: Dict[str, Any] = {}
        for ap in audio_paths:
            # Bind `ap` as a default to avoid the late-binding closure pitfall.
            futures[ap] = ex.submit(
                lambda ap=ap: clean_zh_text(
                    _retry(lambda: _transcribe_with_timeout(str(ap), cfg), retry_conf, "NA")
                )
            )
        for ap, fut in futures.items():
            try:
                out[ap] = fut.result(timeout=per_timeout)
            except TimeoutError:
                LOGGER.warning("api_timeout: batch transcribe timeout for %s", ap)
                out[ap] = clean_zh_text("NA")
            except Exception as exc:
                LOGGER.warning("processing_failed: batch transcribe error for %s: %s", ap, exc)
                out[ap] = clean_zh_text("NA")
    finally:
        ex.shutdown(wait=False)
    return out



