# utils.py
import subprocess
import os
from pathlib import Path
from translate import Translator
from pysrt import open as open_srt

def run_extract_audio(video_path: str, output_audio: str = "audio.wav") -> str:
    """Extract the audio track of *video_path* as 16 kHz mono 16-bit PCM WAV.

    Runs ffmpeg as a subprocess; raises subprocess.CalledProcessError if
    ffmpeg exits non-zero. Returns the path of the written audio file.
    """
    print("🔊 正在提取音频...")
    ffmpeg_args = [
        "ffmpeg", "-y",
        "-i", video_path,
        "-vn",                   # drop the video stream
        "-acodec", "pcm_s16le",  # 16-bit little-endian PCM
        "-ar", "16000",          # 16 kHz sample rate
        "-ac", "1",              # mono
        output_audio,
    ]
    subprocess.run(ffmpeg_args, check=True)
    return output_audio

def run_whisper(audio_path: str, output_srt: str = "audio.srt") -> str:
    """Transcribe *audio_path* to an SRT file with the whisper CLI.

    Fix: the original always returned the hard-coded ``output_srt`` even
    though the whisper CLI names its output ``<input stem>.srt`` (written
    into the current working directory), so the returned path could point
    at a file that was never created. We now locate the file whisper
    actually produced and rename it to *output_srt*.

    Raises subprocess.CalledProcessError if whisper exits non-zero.
    Returns *output_srt*.
    """
    print("🧠 正在进行语音识别 (whisper)...")
    cmd = ["whisper", audio_path, "--language", "en", "--task", "transcribe", "--output_format", "srt"]
    subprocess.run(cmd, check=True)
    # whisper writes <stem>.srt into the CWD by default (no --output_dir given).
    produced = Path(Path(audio_path).stem + ".srt")
    if produced.exists() and str(produced) != output_srt:
        produced.replace(output_srt)
    return output_srt

def extract_subtitle(video_path: str, output_srt: str = "extracted.srt") -> str:
    """Dump the first embedded subtitle stream of *video_path* to an SRT file.

    Raises subprocess.CalledProcessError if ffmpeg fails (e.g. the video
    has no subtitle stream). Returns the path of the written SRT file.
    """
    print("🎞️ 正在提取内嵌字幕...")
    subprocess.run(
        ["ffmpeg", "-y", "-i", video_path, "-map", "0:s:0", output_srt],
        check=True,
    )
    return output_srt

def translate_subtitle(srt_path: str, target_lang: str = "zh", output_srt: str = "translated.srt") -> str:
    """Translate every cue of *srt_path* into *target_lang*, best-effort.

    A cue whose translation raises is left with its original text and the
    failure is printed; the (partially) translated file is always saved.
    Returns the path of the written SRT file.
    """
    print("🌐 正在翻译字幕为中文...")
    translator = Translator(to_lang=target_lang)
    subtitles = open_srt(srt_path, encoding="utf-8")
    for sub in subtitles:
        try:
            sub.text = translator.translate(sub.text)
        except Exception as e:
            # Deliberately broad: one bad cue must not abort the whole file.
            print(f"翻译失败: {sub.text} -> {e}")
    subtitles.save(output_srt, encoding="utf-8")
    return output_srt

def embed_subtitle(video_path: str, srt_path: str, output_path: str) -> str:
    """Burn the subtitles from *srt_path* into *video_path* via ffmpeg.

    Fix: every other step in this module returns its output path; this one
    returned None, so it could not be chained — it now returns *output_path*
    (backward compatible, callers ignoring the return are unaffected).

    NOTE(review): the ``subtitles=`` filter argument is not escaped, so
    paths containing ':' or quotes (e.g. Windows drive letters) will break
    the filter graph — confirm callers only pass plain relative paths.

    Raises subprocess.CalledProcessError if ffmpeg exits non-zero.
    """
    print("🎬 正在将字幕嵌入视频...")
    cmd = ["ffmpeg", "-y", "-i", video_path, "-vf", f"subtitles={srt_path}", output_path]
    subprocess.run(cmd, check=True)
    return output_path

def run_zits_removal(video_path: str, srt_path: str, output_path: str = "zit_cleaned.mp4"):
    """Remove burnt-in subtitles from *video_path* using ZITS inpainting.

    Every frame whose timestamp falls inside a subtitle cue of *srt_path*
    is inpainted; frames are written to ``zit_output/frames/``, re-encoded
    with libx264, and muxed with the original audio into *output_path*.

    Fix: the original referenced ``torch`` for the CUDA device check but
    never imported it anywhere in the file, so every call raised NameError.
    Also: ``cap.release()`` is now guaranteed via try/finally, and unused
    locals (numpy import, width/height) were dropped.
    """
    import os
    import subprocess

    import cv2
    import torch  # was missing — torch.cuda.is_available() raised NameError
    from tqdm import tqdm
    from pysrt import open as open_srt

    from ZITS.inference import ZITSInpainter  # assumes an inference wrapper class around ZITS

    print("📽️ 准备视频帧和字幕...")
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # Map each frame index to the subtitle text visible at that instant.
    # sub.start.ordinal / sub.end.ordinal are milliseconds.
    subs = open_srt(srt_path, encoding="utf-8")
    frame_text_map = {}
    for sub in subs:
        start = int(sub.start.ordinal / 1000 * fps)
        end = int(sub.end.ordinal / 1000 * fps)
        for f in range(start, end + 1):
            frame_text_map[f] = sub.text.strip()

    os.makedirs("zit_output/frames", exist_ok=True)

    inpainter = ZITSInpainter(device="cuda" if torch.cuda.is_available() else "cpu")

    try:
        for i in tqdm(range(total_frames), desc="🔍 正在处理字幕帧"):
            ret, frame = cap.read()
            if not ret:
                break
            text = frame_text_map.get(i)
            # Only run the (expensive) inpainter on frames that carry text.
            cleaned = inpainter.inpaint(frame, text) if text else frame
            cv2.imwrite(f"zit_output/frames/{i:06d}.png", cleaned)
    finally:
        cap.release()

    print("🎞️ 合并为视频...")
    subprocess.run([
        "ffmpeg", "-y", "-framerate", str(fps), "-i", "zit_output/frames/%06d.png",
        "-c:v", "libx264", "-pix_fmt", "yuv420p", "zit_output/video.mp4"
    ], check=True)

    print("🔊 合并音频...")
    # Take the cleaned video stream and the original file's audio stream.
    subprocess.run([
        "ffmpeg", "-y", "-i", "zit_output/video.mp4", "-i", video_path,
        "-c", "copy", "-map", "0:v:0", "-map", "1:a:0", output_path
    ], check=True)

    print(f"✅ 字幕已清除，输出：{output_path}")
