#!/usr/bin/env python3
import hashlib
import subprocess
import tempfile
import uuid
import json
import time
from pathlib import Path
from typing import List, Dict
from datetime import datetime

from faster_whisper import WhisperModel
from pydub import AudioSegment
from pydub.generators import Sine

# ────── Path constants ──────
BASE = Path(__file__).resolve().parent   # directory containing this script
INPUTS = Path("/Users/dusen/Editor/01-内容素材库/20-直播/直播片段")   # root of the source videos
OUTPUTS = Path("/Users/dusen/Editor/01-内容素材库/20-直播/直播片段（脱敏）/敏感词消音")   # sanitized output root (mirrors the INPUTS tree)
SUBS = BASE / "subtitles"        # transcript cache: one JSON per input-file MD5
KEYWORDS_DIR = BASE / "keywords" # replacement-clip files named "<keyword>-<label>.<ext>"

# ────── Configuration ──────
MODEL_NAME   = "turbo"   # faster-whisper model identifier
CPU_THREADS  = 8         # threads used for CPU inference
SAMPLE_RATE  = 16000     # sample rate (Hz) of the extracted mono audio track
SEG_LEN_SEC  = 30        # audio is split into chunks of this many seconds before transcription
PRE_PAD_MS, POST_PAD_MS = 50, 100   # padding added before/after each censored span (ms)
BEEP_HZ = 1000           # frequency (Hz) of the censor beep tone

def load_keywords(folder: Path):
    """Scan *folder* for keyword files named "<keyword>-<label>.<ext>".

    Files whose stem contains no "-" are ignored.  Returns a tuple of
    (set of keywords, dict mapping keyword -> absolute path of its
    replacement-audio file).
    """
    keywords = set()
    custom_audio = {}
    for entry in folder.glob("*.*"):
        stem = entry.stem
        if "-" in stem:
            # Everything before the first "-" is the keyword itself.
            word = stem.split("-", 1)[0].strip()
            keywords.add(word)
            custom_audio[word] = str(entry.resolve())
    return keywords, custom_audio

# Built-in keyword set and keyword -> replacement-clip path map, loaded once at import time.
KEYWORDS, CUSTOM_MAP = load_keywords(KEYWORDS_DIR)

def md5_file(path: Path, buf: int = 1 << 20) -> str:
    """Return the hex MD5 digest of the file at *path*.

    The file is streamed in *buf*-byte chunks so arbitrarily large
    videos can be hashed without loading them into memory.
    """
    digest = hashlib.md5()
    with path.open("rb") as fh:
        while True:
            chunk = fh.read(buf)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()

def transcribe_or_load(wav: Path, cache_json: Path) -> List[Dict]:
    """Transcribe *wav* with faster-whisper, or load a cached transcript.

    The audio is split into SEG_LEN_SEC-second chunks with ffmpeg so that
    memory stays bounded; per-chunk word timestamps are shifted by the
    running offset to restore absolute times.

    Returns a list of {"word", "start", "end"} dicts (times in seconds),
    and writes the result to *cache_json* for reuse on later runs.
    """
    if cache_json.exists():
        return json.loads(cache_json.read_text("utf-8"))
    model = WhisperModel(
        MODEL_NAME, device="cpu",
        cpu_threads=CPU_THREADS, compute_type="int8"
    )
    # Split the audio into fixed-length chunks; -reset_timestamps makes each
    # chunk start at t=0, so we track the absolute offset ourselves below.
    seg_dir = wav.parent / "seg"
    seg_dir.mkdir(exist_ok=True)
    subprocess.run([
        "ffmpeg", "-y", "-i", str(wav),
        "-f", "segment", "-segment_time", str(SEG_LEN_SEC),
        "-reset_timestamps", "1",
        str(seg_dir / "seg_%04d.wav")
    ], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    words, offset = [], 0.0
    for seg in sorted(seg_dir.glob("seg_*.wav")):
        # Use the chunk's real duration (the last chunk is usually shorter
        # than SEG_LEN_SEC) to advance the absolute-time offset.
        dur = float(subprocess.check_output([
            "ffprobe", "-v", "error",
            "-show_entries", "format=duration",
            "-of", "default=nokey=1:noprint_wrappers=1",
            str(seg)
        ]))
        segs, _ = model.transcribe(
            str(seg), language="zh", word_timestamps=True
        )
        for sg in segs:
            for w in sg.words:
                txt = w.word.strip()
                if txt:  # skip whitespace-only tokens
                    words.append({
                        "word": txt,
                        "start": w.start + offset,
                        "end": w.end + offset
                    })
        offset += dur
    # FIX: the cache directory (SUBS) is never created anywhere else, so
    # without this mkdir the write below raises FileNotFoundError on the
    # first run of a fresh checkout.
    cache_json.parent.mkdir(parents=True, exist_ok=True)
    cache_json.write_text(json.dumps(words, ensure_ascii=False), "utf-8")
    return words

def match_segments(words: List[Dict], keywords=None) -> List[tuple]:
    """Find keyword occurrences in the transcribed word stream.

    Performs a greedy longest-match scan over runs of consecutive tokens:
    at each position the longest joined phrase that is a keyword wins.

    Parameters:
        words: list of {"word", "start", "end"} dicts from transcription.
        keywords: optional set of keywords to match; defaults to the
            module-level KEYWORDS set (backward-compatible).

    Returns a list of (start_sec, end_sec, phrase) tuples.
    """
    if keywords is None:
        keywords = KEYWORDS
    chars  = [w["word"] for w in words]
    starts = [w["start"] for w in words]
    ends   = [w["end"]   for w in words]
    # Upper bound on the token window: a phrase of L tokens has at least L
    # characters, so no match can span more tokens than the longest keyword
    # has characters.
    maxL = max((len(k) for k in keywords), default=1)
    res, i = [], 0
    while i < len(chars):
        hit, hit_tokens = None, 0
        for L in range(maxL, 0, -1):
            if i + L > len(chars):
                continue
            phrase = "".join(chars[i:i + L])
            if phrase in keywords:
                hit, hit_tokens = (starts[i], ends[i + L - 1], phrase), L
                break
        if hit:
            res.append(hit)
            # FIX: advance by the number of TOKENS consumed, not by the
            # phrase's character count — whisper tokens may contain more
            # than one character, and the old `i += len(hit[2])` could skip
            # past (and miss) the next keyword.
            i += hit_tokens
        else:
            i += 1
    return res

def replace_audio(orig_wav: Path, targets: List[tuple]) -> AudioSegment:
    """Return the audio of *orig_wav* with each target span replaced.

    Parameters:
        orig_wav: path to the WAV file to censor.
        targets: list of (start_sec, end_sec, keyword, mode) tuples; mode is
            "silence", "beep", or anything else for a custom clip looked up
            in CUSTOM_MAP (falling back to silence if no clip exists).

    Each span is widened by PRE_PAD_MS / POST_PAD_MS milliseconds.
    """
    orig = AudioSegment.from_wav(str(orig_wav))
    if not targets:
        return orig
    out, last = AudioSegment.empty(), 0
    for st, ed, kw, mode in sorted(targets, key=lambda x: x[0]):
        a = max(0, int(st * 1000) - PRE_PAD_MS)
        b = int(ed * 1000) + POST_PAD_MS
        # FIX: clamp overlapping/duplicate spans to the already-emitted
        # position so no audio is emitted twice — duplicate keywords in the
        # mute list previously produced duplicate targets that lengthened
        # the output track.
        a = max(a, last)
        if b <= last:
            continue  # span fully covered by a previous replacement
        out += orig[last:a]
        dur = b - a
        if mode == "silence":
            rep = AudioSegment.silent(duration=dur)
        elif mode == "beep":
            rep = Sine(BEEP_HZ).to_audio_segment(duration=dur)
        else:  # custom replacement clip, looped/truncated to fit the span
            path = CUSTOM_MAP.get(kw)
            if path and Path(path).exists():
                clip = AudioSegment.from_file(path)
                if len(clip) < dur:
                    # Repeat the clip enough times to cover the span.
                    clip *= dur // len(clip) + 1
                rep = clip[:dur]
            else:
                # No custom clip available -> fall back to silence.
                rep = AudioSegment.silent(duration=dur)
        out += rep
        last = b
    out += orig[last:]
    return out

def process_video_file(in_video: Path, replace_list: List[str], silence_list: List[str]):
    """Censor keywords in one video and write the result under OUTPUTS.

    Parameters:
        in_video: source video under INPUTS.
        replace_list: built-in keywords to replace with their custom clips.
        silence_list: phrases to mute outright (matched token-per-character).

    Returns the output video path; skips all work if it already exists.
    """
    md5 = md5_file(in_video)
    sub_json = SUBS / f"{md5}.json"

    # Mirror the input tree under OUTPUTS, keeping the same file name.
    relative_path = in_video.relative_to(INPUTS)
    out_video = OUTPUTS / relative_path

    # Already produced by an earlier run -> skip.
    if out_video.exists():
        print(f"Skipping {in_video}, file already processed.")
        return out_video

    # Make sure the output directory exists.
    out_video.parent.mkdir(parents=True, exist_ok=True)

    # FIX: use the context-manager form so the temp dir is removed even when
    # ffmpeg or transcription raises — the old explicit cleanup() call was
    # skipped on any exception.
    with tempfile.TemporaryDirectory() as tmp_name:
        tmp_dir = Path(tmp_name)

        # 1. Extract a mono audio track at SAMPLE_RATE for transcription.
        wav = tmp_dir / "audio.wav"
        subprocess.run([
            "ffmpeg", "-y", "-i", str(in_video),
            "-vn", "-ac", "1", "-ar", str(SAMPLE_RATE), str(wav)
        ], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

        # 2. Transcribe (or load the cached transcript keyed by file MD5).
        words = transcribe_or_load(wav, sub_json)

        # 3. Match built-in keywords; keep only those slated for replacement.
        base_hits = match_segments(words)
        rep_hits = [(st, ed, kw) for st, ed, kw in base_hits if kw in replace_list]

        # 4. Exact-sequence match for the mute list (including ad-hoc phrases),
        #    assuming one transcript token per phrase character.
        sil_hits: List[tuple] = []
        for phrase in silence_list:
            L = len(phrase)
            for i in range(len(words) - L + 1):
                seq = "".join(w["word"] for w in words[i:i + L])
                if seq == phrase:
                    st = words[i]["start"]
                    ed = words[i + L - 1]["end"]
                    sil_hits.append((st, ed, phrase))

        # 5. Merge into (start, end, keyword, mode) targets.
        targets: List[tuple] = []
        for st, ed, kw in rep_hits:
            targets.append((st, ed, kw, "custom"))
        for st, ed, kw in sil_hits:
            targets.append((st, ed, kw, "silence"))

        # 6. Build the censored audio track and remux it with the original
        #    video stream (video is copied, audio fully replaced).
        new_audio = replace_audio(wav, targets)
        new_wav = tmp_dir / "new.wav"
        new_audio.export(str(new_wav), format="wav")
        subprocess.run([
            "ffmpeg", "-y",
            "-i", str(in_video),
            "-i", str(new_wav),
            "-c:v", "copy", "-map", "0:v:0", "-map", "1:a:0",
            "-shortest", str(out_video)
        ], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

    return out_video

def process_all_videos():
    """Batch-process every .mp4 under INPUTS with the fixed mute list."""
    replace_list = []
    # FIX: de-duplicate while preserving order — the original list contained
    # '轮回' twice, and duplicate phrases produce duplicate mute targets
    # downstream (each occurrence would be processed twice).
    silence_list = list(dict.fromkeys(['业力', '业障', '中阴身', '他心通', '众生', '传法', '佛法', '佛牌', '入定', '八字', '六根', '六爻', '六道', '出定', '前世', '占卜', '地狱道', '塔罗', '天眼', '天耳', '天道', '夺舍', '奇门遁甲', '实修', '宿命通', '密宗', '往生', '忏悔', '念咒', '恶业', '恶缘', '戒律', '手相', '打坐', '托梦', '护法', '招魂', '持戒', '星象', '显灵', '极乐世界', '果报', '法器', '法界', '活佛', '涅槃', '消业', '灌顶', '灵媒', '灵界', '狐仙', '畜生道', '皈依', '神通', '禅定', '符咒', '算命', '精怪', '罗汉', '舍利', '菩萨', '超度', '转世', '轮回', '辟邪', '还债', '违缘', '通灵', '邪灵', '镇宅', '阎王', '阴德', '阿修罗道', '阿罗汉', '阿赖耶识', '附体', '风水', '饿鬼道', '驱邪', '鬼', '鬼魂', '魂魄', '轮回', '佛']))

    for video_file in INPUTS.glob("**/*.mp4"):  # adjust the pattern to match your videos
        print(f"Processing {video_file}...")
        processed_video = process_video_file(video_file, replace_list, silence_list)
        print(f"Processed video saved to {processed_video}")

if __name__ == "__main__":
    process_all_videos()
