#!/usr/bin/env python3
"""
google_stt_replace_v1_gcs_or_recognize.py
— Replace spoken keywords in a local video ("custom" mode) via FLAC audio,
branching on duration: short clips use synchronous recognize(), long ones
are uploaded to GCS and use long_running_recognize().

Dependencies:
    pip install --upgrade google-cloud-speech google-cloud-storage ffmpeg-python pydub

Prerequisites:
  * Speech-to-Text API V1 enabled on the GCP project
  * A GCS bucket exists and the service account has write access to it
  * Service-account JSON downloaded and GOOGLE_APPLICATION_CREDENTIALS set
"""

import os
# ─── Route all HTTP/S traffic through a local proxy ─────────
# NOTE(review): hard-coded proxy endpoints (127.0.0.1:7890, a common Clash
# default) — assumes a proxy is actually listening there; confirm before
# running in any other environment.
os.environ.update({
    "http_proxy":  "http://127.0.0.1:7890",
    "https_proxy": "http://127.0.0.1:7890",
    "all_proxy":   "socks5://127.0.0.1:7890",
    "HTTP_PROXY":  "http://127.0.0.1:7890",
    "HTTPS_PROXY": "http://127.0.0.1:7890",
    "ALL_PROXY":   "socks5://127.0.0.1:7890",
})
# ─────────────────────────────────────────────

import subprocess, tempfile, json
from pathlib import Path
from typing import List, Dict, Tuple

from pydub import AudioSegment
from google.cloud import speech, storage
from google.cloud.speech import RecognitionConfig, RecognitionAudio, SpeechAdaptation

# ─────────── Configuration ───────────────────
# Service-account key file; exported so the Google client libraries pick it up.
KEY_PATH    = "google.key"
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = KEY_PATH
# Pre-created PhraseSet resource used for speech adaptation.
PHRASESET_NAME = "projects/758813863716/locations/global/phraseSets/ai-desens"

VIDEO       = "11.高手经营事业，如如游戏人间（模糊）.vf1080p24.mp4"  # input video
OUTPUT      = "output3.mp4"                                          # output video
GCS_BUCKET  = "ai-desens"  # bucket used for the long-audio (async) branch

LANGUAGE      = "zh-CN"
SAMPLE_RATE = 16000  # 16 kHz — matches the recognition model's training sample rate
ENABLE_WORDTS = True  # request per-word time offsets (required for replacement)

# Keywords whose spoken occurrences get replaced in the audio track.
KEYWORDS = {
    "福报","轮回","菩提心","业障","自性","天命",
    "法门","业力","习气","法界","因缘","修行","打坐"
}

MODE       = "custom"
# Padding (ms) added around each detected keyword before replacement.
PRE_PAD_MS = 200
POST_PAD_MS= 200

# Keyword → pre-recorded replacement clip (looped/truncated to fit the window).
CUSTOM_MAP: Dict[str,str] = {
    '福报': 'audio/福报-福气.MP3',
    '菩提心': 'audio/菩提心-圣心.MP3',
    '业障': 'audio/业障-障碍.MP3',
    '法界': 'audio/法界-宇宙.MP3',
    '天命': 'audio/天命-命运.MP3',
    '轮回': 'audio/轮回-循环.MP3',
    '打坐': 'audio/打坐-静坐.MP3',
    '法门': 'audio/法门-方法.MP3',
    '因缘': 'audio/因缘-条件.MP3',
    '习气': 'audio/习气-秉性.MP3',
    '修行': 'audio/修行-修心.MP3',
    '业力': 'audio/业力-禀赋.MP3',
    '自性': 'audio/自性-根本道.MP3',
}
# ─────────────────────────────────────────────

def extract_flac(video_path: str, out: Path) -> None:
    """Extract the audio track of *video_path* as mono FLAC at SAMPLE_RATE Hz.

    Runs ffmpeg silently; raises CalledProcessError on failure.
    """
    cmd = [
        "ffmpeg", "-y",
        "-i", video_path,
        "-vn",                       # drop the video stream
        "-acodec", "flac",
        "-ar", str(SAMPLE_RATE),     # resample for the STT model
        "-ac", "1",                  # mono
        str(out),
    ]
    subprocess.run(cmd, check=True,
                   stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

def mux_back(video_path: str, new_audio: Path, out_video: str) -> None:
    """Remux the original video stream with the replacement audio track.

    Video is stream-copied (no re-encode); output ends at the shorter stream.
    Raises CalledProcessError on ffmpeg failure.
    """
    cmd = [
        "ffmpeg", "-y",
        "-i", video_path,
        "-i", str(new_audio),
        "-c:v", "copy",              # keep original video bits untouched
        "-map", "0:v:0",             # video from input 0
        "-map", "1:a:0",             # audio from input 1
        "-shortest",
        out_video,
    ]
    subprocess.run(cmd, check=True,
                   stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

def upload_to_gcs(local: Path, bucket: str) -> str:
    """Upload *local* into gs://<bucket>/stt/ and return the resulting GCS URI."""
    dest = f"stt/{local.name}"
    storage.Client().bucket(bucket).blob(dest).upload_from_filename(str(local))
    return f"gs://{bucket}/{dest}"

def load_or_fetch_segments(flac_path: Path, cache_path: Path) -> List[Dict]:
    """Transcribe *flac_path* and return word-level timings.

    Returns a list of ``{"word", "start", "end"}`` dicts (times in seconds),
    cached as JSON at *cache_path* so repeated runs skip the API.

    Strategy by audio duration:
      * <= 60 s   : one synchronous recognize() call
      * <= 600 s  : split into ~55 s chunks, recognize() each, re-offset times
      * >  600 s  : upload to GCS and use long_running_recognize()

    Raises on API/ffmpeg errors; may block up to an hour for the async branch.
    """
    if cache_path.exists():
        return json.loads(cache_path.read_text("utf-8"))

    # 1) Measure duration to choose the API branch.
    audio = AudioSegment.from_file(flac_path)
    duration_s = len(audio) / 1000.0
    print(f"音频时长: {duration_s:.1f}s")

    adaptation = SpeechAdaptation(
        phrase_set_references=[PHRASESET_NAME],
        # A boost can also be specified per phrase set:
        # phrase_sets=[{"name": PHRASESET_NAME, "boost": 50.0}],
    )
    client = speech.SpeechClient()
    config = RecognitionConfig(
        encoding=RecognitionConfig.AudioEncoding.FLAC,
        sample_rate_hertz=SAMPLE_RATE,
        language_code=LANGUAGE,
        enable_word_time_offsets=ENABLE_WORDTS,
        # model="video",
        use_enhanced=True,
        # Speech adaptation via the pre-built PhraseSet resource.
        adaptation=adaptation,
        speech_contexts=[speech.SpeechContext(phrases=list(KEYWORDS), boost=20.0)]
    )

    segments: List[Dict] = []

    def _collect(results, offset_s: float = 0.0) -> None:
        # Flatten word-level timings from API results into `segments`,
        # shifting by `offset_s` for chunked recognition.
        for result in results:
            for w in result.alternatives[0].words:
                segments.append({
                    "word":  w.word,
                    "start": w.start_time.total_seconds() + offset_s,
                    "end":   w.end_time.total_seconds()   + offset_s,
                })

    if duration_s <= 60:
        # Short audio: single synchronous request.
        print("▶️ 短音频 ≤60s，走 recognize()")
        audio_req = RecognitionAudio(content=flac_path.read_bytes())
        _collect(client.recognize(config=config, audio=audio_req).results)

    elif duration_s <= 600:
        # Medium length: chunked synchronous requests, under the 60 s API cap.
        print("▶️ 中等长度 60–600s，分片 recognize()")
        CHUNK_MS = 55_000
        for start in range(0, len(audio), CHUNK_MS):
            end = min(start + CHUNK_MS, len(audio))
            tmpf = tempfile.NamedTemporaryFile(suffix=".flac", delete=False)
            try:
                tmpf.close()  # close first so pydub can reopen it (Windows-safe)
                audio[start:end].export(tmpf.name, format="flac")
                data = Path(tmpf.name).read_bytes()
            finally:
                # Always remove the temp chunk, even if export/read raised.
                Path(tmpf.name).unlink(missing_ok=True)
            audio_req = RecognitionAudio(content=data)
            # Chunk-local timestamps are shifted back to whole-file time.
            _collect(client.recognize(config=config, audio=audio_req).results,
                     offset_s=start / 1000.0)

    else:
        # Very long audio: asynchronous recognition from GCS.
        print("▶️ 超长 >600s，上传 GCS + long_running_recognize()")
        uri = upload_to_gcs(flac_path, GCS_BUCKET)
        audio_req = RecognitionAudio(uri=uri)
        op = client.long_running_recognize(config=config, audio=audio_req)
        _collect(op.result(timeout=3600).results)  # generous async timeout

    # Cache and return.
    cache_path.write_text(json.dumps(segments, ensure_ascii=False), "utf-8")
    return segments

def find_keyword_segments(
    segments: List[Dict], keywords: set
) -> List[Tuple[float, float, str]]:
    """Greedy longest-match scan for keywords over recognized word segments.

    Joins consecutive segment texts and matches them against *keywords*,
    preferring the longest match at each position (non-overlapping).

    Args:
        segments: list of ``{"word", "start", "end"}`` dicts in time order.
        keywords: set of keyword strings to locate.

    Returns:
        List of ``(start_s, end_s, keyword)`` tuples in segment order.

    Fixes vs. the previous version:
      * advances by the number of SEGMENTS consumed, not by the keyword's
        character count — the two differ when the recognizer emits
        multi-character words, which previously skipped adjacent keywords;
      * an empty keyword set returns [] instead of crashing on ``max()``.
    """
    if not keywords:
        return []
    words  = [s["word"]  for s in segments]
    starts = [s["start"] for s in segments]
    ends   = [s["end"]   for s in segments]
    max_len = max(len(k) for k in keywords)
    matches: List[Tuple[float, float, str]] = []
    i = 0
    n = len(words)
    while i < n:
        found = None
        span = 0
        # Try the longest window first so e.g. "菩提心" beats a shorter match.
        for width in range(max_len, 0, -1):
            if i + width > n:
                continue
            candidate = "".join(words[i:i + width])
            if candidate in keywords:
                found = (starts[i], ends[i + width - 1], candidate)
                span = width
                break
        if found:
            matches.append(found)
            i += span  # skip exactly the segments consumed by the match
        else:
            i += 1
    return matches

def build_new_audio(flac_path: Path, segments: List[Dict]):
    """Return a new AudioSegment with every detected keyword replaced.

    For each keyword occurrence, a window padded by PRE_PAD_MS/POST_PAD_MS is
    cut out and covered with the mapped clip from CUSTOM_MAP (looped and
    truncated to fit the window exactly, so overall length is preserved).

    Raises:
        FileNotFoundError: if a keyword has no existing replacement file.

    Fixes vs. the previous version: padded windows are clamped to the end of
    the previous replacement and to the end of the track — overlapping
    windows used to produce negative-length copy slices that silently
    shortened the audio and desynced it from the video.
    """
    targets = find_keyword_segments(segments, KEYWORDS)
    if not targets:
        print("⚠️ 无关键词，返回原声")
        return AudioSegment.from_file(flac_path)

    orig = AudioSegment.from_file(flac_path)
    out = AudioSegment.empty()
    last = 0  # end (ms) of the previous replacement window
    for st, ed, kw in targets:
        a = max(0, int(st * 1000) - PRE_PAD_MS)
        b = min(int(ed * 1000) + POST_PAD_MS, len(orig))
        a = max(a, last)  # clamp: never reach back into the previous window
        if b <= a:
            continue  # window fully swallowed by the previous replacement
        out += orig[last:a]  # copy untouched audio up to the window
        dur = b - a
        path = CUSTOM_MAP.get(kw)
        if not path or not Path(path).exists():
            raise FileNotFoundError(f"{kw} 替换文件缺失→{path}")
        clip = AudioSegment.from_file(path)
        if len(clip) < dur:
            clip *= dur // len(clip) + 1  # loop the clip until it covers the window
        out += clip[:dur]  # truncate to keep total duration unchanged
        last = b
    out += orig[last:]
    return out

def main() -> None:
    """End-to-end pipeline: extract audio → transcribe → replace keywords → remux."""
    print(f"► 处理：{VIDEO}")
    with tempfile.TemporaryDirectory() as workdir:
        work = Path(workdir)
        flac_file = work / "audio.flac"
        # Cache lives next to the video so repeated runs skip recognition.
        cache_file = Path(VIDEO).with_suffix(".flac.json")

        extract_flac(VIDEO, flac_file)
        print("▶️ 识别中…")
        word_segments = load_or_fetch_segments(flac_file, cache_file)

        replaced = build_new_audio(flac_file, word_segments)
        replaced_flac = work / "new.flac"
        replaced.export(replaced_flac, format="flac")

        mux_back(VIDEO, replaced_flac, OUTPUT)
    print("✅ 完成：", OUTPUT)

if __name__=="__main__":
    from pathlib import Path
    main()
