# offline_speak_proc.py
import numpy as np
import whisper
import torch
from speechbrain.pretrained import EncoderClassifier
import torchaudio
from silero_vad import load_silero_vad, get_speech_timestamps, collect_chunks
import os

# === Configuration ===
AUDIO_PATH = "meeting.wav"          # input recording to process
SPEAKER_DB_PATH = "speaker_db.npy"  # dict {speaker_name: reference embedding}, saved with allow_pickle
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
THRESHOLD = 0.5  # cosine-similarity threshold for accepting a speaker match

# === Load models (all local) ===
print("加载 Whisper...")
asr_model = whisper.load_model("large-v3", device=DEVICE)

print("加载 SpeechBrain 声纹模型...")
# NOTE(review): `speechbrain.pretrained` is deprecated in SpeechBrain >= 1.0
# in favor of `speechbrain.inference` — confirm the installed version.
spk_model = EncoderClassifier.from_hparams(
    source="local_models/ecapa-voxceleb",  # local path to the ECAPA-TDNN VoxCeleb model
    run_opts={"device": DEVICE}
)

print("加载 Silero VAD...")
# NOTE(review): the pip `silero-vad` package's load_silero_vad() signature is
# load_silero_vad(onnx=False); confirm this version accepts a `device` kwarg,
# otherwise this call raises TypeError.
vad_model = load_silero_vad(device=DEVICE)

# === Utility functions ===
def extract_speaker_embedding(signal):
    """Return a speaker embedding for a (channels, samples) waveform tensor.

    Multi-channel audio is reduced to its first channel before encoding.
    The result is the ECAPA embedding as a squeezed 1-D numpy array.
    """
    mono = signal[:1, :] if signal.size(0) > 1 else signal
    with torch.no_grad():  # inference only — no autograd graph needed
        embedding = spk_model.encode_batch(mono)
    return embedding.squeeze().cpu().numpy()

def cosine_sim(a, b):
    """Return the cosine similarity of two 1-D vectors as a float.

    Fix: guard against a zero denominator — if either vector has zero norm
    (e.g. a degenerate embedding), the original raised a divide warning and
    returned NaN, which silently poisoned the best-match comparison. A zero
    vector now yields similarity 0.0 (treated as "no match").
    """
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    if denom == 0.0:
        return 0.0
    return float(np.dot(a, b) / denom)

# === Main pipeline ===
# 1. Load the audio and resample to 16 kHz — the rate all three models
#    (Whisper, Silero VAD, ECAPA) are used with below.
waveform, sample_rate = torchaudio.load(AUDIO_PATH)
if sample_rate != 16000:
    waveform = torchaudio.transforms.Resample(sample_rate, 16000)(waveform)
    sample_rate = 16000

# 2. VAD: split the recording into speech segments (sample offsets).
print("运行 VAD 切分语音...")
# NOTE(review): squeeze() yields the 1-D tensor Silero expects only for mono
# input; a stereo file stays (2, T) here — assumes the recording is mono,
# TODO confirm.
speech_timestamps = get_speech_timestamps(
    waveform.squeeze(), vad_model, sampling_rate=16000, threshold=0.5
)

# 3. Load the enrolled-speaker database: a pickled dict
#    {speaker_name: reference embedding} (see the .items() loop below).
speaker_db = np.load(SPEAKER_DB_PATH, allow_pickle=True).item()

# 4. For each speech segment: identify the speaker, then transcribe.
results = []
for i, ts in enumerate(speech_timestamps):
    # Silero reports start/end as sample indices (at 16 kHz here).
    start, end = ts["start"], ts["end"]
    segment = waveform[:, start:end]
    
    # Skip segments shorter than 0.5 s — too little audio for a
    # reliable speaker embedding.
    if segment.size(1) < 8000:  # 0.5s * 16000
        continue

    # Speaker ID: pick the enrolled speaker with the highest cosine
    # similarity to this segment's embedding.
    emb = extract_speaker_embedding(segment)
    best_match, best_score = None, -1
    for name, ref_emb in speaker_db.items():
        score = cosine_sim(emb, ref_emb)
        if score > best_score:
            best_score = score
            best_match = name

    # Below THRESHOLD the segment is labelled "未知" (unknown speaker).
    speaker = best_match if best_score > THRESHOLD else "未知"

    # Whisper transcription, language forced to Chinese.
    # NOTE(review): as above, squeeze() leaves a 2-D array for multi-channel
    # audio, which transcribe() does not accept — mono assumption again.
    segment_np = segment.squeeze().cpu().numpy()
    text = asr_model.transcribe(segment_np, language="zh")["text"].strip()

    results.append({
        "speaker": speaker,
        "text": text,
        "start_sample": start,  # sample offsets at 16 kHz
        "end_sample": end
    })
    print(f"[{speaker}] {text}")

# 5. Save the transcript as plain text: one "[speaker] text" line per segment.
with open("meeting_offline.txt", "w", encoding="utf-8") as f:
    for r in results:
        f.write(f"[{r['speaker']}] {r['text']}\n")

print("\n✅ 离线会议记录完成！结果保存至 meeting_offline.txt")