from faster_whisper import WhisperModel
import numpy as np

def detect_speaker_switch(audio_path: str, model_size: str = "base.en") -> list[float]:
    """Transcribe an audio file and return candidate speaker-switch times.

    Each transcription segment's start time is treated as a potential
    switch point. NOTE: segment boundaries alone do not prove a speaker
    change — a real deployment should integrate speaker diarization /
    voiceprint recognition on top of these candidates.

    Args:
        audio_path: Path to the audio file to transcribe.
        model_size: faster-whisper model identifier (downloaded
            automatically on first use).

    Returns:
        Segment start times in seconds, in transcription order.
    """
    # Load the model (auto-downloaded if missing); int8 on CPU keeps
    # memory usage low at a small accuracy cost.
    model = WhisperModel(model_size, device="cpu", compute_type="int8")

    # Transcribe with VAD filtering; the result is a lazy generator,
    # so materialize it once before reporting.
    segments, _ = model.transcribe(audio_path, vad_filter=True)
    segments = list(segments)
    print(f"分段数量: {len(segments)}")

    # Single pass over segments: report each one and record its start.
    # (Original code iterated the list twice and printed the literal
    # string "/n" — a typo for the "\n" escape — now fixed/removed.)
    switch_points: list[float] = []
    for segment in segments:
        print(f"[{segment.start:.1f}s → {segment.end:.1f}s]: {segment.text}")
        switch_points.append(segment.start)

    return switch_points

# Example usage — guarded so importing this module does not trigger a
# model download and transcription as a side effect.
if __name__ == "__main__":
    switches = detect_speaker_switch("whisper_test.mp3")
    print(f"说话人切换时间点：{switches}")