import whisper
import pyaudio
import numpy as np
from collections import deque
import threading
import time
import torch

# Audio capture parameters
FORMAT = pyaudio.paInt16  # 16-bit signed PCM samples
CHANNELS = 1              # mono capture
RATE = 16000              # 16 kHz — the sample rate Whisper expects
CHUNK = 1024  # frames delivered per audio callback
# Raise this value to reduce false triggers from background noise
SILENCE_THRESHOLD = 500  # silence threshold, in raw int16 amplitude units
MIN_AUDIO_LENGTH = 2.0   # minimum buffered audio before transcribing (seconds)
MAX_AUDIO_LENGTH = 10.0  # maximum audio kept in the buffer (seconds)

class RealTimeTranscriber:
    """Stream microphone audio through Whisper and print transcriptions.

    A PyAudio callback thread appends normalized samples to a shared
    buffer; a background worker thread drains and transcribes the buffer
    whenever the speaker pauses. The buffer is guarded by a lock because
    the two threads access it concurrently.
    """

    def __init__(self):
        # Load the Whisper model (options: tiny, base, small, medium, large)
        # and move it to the GPU when one is available.
        self.model = whisper.load_model("small").to("cuda" if torch.cuda.is_available() else "cpu")
        print(f"模型加载完成，使用 {'GPU' if torch.cuda.is_available() else 'CPU'} 加速")

        # Shared audio buffer (float32 samples in [-1, 1)), written by the
        # PyAudio callback and drained by the transcription thread.
        self.audio_buffer = np.array([], dtype=np.float32)
        self.buffer_lock = threading.Lock()

        # Control flags and voice-activity state.
        self.is_running = False
        self.is_speaking = False
        self.last_voice_time = 0
        # Created in start(); kept as None so stop() can tell whether
        # start() was ever called (fixes AttributeError on early stop()).
        self.transcription_thread = None

        # Open the input stream; in callback mode capture begins immediately.
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(
            format=FORMAT,
            channels=CHANNELS,
            rate=RATE,
            input=True,
            frames_per_buffer=CHUNK,
            stream_callback=self.audio_callback
        )

    def audio_callback(self, in_data, frame_count, time_info, status):
        """PyAudio callback: normalize samples, update VAD state, buffer audio.

        Returns ``(in_data, pyaudio.paContinue)`` so the stream keeps running.
        """
        # int16 PCM -> float32 in [-1, 1), the format Whisper consumes.
        audio_data = np.frombuffer(in_data, dtype=np.int16).astype(np.float32) / 32768.0

        # Energy-based voice activity detection: compare the mean absolute
        # amplitude against the silence threshold (normalized to float scale).
        current_volume = np.abs(audio_data).mean()
        if current_volume > SILENCE_THRESHOLD / 32768.0:
            self.is_speaking = True
            self.last_voice_time = time.time()
        elif time.time() - self.last_voice_time > 0.5:
            # 0.5 s without voice -> consider the speaker to have stopped.
            self.is_speaking = False

        # Append the chunk, capping the buffer at MAX_AUDIO_LENGTH seconds
        # so memory stays bounded during long stretches of speech.
        with self.buffer_lock:
            self.audio_buffer = np.concatenate((self.audio_buffer, audio_data))
            if len(self.audio_buffer) > RATE * MAX_AUDIO_LENGTH:
                self.audio_buffer = self.audio_buffer[-int(RATE * MAX_AUDIO_LENGTH):]

        return (in_data, pyaudio.paContinue)

    def transcribe_audio(self):
        """Worker loop: transcribe buffered audio whenever speech pauses."""
        while self.is_running:
            # Drain the buffer only once enough audio has accumulated AND
            # the speaker has gone quiet, so utterances are not cut mid-word.
            with self.buffer_lock:
                audio_length = len(self.audio_buffer) / RATE
                if audio_length >= MIN_AUDIO_LENGTH and not self.is_speaking:
                    audio_to_process = self.audio_buffer.copy()
                    self.audio_buffer = np.array([], dtype=np.float32)
                else:
                    audio_to_process = None

            if audio_to_process is not None and len(audio_to_process) > 0:
                try:
                    # Transcription happens outside the lock so the audio
                    # callback is never blocked by model inference.
                    result = self.model.transcribe(
                        audio_to_process,
                        language='zh',
                        fp16=torch.cuda.is_available(),
                        initial_prompt="以下是普通话内容。"
                    )
                    print(result["text"], end=" ", flush=True)
                except Exception as e:
                    # Log and keep the worker alive on transcription failures.
                    print(f"\n转录错误: {e}")

            # Throttle the polling loop; increase to lower CPU usage.
            time.sleep(0.1)

    def start(self):
        """Start the background transcription thread."""
        self.is_running = True
        # daemon=True so a hung transcription cannot block interpreter exit;
        # normal shutdown still joins the thread in stop().
        self.transcription_thread = threading.Thread(target=self.transcribe_audio, daemon=True)
        self.transcription_thread.start()
        print("实时转录已启动，开始说话吧...（按Ctrl+C停止）")

    def stop(self):
        """Stop transcription and release the audio stream and PyAudio."""
        self.is_running = False
        # Guard: stop() may be called before start() ever ran.
        if self.transcription_thread is not None:
            self.transcription_thread.join()
        self.stream.stop_stream()
        self.stream.close()
        self.p.terminate()
        print("\n实时转录已停止")

if __name__ == "__main__":
    transcriber = RealTimeTranscriber()

    try:
        transcriber.start()
        # Keep the main thread alive; capture and transcription run on
        # background threads until the user interrupts with Ctrl+C.
        while True:
            time.sleep(0.1)
    except KeyboardInterrupt:
        pass
    finally:
        # Always release the audio stream — the original only cleaned up
        # on Ctrl+C and leaked the stream on any other exception.
        transcriber.stop()