import pyaudio
import whisper
import numpy as np
import torch

# Select the compute device: fall back to CPU when no CUDA GPU is present.
# (Previously the code printed a warning but still forced device="cuda",
# which raises a RuntimeError on CPU-only machines.)
if not torch.cuda.is_available():
    device = "cpu"
    print("GPU 不可用，程序将无法使用 GPU 加速。")
else:
    device = "cuda"
    print("使用 GPU 加速。")

# Load the Whisper model from a local checkpoint onto the selected device.
model = whisper.load_model("/home/peter/Public/whisper_data/base.pt", device=device)

RATE = 48000         # microphone capture rate (Hz); note Whisper itself expects 16 kHz
CHUNK = 8192         # frames per stream read
BUFFER_DURATION = 5  # seconds of audio to accumulate before each transcription
buffer = []          # pending int16 audio chunks awaiting transcription

# Initialize PyAudio and list every audio device so the user can pick the
# correct input_device_index for the capture stream below.
p = pyaudio.PyAudio()
device_count = p.get_device_count()
for index in range(device_count):
    details = p.get_device_info_by_index(index)
    print(f"Device {index}: {details['name']}, Sample Rate: {details['defaultSampleRate']}")

# Open a mono 16-bit capture stream on the chosen device.
stream = p.open(
    format=pyaudio.paInt16,
    channels=1,
    rate=RATE,
    input=True,
    frames_per_buffer=CHUNK,
    input_device_index=4,  # replace with your device index (see listing above)
)

print("开始实时翻译...")

# Whisper models consume 16 kHz mono float32 audio in [-1, 1]; the stream is
# captured at RATE (48 kHz), so decimate by an integer factor before feeding
# the model. Previously the raw 48 kHz buffer was passed straight to
# transcribe(), which Whisper interprets as 16 kHz and mis-transcribes.
WHISPER_RATE = 16000
DECIMATION = RATE // WHISPER_RATE  # assumes RATE is an exact multiple of 16000

# Loop-invariant: number of chunks that make up BUFFER_DURATION seconds.
CHUNKS_PER_BUFFER = int(RATE / CHUNK * BUFFER_DURATION)


def _to_whisper_audio(pcm):
    """Convert int16 PCM at RATE Hz to 16 kHz float32 in [-1, 1] for Whisper.

    Uses a simple box filter (mean over each DECIMATION-sized group) as crude
    anti-aliasing before decimating.
    """
    usable = len(pcm) - len(pcm) % DECIMATION
    blocks = pcm[:usable].astype(np.float32).reshape(-1, DECIMATION)
    return blocks.mean(axis=1) / 32768.0


try:
    while True:
        try:
            data = stream.read(CHUNK, exception_on_overflow=False)
            audio = np.frombuffer(data, dtype=np.int16)
            buffer.append(audio)

            # Once enough audio has accumulated, run one transcription pass.
            if len(buffer) >= CHUNKS_PER_BUFFER:
                audio_data = np.concatenate(buffer)
                buffer = []

                result = model.transcribe(_to_whisper_audio(audio_data))

                print(f"翻译结果: {result['text']}")

        except IOError as e:
            if e.errno == -9981:  # PortAudio input-overflow error code
                print("Warning: Input overflowed")
                continue
            else:
                raise

except KeyboardInterrupt:
    print("停止翻译...")

finally:
    # Always release the audio device, even on error or Ctrl-C.
    stream.stop_stream()
    stream.close()
    p.terminate()