import gradio as gr
import sounddevice as sd
import numpy as np
from scipy.io.wavfile import write
from data import use_faster_whisper, ollama_api
import webrtcvad  # 用于语音活动检测
import threading

# Module-level recording state and VAD configuration.
audio_data = []  # accumulated audio frames (numpy arrays) for the current recording
is_recording = False  # toggled by start_recording / stop_recording_and_process
samplerate = 16000  # sample rate in Hz; webrtcvad only accepts 8/16/32/48 kHz
frame_duration_ms = 30  # frame length in ms; webrtcvad accepts 10, 20 or 30 ms frames
frame_size = int(samplerate * (frame_duration_ms / 1000.0))  # samples per frame
vad = webrtcvad.Vad(3)  # VAD aggressiveness 0-3; 3 is the most aggressive filtering


# Number of consecutive silent frames (30 ms each) before auto-stop: ~0.6 s.
_SILENCE_FRAMES_TO_STOP = 20


def callback(indata, frames, time, status):
    """sounddevice InputStream callback: buffer audio and auto-stop on silence.

    Appends each incoming frame to the global ``audio_data`` buffer while
    recording is active, and uses webrtcvad to detect when the speaker has
    finished: only after speech has been heard at least once AND a run of
    consecutive non-speech frames follows do we trigger processing.

    The heavy processing (file write, transcription, LLM chat, stream.stop)
    is offloaded to a daemon thread — sounddevice forbids calling stream
    methods from within the callback, and blocking the audio thread would
    cause dropouts/deadlocks.
    """
    if not is_recording:
        return
    audio_data.append(indata.copy())
    # webrtcvad requires exact 10/20/30 ms frames; skip partial blocks.
    if len(indata) != frame_size:
        return
    # Convert float32 [-1, 1] samples to 16-bit PCM bytes for the VAD.
    pcm16 = (indata * 32767).astype(np.int16)
    if vad.is_speech(pcm16.tobytes(), samplerate):
        callback._heard_speech = True
        callback._silent_frames = 0
        return
    # Silent frame: count it, but only stop once the user has actually
    # spoken — otherwise recording would end immediately in a quiet room.
    callback._silent_frames = getattr(callback, "_silent_frames", 0) + 1
    if getattr(callback, "_heard_speech", False) and \
            callback._silent_frames >= _SILENCE_FRAMES_TO_STOP:
        # Reset state for the next recording session before handing off.
        callback._heard_speech = False
        callback._silent_frames = 0
        threading.Thread(target=stop_recording_and_process, daemon=True).start()


stream = sd.InputStream(samplerate=samplerate, channels=1, callback=callback, blocksize=frame_size)


def start_recording():
    """Reset the audio buffer, start the input stream and report status.

    Returns a status string so the Gradio Textbox bound to this handler
    shows feedback (the original returned None, leaving the UI blank).
    """
    global is_recording, audio_data
    # Clear the buffer BEFORE enabling recording, otherwise the audio
    # callback could append stale frames in between the two assignments.
    audio_data = []
    is_recording = True
    print("开始录音...")
    # Guard against a second click while already recording: starting an
    # active stream raises in sounddevice/PortAudio.
    if not stream.active:
        stream.start()
    return "开始录音..."


def stop_recording_and_process():
    """Stop capture, save the recording, transcribe it and get an LLM reply.

    Returns a user-facing string: either an error/status message, or the
    recognized text together with the model's response.
    """
    global is_recording
    if not is_recording:
        return "请先开始录音！"

    is_recording = False
    print("停止录音.")
    # Stop the stream immediately — before the slow transcription and LLM
    # calls — so the microphone is not left capturing during processing.
    # (The original stopped it last, and the empty-buffer early return
    # below previously left the stream running forever.)
    stream.stop()

    # Confirm that some audio was actually captured.
    if not audio_data:
        return "没有录制到任何音频数据。"

    # Concatenate buffered frames into one array.
    recorded_audio = np.concatenate(audio_data, axis=0)

    # Persist as 16-bit PCM WAV (input samples are float32 in [-1, 1]).
    filename = "recorded_audio.wav"
    write(filename, samplerate, (recorded_audio * 32767).astype(np.int16))

    # Fixed: the original f-string had no placeholder and printed a
    # literal instead of the actual file name.
    print(f"已保存录音文件: {filename}")

    # Speech-to-text via faster-whisper (Chinese).
    recognized_text = use_faster_whisper.transcription(filename, "zh")
    print(f"识别的文字: {recognized_text}")

    # Feed the transcription to the chat model.
    model_name = 'llama3.1'
    messages = [{'role': 'user', 'content': recognized_text}]
    response = ollama_api.ollama_chat(model_name, messages)
    print(f"模型回复: {response}")
    return f"你说: {recognized_text}\n回复: {response}"


# Build the Gradio UI. A stop button is added because
# stop_recording_and_process is the ONLY function that returns the
# transcription and model reply — without a UI trigger bound to it, the
# Textbox could never display a result from a user action.
with gr.Blocks() as demo:
    start_button = gr.Button("开始录音", variant="primary")
    stop_button = gr.Button("停止录音")
    output = gr.Textbox()

    # Bind button events to their handlers; both write into the Textbox.
    start_button.click(start_recording, outputs=output)
    stop_button.click(stop_recording_and_process, outputs=output)

# Launch the Gradio app (blocks until the server is shut down).
demo.launch()
