import gradio as gr
import cv2
import pyaudio
import wave
import threading
import numpy as np
import time
from queue import Queue
import webrtcvad
import os
import torch
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info
from funasr import AutoModel
import pygame
import edge_tts
import asyncio
import langid
import sys

# --- Hugging Face endpoint: use the hf-mirror.com mirror (mainland-China access) ---
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

# ---------- Global model path settings ----------
QWEN_MODEL_DIR = r"D:\Downloads\Qwen2-VL-2B-Instruct"
PROCESSOR_DIR = QWEN_MODEL_DIR
SENCE_VOICE_DIR = r"D:\Downloads\SenseVoiceSmall"

# Capture / VAD parameters (some adjustable at runtime from the UI sliders)
AUDIO_RATE = 16000        # audio sample rate, Hz
AUDIO_CHANNELS = 1        # mono
CHUNK = 1024              # frames per pyaudio read
VAD_MODE = 3              # webrtcvad aggressiveness (0-3; 3 = most aggressive)
OUTPUT_DIR = "./output"   # where saved audio/video segments go
NO_SPEECH_THRESHOLD = 1   # silence duration (seconds) that triggers a flush

# Make sure the output directories exist
os.makedirs(OUTPUT_DIR, exist_ok=True)
os.makedirs("./Test_QWen2_VL/", exist_ok=True)

# Queues buffering audio/video data and log text between threads
audio_queue = Queue()
video_queue = Queue()
log_queue = Queue()

# Locks guarding the shared state below
frame_lock = threading.Lock()     # protects current_frame
segments_lock = threading.Lock()  # protects segments_to_save
log_lock = threading.Lock()       # protects current_log

# Global audio VAD state
last_active_time = time.time()    # last time speech activity was detected
segments_to_save = []             # buffered (raw_audio_bytes, timestamp) speech windows
saved_intervals = []              # (start, end) time ranges already written to disk
last_vad_end_time = 0             # end timestamp of the last flushed segment

# Global app state
recording_active = False          # toggled by the start/stop UI buttons
current_frame = None              # latest camera frame (RGB) shown in the UI
current_log = ""                  # accumulated log text shown in the UI
models_loaded = False
qwen_model = None
processor = None
model_senceVoice = None
audio_file_count = 0              # counter used to name output files

# Initialize the VAD
# NOTE(review): `global` at module scope is a no-op statement; harmless but redundant.
global vad
vad = webrtcvad.Vad()
vad.set_mode(VAD_MODE)

# Initialize the pygame mixer (once, at import time)
pygame.mixer.init()
# Logging helper shared by all threads
def log_print(message):
    """Print *message* to stdout and append it (plus newline) to the shared UI log."""
    global current_log
    print(message)
    with log_lock:
        current_log = f"{current_log}{message}\n"
        log_queue.put(current_log)

# Video frame accessor for the UI
def get_current_frame():
    """Return a copy of the latest camera frame, or a 'Camera Off' placeholder."""
    with frame_lock:
        if current_frame is not None:
            # Copy so callers never share a buffer with the capture thread.
            return current_frame.copy()
    placeholder = np.zeros((480, 640, 3), dtype=np.uint8)
    cv2.putText(placeholder, "Camera Off...", (220, 240),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    return placeholder

# Log text generator consumed by the UI timer
def log_generator():
    """Endless generator yielding the current log text roughly 10x per second."""
    while True:
        if log_queue.empty():
            # Nothing queued: fall back to the current accumulated log.
            with log_lock:
                yield current_log
        else:
            yield log_queue.get()
        time.sleep(0.1)

# Parameter-update callbacks (wired to the UI sliders)
def update_vad_mode(val):
    """Apply a new webrtcvad aggressiveness mode (0-3) and log the change."""
    global VAD_MODE, vad
    mode = int(val)
    VAD_MODE = mode
    vad.set_mode(mode)
    log_print(f"VAD 模式设置为 {VAD_MODE}")
    return current_log

def update_silence_threshold(val):
    """Update the silence duration (seconds) that triggers a segment flush."""
    global NO_SPEECH_THRESHOLD
    threshold = float(val)
    NO_SPEECH_THRESHOLD = threshold
    log_print(f"静音阈值设置为 {NO_SPEECH_THRESHOLD} 秒")
    return current_log

def update_chunk_size(val):
    """Update the number of frames read per pyaudio call."""
    global CHUNK
    size = int(val)
    CHUNK = size
    log_print(f"音频块大小设置为 {CHUNK}")
    return current_log

# Path-update callbacks (wired to the UI textboxes)
def update_qwen_dir(path):
    """Point both the Qwen2-VL model and its processor at *path*; force a reload."""
    global QWEN_MODEL_DIR, PROCESSOR_DIR, models_loaded
    QWEN_MODEL_DIR = PROCESSOR_DIR = path
    models_loaded = False  # next init_models() call reloads from the new path
    log_print(f"更新 Qwen2-VL 模型目录: {path}")
    return current_log

def update_sence_dir(path):
    """Point the SenseVoice ASR model at *path*; force a reload."""
    global SENCE_VOICE_DIR, models_loaded
    SENCE_VOICE_DIR = path
    models_loaded = False  # next init_models() call reloads from the new path
    log_print(f"更新 SenseVoice 模型目录: {path}")
    return current_log

# Model loading
def load_models():
    """Load Qwen2-VL (+ its processor) and SenseVoice into module globals.

    Returns:
        True when both models loaded; False if either load raised.
        `models_loaded` is set only after both succeed.
    """
    global qwen_model, processor, model_senceVoice, models_loaded

    log_print("开始加载模型...")

    try:
        log_print("加载 QWen2-VL 模型...")
        qwen_model = Qwen2VLForConditionalGeneration.from_pretrained(
            QWEN_MODEL_DIR, torch_dtype="auto", device_map="auto"
        )
        # Constrain the processor's image resolution to reduce GPU memory use.
        processor = AutoProcessor.from_pretrained(
            PROCESSOR_DIR,
            min_pixels=256 * 28 * 28,
            max_pixels=512 * 28 * 28,
        )
        log_print("Qwen2-VL 模型加载完成")
    except Exception as e:
        log_print(f"加载 Qwen2-VL 失败: {e}")
        return False

    try:
        log_print("加载 SenceVoice 语音识别模型...")
        model_senceVoice = AutoModel(model=SENCE_VOICE_DIR, trust_remote_code=True, disable_update=True)
        log_print("SenseVoice 模型加载完成")
    except Exception as e:
        log_print(f"加载 SenseVoice 失败: {e}")
        return False

    models_loaded = True
    log_print("所有模型加载完成")
    return True

# Gradio binding for the "load models" button
def init_models():
    """Lazily load the models; return a Chinese status string for the UI."""
    if models_loaded:
        return "模型加载完成"
    return "模型加载完成" if load_models() else "模型加载失败"

# Recording controls
def start_recording():
    """Gradio callback: enable the audio/video capture threads."""
    global recording_active
    recording_active = True
    log_print("录制开始")
    return current_log

def stop_recording():
    """Gradio callback: pause the audio/video capture threads."""
    global recording_active
    recording_active = False
    log_print("录制停止")
    return current_log


# Video capture thread
def video_recorder_thread():
    """Background thread: capture webcam frames while recording is active.

    While `recording_active` is True, reads frames from camera 0, converts
    them to RGB, pushes (frame, timestamp) onto `video_queue` for later
    saving, and publishes each frame to `current_frame` for the UI. When
    recording is off, releases the camera and shows a "Camera Off" card.
    Loops forever; intended to run as a daemon thread.
    """
    
    global recording_active, video_queue, current_frame
    
    cap = None
    
    try:
        while True:
            try:
                if recording_active:
                    if cap is None:
                        # Lazily (re)open the camera on demand.
                        cap = cv2.VideoCapture(0)
                        if not cap.isOpened():
                            log_print("无法打开摄像头")
                            time.sleep(1)
                            continue
                    
                    ret, frame = cap.read()
                    if ret:
                        # OpenCV delivers BGR; UI display uses RGB.
                        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                        video_queue.put((frame, time.time()))
                        with frame_lock:
                            current_frame = frame  # publish the latest frame to the UI
                    else:
                        log_print("无法获取摄像头画面")
                        # Read failed: drop the handle and reopen next pass.
                        if cap is not None:
                            cap.release()
                        cap = None
                else:
                    if cap is not None:
                        cap.release()
                        cap = None
                    # Recording paused: show a black "Camera Off" placeholder.
                    blank_frame = np.zeros((480, 640, 3), dtype=np.uint8)
                    cv2.putText(blank_frame, "Camera Off...", (220, 240), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
                    with frame_lock:
                        current_frame = blank_frame
                    
                time.sleep(0.03)  # ~30 FPS pacing
                
            except Exception as e:
                log_print(f"视频线程错误: {e}")
                if cap is not None:
                    cap.release()
                    cap = None
                time.sleep(1)
    finally:
        if cap is not None:
            cap.release()

# Audio capture thread
def audio_recorder():
    """Background thread: capture microphone audio and segment it with VAD.

    While `recording_active` is True, reads CHUNK-frame blocks from the mic,
    runs webrtcvad over ~0.5 s windows, and appends speech windows to
    `segments_to_save`. After `NO_SPEECH_THRESHOLD` seconds without speech,
    flushes buffered segments via save_audio_video(). Loops forever;
    intended to run as a daemon thread.
    """
    global audio_queue, recording_active, last_active_time, segments_to_save, last_vad_end_time

    p = pyaudio.PyAudio()
    stream = None
    audio_buffer = []
    
    try:
        while True:
            try:
                if recording_active:
                    if stream is None:
                        # Lazily open a 16-bit mono input stream.
                        stream = p.open(format=pyaudio.paInt16,
                                    channels=AUDIO_CHANNELS,
                                    rate=AUDIO_RATE,
                                    input=True,
                                    frames_per_buffer=CHUNK)
                        log_print("音频录制已开始")
                    
                    data = stream.read(CHUNK)
                    audio_buffer.append(data)
                    
                    # Run VAD once roughly every 0.5 s of buffered audio
                    # (each block holds CHUNK samples at AUDIO_RATE Hz).
                    if len(audio_buffer) * CHUNK / AUDIO_RATE >= 0.5:
                        # Concatenate the buffered blocks and test for speech.
                        raw_audio = b''.join(audio_buffer)
                        vad_result = check_vad_activity(raw_audio)
                        
                        if vad_result:
                            log_print("检测到语音活动")
                            last_active_time = time.time()
                            with segments_lock:
                                segments_to_save.append((raw_audio, time.time()))
                        else:
                            log_print("静音中...")
                        
                        audio_buffer = []  # reset the analysis window
                    
                    # Flush buffered speech after a long enough silence.
                    current_time = time.time()
                    if current_time - last_active_time > NO_SPEECH_THRESHOLD:
                        # Snapshot segment state while holding the lock.
                        with segments_lock:
                            has_segments = bool(segments_to_save)
                            if has_segments:
                                last_segment_time = segments_to_save[-1][1]
                            else:
                                last_segment_time = 0
                            
                        if has_segments and last_segment_time > last_vad_end_time:
                            save_audio_video()
                            last_active_time = current_time
                else:
                    if stream is not None:
                        # Recording paused: close the stream and drop partial audio.
                        stream.stop_stream()
                        stream.close()
                        stream = None
                        audio_buffer = []
                        log_print("音频录制已停止")
                    time.sleep(0.1)
                    
            except Exception as e:
                log_print(f"音频录制错误: {e}")
                if stream is not None:
                    stream.stop_stream()
                    stream.close()
                    stream = None
                time.sleep(1)
    finally:
        if stream is not None:
            stream.stop_stream()
            stream.close()
        p.terminate()

# VAD activity check over a raw PCM buffer
def check_vad_activity(audio_data):
    """Return True when at least 40% of the 20 ms frames in *audio_data* are speech.

    Args:
        audio_data: raw 16-bit mono PCM bytes sampled at AUDIO_RATE.

    Returns:
        bool: True if the activation ratio reaches the 40% threshold
        (with a floor of one active frame), else False.
    """
    # A 20 ms frame is AUDIO_RATE * 0.02 samples, 2 bytes per 16-bit sample.
    # BUG FIX: the original used int(AUDIO_RATE * 0.02) *bytes* as the frame
    # size, i.e. only 10 ms of audio — a samples-vs-bytes confusion; its
    # minimum-length check likewise compared bytes against a sample count.
    frame_bytes = int(AUDIO_RATE * 0.02) * 2

    if len(audio_data) < frame_bytes:
        # Too short for even one complete VAD frame.
        return False

    activation_rate = 0.4

    # Split into complete 20 ms frames; drop any trailing partial frame,
    # because webrtcvad only accepts exact 10/20/30 ms frames.
    chunks = [
        audio_data[i:i + frame_bytes]
        for i in range(0, len(audio_data) - frame_bytes + 1, frame_bytes)
    ]

    if not chunks:
        return False

    # Require at least one active frame even for very short buffers.
    required = max(1, round(activation_rate * len(chunks)))

    active = 0
    for chunk in chunks:
        try:
            if vad.is_speech(chunk, sample_rate=AUDIO_RATE):
                active += 1
        except Exception as e:
            log_print(f"VAD检测错误: {e}")
            continue

    return active >= required

# Flush buffered speech + matching video frames to disk
def save_audio_video():
    """Write buffered speech segments to a WAV plus a matching AVI clip.

    Drains `segments_to_save` into one WAV file, collects video frames whose
    timestamps fall inside the segment's time range into an AVI, then runs
    Inference() on the pair in a new thread. Also interrupts any TTS audio
    currently playing (barge-in behavior).
    """
    global segments_to_save, video_queue, last_vad_end_time, saved_intervals

    # Monotonic counter used to name the output files.
    # NOTE(review): incremented even when the early-returns below skip
    # saving, so file numbering may have gaps.
    global audio_file_count
    audio_file_count += 1
    audio_output_path = f"{OUTPUT_DIR}/audio_{audio_file_count}.wav"
    video_output_path = f"{OUTPUT_DIR}/video_{audio_file_count}.avi"

    with segments_lock:
        if not segments_to_save:
            return
    
        # Barge-in: a new utterance stops whatever TTS audio is playing.
        if pygame.mixer.music.get_busy():
            pygame.mixer.music.stop()
            log_print("检测到新的有效音，已停止当前音频播放")

        # Time range covered by the buffered segments.
        start_time = segments_to_save[0][1]
        end_time = segments_to_save[-1][1]
        
        # Skip if this range overlaps the previously saved interval.
        if saved_intervals and saved_intervals[-1][1] >= start_time:
            log_print("当前片段与之前片段重叠，跳过保存")
            segments_to_save.clear()
            return
        
        # Grab the raw audio bytes...
        audio_frames = [seg[0] for seg in segments_to_save]
        # ...and clear the shared buffer.
        segments_to_save.clear()
    
    # File I/O runs after the lock is released to keep hold time short.
    wf = wave.open(audio_output_path, 'wb')
    wf.setnchannels(AUDIO_CHANNELS)
    wf.setsampwidth(2)  # 16-bit PCM
    wf.setframerate(AUDIO_RATE)
    wf.writeframes(b''.join(audio_frames))
    wf.close()
    log_print(f"音频保存至 {audio_output_path}")
    
    # Collect video frames inside [start_time, end_time].
    video_frames = []
    temp_queue = Queue()
    
    # Drain the queue, keeping in-range frames and parking the rest.
    while not video_queue.empty():
        frame, timestamp = video_queue.get()
        if start_time <= timestamp <= end_time:
            video_frames.append(frame)
        else:
            temp_queue.put((frame, timestamp))
    
    # Return the unused frames to the shared queue.
    while not temp_queue.empty():
        video_queue.put(temp_queue.get())
    
    if video_frames:
        # NOTE(review): the capture thread converts frames to RGB, but
        # cv2.VideoWriter expects BGR — saved clips may have swapped
        # red/blue channels; confirm.
        out = cv2.VideoWriter(video_output_path, cv2.VideoWriter_fourcc(*'XVID'), 20.0, (640, 480))
        for frame in video_frames:
            out.write(frame)
        out.release()
        log_print(f"视频保存至 {video_output_path}")

        # Run the heavy model pipeline off this audio thread.
        inference_thread = threading.Thread(target=Inference, args=(video_output_path, audio_output_path))
        inference_thread.start()
    
    # Remember the saved time span for future overlap detection.
    saved_intervals.append((start_time, end_time))

# --- Blocking audio playback helper ---
def play_audio(file_path):
    """Play *file_path* through pygame's music mixer, blocking until it ends."""
    try:
        # pygame.mixer is initialised once at module import time.
        pygame.mixer.music.load(file_path)
        pygame.mixer.music.play()
        # Poll once per second until playback finishes.
        while pygame.mixer.music.get_busy():
            time.sleep(1)
        log_print("播放完成！")
    except Exception as e:
        log_print(f"播放失败: {e}")

async def amain(TEXT, VOICE, OUTPUT_FILE) -> None:
    """Synthesize TEXT with the edge-tts voice VOICE and save it to OUTPUT_FILE."""
    await edge_tts.Communicate(TEXT, VOICE).save(OUTPUT_FILE)

# Global model variables
# NOTE(review): these duplicate identical module-level definitions near the
# top of the file. Re-assigning them here is redundant (harmless at import
# time, since load_models() has not run yet) — consider removing.
qwen_model = None
processor = None
model_senceVoice = None
models_loaded = False

def Inference(TEMP_VIDEO_FILE, TEMP_AUDIO_FILE):
    """Run the ASR -> vision-language-model -> TTS pipeline on one segment.

    Steps:
      1. Sample four frames (at 20/40/60/80% of the clip) from
         TEMP_VIDEO_FILE and save them as JPEGs.
      2. Transcribe TEMP_AUDIO_FILE with SenseVoice; the transcript plus a
         "keep it short" instruction becomes the prompt.
      3. Ask Qwen2-VL about the sampled frames + prompt.
      4. Detect the reply's language, synthesize speech with edge-tts, and
         play it back.
    Each stage logs and aborts the pipeline on failure.
    """
    global qwen_model, processor, model_senceVoice, models_loaded
    
    # Refuse to run unless every model is fully loaded.
    if not models_loaded or qwen_model is None or processor is None or model_senceVoice is None:
        log_print("模型尚未加载或加载不完全，无法进行推理")
        return
    
    folder_path = "./Test_QWen2_VL/"
    
    # Scratch directory for sampled frames and synthesized audio.
    os.makedirs(folder_path, exist_ok=True)
    
    try:
        cap = cv2.VideoCapture(TEMP_VIDEO_FILE)
        if not cap.isOpened():
            log_print(f"无法打开视频文件: {TEMP_VIDEO_FILE}")
            return
            
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        # --- Fractions of the clip at which frames are sampled
        S_index = [0.2, 0.4, 0.6, 0.8]
        frame_index = [int(total_frames * i) for i in S_index]
        # Seek to each sampled position and dump the frame to disk.
        for idx in frame_index:
            cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
            ret, frame = cap.read()
            if not ret:
                log_print(f"无法读取帧索引 {idx}")
            else:
                # Save the frame.
                file_path = os.path.join(folder_path, f"captured_image{idx}.jpg")  # output path for this frame
                cv2.imwrite(file_path, frame)
        
        cap.release()

        # -------- SenseVoice ASR --start-------
        try:
            input_file = (TEMP_AUDIO_FILE)
            res = model_senceVoice.generate(
                input=input_file,
                cache={},
                language="auto", # "zn", "en", "yue", "ja", "ko", "nospeech"
                use_itn=False,
            )
            # Keep only the text after SenseVoice's last "<|...|>" tag and
            # append an instruction asking for a short (<=50 char) reply.
            prompt = res[0]['text'].split(">")[-1] + "，回答简短一些，保持50字以内！"
            log_print("ASR OUT: " + prompt)
        except Exception as e:
            log_print(f"语音识别错误: {e}")
            return
        # ---------SenseVoice ASR--end----------

        MODE_FLAG = 0  # 0 = multi-image mode; non-zero = whole-video mode
        # -------- Qwen2-VL inference --------- multi-image mode
        try:
            if not MODE_FLAG:
                messages = [
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "video",
                                "video": [
                                    f'{os.path.join(folder_path, f"captured_image{frame_index[0]}.jpg")}',
                                    f'{os.path.join(folder_path, f"captured_image{frame_index[1]}.jpg")}',
                                    f'{os.path.join(folder_path, f"captured_image{frame_index[2]}.jpg")}',
                                    f'{os.path.join(folder_path, f"captured_image{frame_index[3]}.jpg")}',
                                ],
                                "fps": 1.0,
                            },
                            {"type": "text", "text": f"{prompt}"},
                        ],
                    }
                ]
            # -------- Qwen2-VL inference --------- whole-video mode
            else:
                messages = [
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "video",
                                "video": f"{TEMP_VIDEO_FILE}",
                                "max_pixels": 360 * 420,
                                "fps": 1.0,
                            },
                            {"type": "text", "text": f"{prompt}"},
                        ],
                    }
                ]

            # Preparation for inference
            text = processor.apply_chat_template(
                messages, tokenize=False, add_generation_prompt=True
            )
            image_inputs, video_inputs = process_vision_info(messages)
            inputs = processor(
                text=[text],
                images=image_inputs,
                videos=video_inputs,
                padding=True,
                return_tensors="pt",
            )
            # NOTE(review): hard-codes CUDA; will raise on CPU-only machines.
            inputs = inputs.to("cuda")

            # Inference
            generated_ids = qwen_model.generate(**inputs, max_new_tokens=128)
            # Strip the echoed prompt tokens, keeping only newly generated ones.
            generated_ids_trimmed = [
                out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
            ]
            output_text = processor.batch_decode(
                generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
            )
            log_print("模型回复: " + output_text[0])
        except Exception as e:
            log_print(f"QWen2-VL模型推理错误: {e}")
            return

        # Text to speak
        text = output_text[0]
        # Language identification via langid
        try:
            language, confidence = langid.classify(text)

            # Detected language -> edge-tts voice.
            # NOTE(review): "es" maps to a Catalan (ca-ES) voice — confirm intended.
            language_speaker = {
            "ja" : "ja-JP-NanamiNeural",            # ok
            "fr" : "fr-FR-DeniseNeural",            # ok
            "es" : "ca-ES-JoanaNeural",             # ok
            "de" : "de-DE-KatjaNeural",             # ok
            "zh" : "zh-CN-XiaoyiNeural",            # ok
            "en" : "en-US-AnaNeural",               # ok
            }

            # Fall back to the Chinese voice for unmapped languages.
            if language not in language_speaker.keys():
                used_speaker = "zh-CN-XiaoyiNeural"
            else:
                used_speaker = language_speaker[language]
                log_print(f"检测到语种：{language}, 使用音色：{language_speaker[language]}")

            global audio_file_count
            try:
                # Synthesize to mp3, then play it back (blocking).
                asyncio.run(amain(text, used_speaker, os.path.join(folder_path,f"sft_{audio_file_count}.mp3")))
                play_audio(f'{folder_path}/sft_{audio_file_count}.mp3')
            except Exception as e:
                log_print(f"语音合成或播放错误: {e}")
        except Exception as e:
            log_print(f"语种检测错误: {e}")
    except Exception as e:
        log_print(f"推理过程中发生错误: {e}")


# Gradio UI
def create_gradio_interface():
    """Build and return the Gradio Blocks UI for the ASR-LLM-TTS demo.

    Left column: live camera image plus a collapsible settings panel (model
    paths, VAD mode, silence threshold, chunk size). Right column: the
    rolling log. Buttons load models and start/stop recording; a 100 ms
    timer refreshes both the camera image and the log text.
    """
    with gr.Blocks() as demo:
        gr.Markdown("# ASR-LLM-TTS 系统")
        with gr.Row():
            with gr.Column():
                video_output = gr.Image(label="摄像头画面")
                with gr.Accordion("参数设置", open=False):
                    qwen_dir_in = gr.Textbox(value=QWEN_MODEL_DIR, label="Qwen2-VL 目录")
                    sence_dir_in = gr.Textbox(value=SENCE_VOICE_DIR, label="SenseVoice 目录")
                    # Sliders for runtime-tunable parameters
                    vad_slider = gr.Slider(0, 3, value=VAD_MODE, step=1, label="VAD 模式")
                    silence_slider = gr.Slider(0.0, 5.0, value=NO_SPEECH_THRESHOLD, step=0.1, label="静音阈值 (秒)")
                    chunk_slider = gr.Slider(256, 4096, value=CHUNK, step=256, label="音频块大小")
            with gr.Column():
                log_output = gr.Textbox(lines=20, interactive=False, label="日志信息")

        with gr.Row():
            load_btn = gr.Button("加载模型")
            start_btn = gr.Button("开始录制")
            stop_btn = gr.Button("停止录制")

        # Button events
        load_btn.click(init_models, [], log_output)
        start_btn.click(start_recording, [], log_output)
        stop_btn.click(stop_recording, [], log_output)
        # Path textbox events
        qwen_dir_in.change(update_qwen_dir, inputs=[qwen_dir_in], outputs=[log_output])
        sence_dir_in.change(update_sence_dir, inputs=[sence_dir_in], outputs=[log_output])
        # Slider events
        vad_slider.change(update_vad_mode, inputs=[vad_slider], outputs=[log_output])
        silence_slider.change(update_silence_threshold, inputs=[silence_slider], outputs=[log_output])
        chunk_slider.change(update_chunk_size, inputs=[chunk_slider], outputs=[log_output])

        # Timer refreshing the video frame and log every 100 ms.
        # NOTE(review): each tick constructs a fresh log_generator and takes
        # a single value from it, then discards it — confirm this is intended
        # rather than reusing one generator instance.
        timer = gr.Timer(value=0.1, active=True)
        timer.tick(lambda _: get_current_frame(), inputs=[timer], outputs=[video_output])
        timer.tick(lambda _: next(log_generator()), inputs=[timer], outputs=[log_output])

    return demo

if __name__ == "__main__":
    # Re-initialize mutable globals at startup
    last_active_time = time.time()
    audio_file_count = 0
    
    # Make sure all output directories exist
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    os.makedirs("./Test_QWen2_VL/", exist_ok=True)
    
    # Start the capture threads (daemons: they exit with the main process)
    threading.Thread(target=audio_recorder, daemon=True).start()
    threading.Thread(target=video_recorder_thread, daemon=True).start()

    # Startup banner
    print("ASR-LLM-TTS 系统启动中...")
    print(f"QWen2-VL模型目录: {QWEN_MODEL_DIR}")
    print(f"SenceVoice模型目录: {SENCE_VOICE_DIR}")
    print(f"输出目录: {OUTPUT_DIR}")
    
    try:
        app = create_gradio_interface()
        app.queue()
        app.launch()
    except Exception as e:
        print(f"启动Gradio界面失败: {e}")
        sys.exit(1)