import cv2
import pyaudio
import wave
import time
from queue import Queue
import webrtcvad
import threading
import pygame
import edge_tts
import asyncio
from time import sleep
import base64
from openai import OpenAI
import langid
import os
import requests
import json
import re


import json  # 新增，用于解析天气信息
import numpy as np
from scipy.fft import fft, ifft

import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)  # use BCM pin numbering
MOTOR_PIN = 4  # BCM pin driving the vibration motor
GPIO.setup(MOTOR_PIN, GPIO.OUT)

# 启动震动提示函数
# Vibration alert helper
def motor_warn(duration):
    '''
    Pulse the vibration motor for *duration* seconds.

    Usage:
        vibration_thread = threading.Thread(target=motor_warn, args=(2,))
        vibration_thread.start()
    '''
    try:
        GPIO.output(MOTOR_PIN, GPIO.HIGH)  # start vibrating
        time.sleep(duration)
    except Exception as e:
        print(f"马达震动控制出错: {e}")
    finally:
        # Fix: always switch the motor off, even if sleep/output raised;
        # the original left the motor running on error.
        try:
            GPIO.output(MOTOR_PIN, GPIO.LOW)  # stop
        except Exception as e:
            print(f"马达震动控制出错: {e}")

# Weather API helpers (project-local module)
from weather_api import fetchWeather, getLocation

# --- Configure a HuggingFace mirror endpoint (reachable from mainland China) ---
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

# Rolling chat history providing multi-turn context to the model
history_messages = []
MAX_HISTORY = 3  # max number of user/assistant exchange pairs kept

# OpenAI-compatible client for the Qwen-VL endpoint.
# SECURITY NOTE(review): the fallback API key is hard-coded in source;
# rotate it and supply the real key only via the environment.
API_KEY = os.getenv("GenStudio_API_Key", "sk-jbf6xlaz7avigys3")
BASE_URL = os.getenv("DEFAULT_BASE_URL", "https://cloud.infini-ai.com/maas/v1")
client = OpenAI(api_key=API_KEY, base_url=BASE_URL)
model_name = "qwen2.5-vl-72b-instruct"

# Parameter settings
AUDIO_RATE = 16000  # audio sample rate: 16000 samples captured per second
AUDIO_CHANNELS = 1  # mono audio
CHUNK = 1024  # samples per stream.read() call (~64 ms at 16 kHz)
VAD_MODE = 3  # WebRTC VAD aggressiveness (0-3; higher = more sensitive)
OUTPUT_DIR = "./output"  # where captured audio/video segments are written
NO_SPEECH_THRESHOLD = 2  # seconds of silence before a segment is finalized
audio_file_count = 0  # monotonically increasing index used in output filenames

# Noise-detection parameters
NOISE_LEVEL_THRESHOLD = 0.1  # mean |amplitude|/32768 above which environment counts as noisy
INITIAL_CALIBRATION_DURATION = 2.0  # seconds of ambient audio sampled at startup

rate = 0.4  # fraction of VAD frames that must be speech (retuned after calibration)
# Denoising parameters
NOISE_ESTIMATION_FRAMES = 10  # frames (~200 ms) used to estimate the noise spectrum
NOISE_REDUCTION_FACTOR = 0.8  # spectral-subtraction strength (0-1; higher = stronger)

# Scene configuration: per-scene silence threshold and speech-frequency bound
SCENE_CONFIG = {
    "conversation": {"threshold": 1.0, "speech_freq_threshold": 0.5},  # fast dialogue
    "quiet": {"threshold": 3.0, "speech_freq_threshold": 0.2},         # quiet environment
    "default": {"threshold": 2.0, "speech_freq_threshold": 0.3}        # default
}

# Fixed navigation origin as "lng,lat" — presumably the device's usual
# location; TODO confirm with the deployment site.
DEFAULT_ORIGIN = "126.681632,45.776638"
# SECURITY NOTE(review): hard-coded AMap key; move it to environment/config.
AMAP_API_KEY = "eed154fb84e93ae3f1d0fbdb6b36abec"

# Currently active scene name (key into SCENE_CONFIG)
CURRENT_SCENE = "default"

# Ensure the output directory exists
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Queues buffering audio and video for synchronized saving
audio_queue = Queue()
video_queue = Queue()

# Global state shared between the recorder threads
last_active_time = time.time()  # last time speech activity was detected
recording_active = True  # recorder threads keep running while this is True
segments_to_save = []  # pending (raw_audio_bytes, timestamp) speech segments
saved_intervals = []  # (start, end) time ranges already written to disk
last_vad_end_time = 0  # end time of the last saved valid speech segment

# Initialize WebRTC VAD
vad = webrtcvad.Vad()  # voice-activity-detection object
vad.set_mode(VAD_MODE)  # set detection aggressiveness

# XunFei (iFLYTEK) websocket ASR helpers from iat_ws_python3.py
from iat_ws_python3 import Ws_Param, on_message, on_error, on_close, on_open, get_recognition_result
import websocket
import ssl

# XunFei API credentials
# SECURITY NOTE(review): credentials are hard-coded; move to environment/config.
XF_APPID = '3c3833be'
XF_APIKey = 'ed5b5792eb1fc9cb3a5e1aeb5d0abb1c'
XF_APISecret = 'NTAxZjNmYzE5YzA2NjA3MTUwZjg3YzY0'

# Special-scenario flags
crossing_road = False  # True while the "crossing the road" loop is active
crossing_road_start_time = 0
selecting_clothes = False  # True while the "choosing clothes" scenario is active

def audio_recorder():
    """Microphone capture thread.

    Calibrates the ambient noise level, then continuously reads audio,
    runs VAD over 0.5 s windows, adapts the scene-specific silence
    threshold, and triggers save_audio_video() once NO_SPEECH_THRESHOLD
    seconds pass without speech.
    """
    global audio_queue, recording_active, last_active_time, segments_to_save, last_vad_end_time, VAD_MODE, rate, CURRENT_SCENE, NO_SPEECH_THRESHOLD

    try:
        p = pyaudio.PyAudio()
        stream = p.open(format=pyaudio.paInt16, channels=AUDIO_CHANNELS, rate=AUDIO_RATE, input=True, frames_per_buffer=CHUNK)
    except Exception as e:
        print(f"无法初始化音频设备: {e}")
        recording_active = False
        return

    try:
        # --- Ambient-noise calibration ---
        print("正在检测环境噪声水平...")
        calibration_samples = []
        start_time = time.time()
        while time.time() - start_time < INITIAL_CALIBRATION_DURATION:
            try:
                calibration_samples.append(stream.read(CHUNK, exception_on_overflow=False))
            except Exception as e:
                print(f"音频读取失败: {e}")
                break

        # Mean absolute amplitude normalized to [0, 1].
        if calibration_samples:
            audio_array = np.frombuffer(b''.join(calibration_samples), dtype=np.int16)
            noise_level = np.mean(np.abs(audio_array)) / 32768.0
        else:
            # Fix: an empty calibration buffer previously produced NaN here.
            noise_level = 0.0
        print(f"检测到噪声水平: {noise_level:.3f}")

        # Dynamically tune the VAD: noisy rooms get a less aggressive mode
        # but a higher speech-frame ratio.
        if noise_level > NOISE_LEVEL_THRESHOLD:
            VAD_MODE, rate = 2, 0.5
        else:
            VAD_MODE, rate = 3, 0.3
        vad.set_mode(VAD_MODE)
        print(f"设置 VAD_MODE={VAD_MODE}, rate={rate}")

        audio_buffer = []
        speech_count = 0
        last_speech_time = time.time()
        print("音频录制已开始")

        while recording_active:
            try:
                audio_buffer.append(stream.read(CHUNK, exception_on_overflow=False))

                # Process in ~0.5 s windows.
                if len(audio_buffer) * CHUNK / AUDIO_RATE >= 0.5:
                    raw_audio = b''.join(audio_buffer)
                    if check_vad_activity(raw_audio, rate):
                        print("检测到语音活动")
                        last_active_time = time.time()
                        segments_to_save.append((raw_audio, time.time()))
                        # Classify the scene from how frequently speech occurs.
                        speech_count += 1
                        speech_freq = speech_count / max(time.time() - last_speech_time, 1.0)
                        if speech_freq > SCENE_CONFIG["conversation"]["speech_freq_threshold"]:
                            CURRENT_SCENE = "conversation"
                        elif speech_freq < SCENE_CONFIG["quiet"]["speech_freq_threshold"]:
                            CURRENT_SCENE = "quiet"
                        else:
                            CURRENT_SCENE = "default"
                        NO_SPEECH_THRESHOLD = SCENE_CONFIG[CURRENT_SCENE]["threshold"]
                        print(f"当前场景: {CURRENT_SCENE}, NO_SPEECH_THRESHOLD={NO_SPEECH_THRESHOLD}")
                    else:
                        print("静音中...")
                    audio_buffer = []

                # Silence for NO_SPEECH_THRESHOLD seconds ends the segment.
                if time.time() - last_active_time > NO_SPEECH_THRESHOLD:
                    if segments_to_save and segments_to_save[-1][1] > last_vad_end_time:
                        save_audio_video()
                        last_active_time = time.time()
                        speech_count = 0
                        last_speech_time = time.time()
            except Exception as e:
                print(f"音频录制错误: {e}")
                break
    finally:
        # Fix: always release the audio device — the original skipped
        # cleanup when the loop exited via an unexpected exception path.
        stream.stop_stream()
        stream.close()
        p.terminate()
# Video capture thread
def video_recorder():
    """Continuously grab webcam frames and push (frame, timestamp) pairs
    onto the shared video queue until recording_active goes False."""
    global video_queue, recording_active

    cap = cv2.VideoCapture(0)  # default camera
    print("视频录制已开始")

    while recording_active:
        ok, frame = cap.read()
        if not ok:
            print("无法获取摄像头画面")
            continue
        video_queue.put((frame, time.time()))

    cap.release()

def check_vad_activity(audio_data, rate=0.4):
    """Decide whether *audio_data* (16-bit mono PCM bytes) contains speech.

    The buffer is split into 20 ms frames; each frame is denoised by FFT
    spectral subtraction (noise spectrum estimated from the first
    NOISE_ESTIMATION_FRAMES frames) and fed to WebRTC VAD. Returns True
    when more than *rate* of the frames are classified as speech.
    """
    audio_array = np.frombuffer(audio_data, dtype=np.int16).astype(np.float32)
    step = int(AUDIO_RATE * 0.02)  # samples per 20 ms VAD frame
    total_frames = len(audio_array) // step
    # Fix: the original computed the threshold from len(audio_data) — the
    # BYTE length, twice the sample count — doubling the required speech
    # ratio and making rate >= 0.5 impossible to satisfy.
    flag_rate = round(rate * total_frames)

    noise_mag = None
    if len(audio_array) >= step * NOISE_ESTIMATION_FRAMES:
        # Fix: average the magnitude spectrum over the first
        # NOISE_ESTIMATION_FRAMES frames. The original FFT'd only the first
        # frame (fft(..., n=step) truncates) and then divided that single
        # spectrum by the frame count.
        noise_frames = audio_array[:step * NOISE_ESTIMATION_FRAMES].reshape(
            NOISE_ESTIMATION_FRAMES, step)
        noise_mag = np.abs(fft(noise_frames, axis=1)).mean(axis=0)

    num = 0
    # Fix: iterate over sample indices (the original ranged over the byte
    # length, producing a tail of empty chunks).
    for start in range(0, total_frames * step, step):
        chunk = audio_array[start:start + step]
        if noise_mag is not None:
            # Spectral subtraction: shrink magnitudes, keep phases.
            spec = fft(chunk)
            reduced = np.maximum(np.abs(spec) - NOISE_REDUCTION_FACTOR * noise_mag, 0)
            spec = reduced * np.exp(1j * np.angle(spec))
            # Clip before the int16 cast to avoid wrap-around on loud frames.
            chunk = np.clip(np.real(ifft(spec)), -32768, 32767).astype(np.int16)
        else:
            chunk = chunk.astype(np.int16)

        if vad.is_speech(chunk.tobytes(), sample_rate=AUDIO_RATE):
            num += 1

    return num > flag_rate

# Save buffered audio plus the time-matched video frames
def save_audio_video():
    """Flush pending speech segments to a WAV file, write the camera frames
    whose timestamps fall inside the segment to an AVI, and launch
    Inference() on the pair in a background thread.

    Also implements barge-in: any TTS audio currently playing is stopped
    when a new valid segment arrives. Segments overlapping the previously
    saved interval are skipped.
    """
    pygame.mixer.init()
    global segments_to_save, video_queue, last_vad_end_time, saved_intervals
    global audio_file_count, crossing_road, crossing_road_start_time

    audio_file_count += 1
    audio_output_path = f"{OUTPUT_DIR}/audio_{audio_file_count}.wav"
    video_output_path = f"{OUTPUT_DIR}/video_{audio_file_count}.avi"

    if not segments_to_save:
        return

    # Barge-in: a new valid segment interrupts whatever is playing.
    if pygame.mixer.music.get_busy():
        pygame.mixer.music.stop()
        print("检测到新的有效音，已停止当前音频播放")

    # Time span covered by the pending segments.
    start_time = segments_to_save[0][1]
    end_time = segments_to_save[-1][1]

    # Skip saving if this span overlaps the previously saved interval.
    if saved_intervals and saved_intervals[-1][1] >= start_time:
        print("当前片段与之前片段重叠，跳过保存")
        segments_to_save.clear()
        return

    # Write the audio as 16-bit mono PCM.
    audio_frames = [seg[0] for seg in segments_to_save]
    wf = wave.open(audio_output_path, 'wb')
    wf.setnchannels(AUDIO_CHANNELS)
    wf.setsampwidth(2)  # 16-bit PCM
    wf.setframerate(AUDIO_RATE)
    wf.writeframes(b''.join(audio_frames))
    wf.close()
    print(f"音频保存至 {audio_output_path}")

    # Drain the video queue, keeping frames inside the segment's time span.
    video_frames = []
    while not video_queue.empty():
        frame, timestamp = video_queue.get()
        if start_time <= timestamp <= end_time:
            video_frames.append(frame)

    if video_frames:
        out = cv2.VideoWriter(video_output_path, cv2.VideoWriter_fourcc(*'XVID'), 20.0, (640, 480))
        for frame in video_frames:
            out.write(frame)
        out.release()
        print(f"视频保存至 {video_output_path}")

        # --- Run inference on a background thread so recording continues
        inference_thread = threading.Thread(target=Inference, args=(video_output_path, audio_output_path))
        inference_thread.start()
    else:
        pass
        # print("无可保存的视频帧")

    # Remember the interval just saved.
    saved_intervals.append((start_time, end_time))

    # Clear the pending-segment buffer.
    segments_to_save.clear()

# --- Audio playback helper ---
def play_audio(file_path):
    """Play *file_path* through pygame's mixer, blocking until playback
    finishes. Errors are reported rather than raised; the mixer is always
    shut down afterwards."""
    try:
        music = pygame.mixer
        music.init()
        music.music.load(file_path)
        music.music.play()
        # Poll once a second until the track has finished.
        while music.music.get_busy():
            time.sleep(1)
        print("播放完成！")
    except Exception as exc:
        print(f"播放失败: {exc}")
    finally:
        pygame.mixer.quit()

async def amain(TEXT, VOICE, OUTPUT_FILE) -> None:
    """Synthesize TEXT with the given edge-tts VOICE and write the result
    to OUTPUT_FILE."""
    await edge_tts.Communicate(TEXT, VOICE).save(OUTPUT_FILE)

folder_path = "./Test_QWen2_VL/"  # default folder for captured frames and TTS output
special_folder_path = "./Test_QWen/"  # frames captured during the road-crossing (traffic-light) scenario
os.makedirs(special_folder_path, exist_ok=True)

# Encode an image file for embedding in a data: URL
def encode_image(image_path):
    """Read the file at *image_path* and return its bytes base64-encoded
    as a UTF-8 string."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")


def geocode_address(address):
    """Resolve a place name to "lng,lat" via the AMap geocoding API.

    Returns the coordinate string (e.g. "116.482499,39.991475") or None on
    any failure (network error, bad response, or no match).
    """
    try:
        response = requests.get(
            "https://restapi.amap.com/v3/geocode/geo",
            params={"key": AMAP_API_KEY, "address": address, "output": "json"},
            timeout=5,
        )
        payload = response.json()
        if payload["status"] == "1" and payload["geocodes"]:
            return payload["geocodes"][0]["location"]
        print(f"地理编码错误: {payload.get('info', '未知错误')}")
        return None
    except Exception as e:
        print(f"地理编码 API 调用失败: {e}")
        return None
    
def get_navigation_directions(origin, destination):
    """Fetch walking directions from the AMap API.

    *origin* is a "lng,lat" string; *destination* is a free-text place name
    that is first geocoded. Returns a list of instruction strings, or a
    single-element list containing a Chinese error message.
    """
    AMAP_BASE_URL = "https://restapi.amap.com/v3/direction/walking"
    # Resolve the destination to coordinates first.
    destination_coords = geocode_address(destination)
    if not destination_coords:
        # Fix: this message now matches the error string that Inference()
        # checks for (the original said "北京星巴克" while the caller
        # compared against "哈尔滨市政府", so the error was never caught).
        return ["未找到目的地，请提供更具体的地址（如‘哈尔滨市政府’）"]

    params = {
        "key": AMAP_API_KEY,
        "origin": origin,
        "destination": destination_coords,  # coordinates, not the raw name
        "output": "json"
    }
    for _ in range(3):  # up to 3 attempts
        try:
            response = requests.get(AMAP_BASE_URL, params=params, timeout=5)
            data = response.json()
            if data["status"] == "1" and data["route"]["paths"]:
                steps = data["route"]["paths"][0]["steps"]
                return [step["instruction"] for step in steps]
            print("地图 API 返回错误:", data.get("info", "未知错误"))
            return ["无法获取导航信息，请重试"]
        except Exception as e:
            print(f"地图 API 调用失败: {e}")
            time.sleep(1)  # brief back-off before retrying
    return ["导航服务不可用"]
    
# edge-tts voices keyed by langid language code.
_LANGUAGE_SPEAKER = {
    "ja": "ja-JP-NanamiNeural",
    "fr": "fr-FR-DeniseNeural",
    "es": "ca-ES-JoanaNeural",
    "de": "de-DE-KatjaNeural",
    "zh": "zh-CN-XiaoyiNeural",
    "en": "en-US-AnaNeural",
}


def _select_speaker(text):
    """Return the edge-tts voice for the detected language of *text*."""
    language, _confidence = langid.classify(text)
    # Fix: the original built this table in three places but then always
    # used the Chinese voice; the navigation path already looked the voice
    # up — now all paths do.
    speaker = _LANGUAGE_SPEAKER.get(language, "zh-CN-XiaoyiNeural")
    print("检测到语种：", language, "使用音色：", speaker)
    return speaker


def _speak(text, output_dir):
    """Synthesize *text* to a freshly numbered mp3 in *output_dir* and play it."""
    global audio_file_count
    # Fix: every utterance gets a new filename (the clothes/general paths
    # previously reused the stale counter and overwrote the last file).
    audio_file_count += 1
    output_audio = os.path.join(output_dir, f"sft_{audio_file_count}.mp3")
    asyncio.run(amain(text, _select_speaker(text), output_audio))
    play_audio(output_audio)


def _append_history(user_content, assistant_content):
    """Record one user/assistant exchange, trimming to MAX_HISTORY pairs."""
    global history_messages
    history_messages.append({"role": "user", "content": user_content})
    history_messages.append({"role": "assistant", "content": assistant_content})
    if len(history_messages) > MAX_HISTORY * 2:  # one pair = 2 entries
        history_messages = history_messages[-MAX_HISTORY * 2:]


def _vision_content(prompt, b64_images):
    """Build an OpenAI vision content list: one text part plus one image_url
    part per image.

    Fix: the original stacked four duplicate "url" keys inside a single
    image_url dict, so Python kept only the last key and just one of the
    four frames ever reached the model.
    """
    content = [{"type": "text", "text": prompt}]
    for b64 in b64_images:
        content.append({
            "type": "image_url",
            "image_url": {"url": f"data:image/jpeg;base64,{b64}"},
        })
    return content


def _vlm_chat(messages):
    """Send *messages* to the vision-language model and return its reply text."""
    completion = client.chat.completions.create(
        model=model_name,
        messages=messages,
    )
    output_text = completion.choices[0].message.content
    print(output_text)
    return output_text


def _frames_from_video_file(video_path, out_dir, prefix="captured_image"):
    """Sample frames at 20/40/60/80% of the clip, save them as JPEGs in
    *out_dir*, and return their base64 encodings (unreadable frames are
    skipped instead of crashing later indexing)."""
    cap = cv2.VideoCapture(video_path)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    b64_images = []
    for idx in (int(total_frames * r) for r in (0.2, 0.4, 0.6, 0.8)):
        cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
        ret, frame = cap.read()
        if not ret:
            print(f"无法读取帧索引 {idx}")
            continue
        file_path = os.path.join(out_dir, f"{prefix}{idx}.jpg")
        cv2.imwrite(file_path, frame)
        b64_images.append(encode_image(file_path))
    cap.release()
    return b64_images


def _run_asr(audio_file):
    """Run the XunFei websocket ASR on *audio_file* and return the transcript."""
    wsParam = Ws_Param(APPID=XF_APPID, APISecret=XF_APISecret,
                       APIKey=XF_APIKey, AudioFile=audio_file)
    websocket.enableTrace(False)
    ws = websocket.WebSocketApp(wsParam.create_url(), on_message=on_message,
                                on_error=on_error, on_close=on_close)
    ws.on_open = lambda w: on_open(w, wsParam)
    ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
    return get_recognition_result()


def Inference(TEMP_VIDEO_FILE, TEMP_AUDIO_FILE):
    """Per-segment pipeline: transcribe the audio, then dispatch to the
    navigation / road-crossing / clothes-picking / general scenario.

    Side effects: consumes video_queue (road-crossing), updates the shared
    chat history and scenario flags, writes frame JPEGs and TTS mp3 files,
    and plays the spoken answer.
    """
    global crossing_road, crossing_road_start_time, audio_file_count, history_messages, selecting_clothes

    asr_result = _run_asr(TEMP_AUDIO_FILE)
    print("讯飞语音识别结果:", asr_result)

    # --- Navigation intent: "去<destination>" ---
    if "去" in asr_result and len(asr_result.split("去")) > 1:
        destination = asr_result.split("去")[1].strip()
        print(f"检测到导航目标: {destination}")

        navigation_instructions = get_navigation_directions(DEFAULT_ORIGIN, destination)
        print("导航指令:", navigation_instructions)

        first = navigation_instructions[0] if navigation_instructions else "导航服务不可用"
        # Fix: the original compared against a "destination not found"
        # string that differed from the one get_navigation_directions
        # returns; match by prefix so either wording is caught.
        if first.startswith("未找到目的地") or first in ("无法获取导航信息，请重试", "导航服务不可用"):
            output_text = first  # speak the error message
        else:
            output_text = f"导航指令：{'。'.join(navigation_instructions[:3])}。"  # at most first 3 steps

        _append_history(asr_result, output_text)
        _speak(output_text, folder_path)
        return  # navigation handled; skip the visual scenarios

    # --- Scenario flags ---
    if "过马路" in asr_result:
        crossing_road = True
        crossing_road_start_time = time.time()
        print("进入过马路场景")

    if "挑选衣服" in asr_result:
        selecting_clothes = True
        print("进入挑选衣服场景")

    # --- Road-crossing: describe live camera frames every ~5 s for 60 s ---
    if crossing_road:
        while time.time() - crossing_road_start_time < 60:
            collect_start = time.time()
            video_frames = []
            while time.time() - collect_start < 5:  # gather 5 s of live frames
                if not video_queue.empty():
                    video_frames.append(video_queue.get())
                else:
                    time.sleep(0.01)  # fix: avoid busy-spinning the CPU

            if video_frames:
                total = len(video_frames)
                b64_images = []
                # Keep frames at 20/40/60/80% of the captured window.
                for idx in (int(total * r) for r in (0.2, 0.4, 0.6, 0.8)):
                    frame, _ts = video_frames[idx]
                    file_path = os.path.join(special_folder_path,
                                             f"crossing_road_captured_image{idx}.jpg")
                    cv2.imwrite(file_path, frame)
                    b64_images.append(encode_image(file_path))

                prompt = f"当前正在过马路，请简短快速描述图片中与过马路相关的信息，如交通信号灯状态、道路是否畅通、有无车辆驶来等。原始语音信息：{asr_result}"

                messages = history_messages.copy()
                messages.append({"role": "user",
                                 "content": _vision_content(prompt, b64_images)})
                output_text = _vlm_chat(messages)

                # Only the text prompt goes into history (images are large).
                _append_history(prompt, output_text)
                _speak(output_text, special_folder_path)

            if time.time() - crossing_road_start_time < 60:
                time.sleep(5)

        crossing_road = False
        print("结束过马路场景")

    # --- Clothes picking: combine current weather with clip frames ---
    if selecting_clothes:
        location = getLocation()
        weather_data = json.loads(fetchWeather(location))
        now = weather_data["results"][0]["now"]
        temperature = now["temperature"]
        weather_text = now["text"]

        b64_images = _frames_from_video_file(TEMP_VIDEO_FILE, folder_path)

        prompt = f"当前正在挑选衣服，当前天气 {weather_text}，温度 {temperature}°C。请告诉我当前天气以及图片中适合穿的衣服,以纯文本输出。原始语音信息：{asr_result}"

        # No chat history in this path (matches the original behavior).
        messages = [{"role": "user", "content": _vision_content(prompt, b64_images)}]
        output_text = _vlm_chat(messages)
        _speak(output_text, folder_path)

        selecting_clothes = False
        print("结束挑选衣服场景")

    # --- General scenario (no special keyword matched) ---
    if not crossing_road and "过马路" not in asr_result and not selecting_clothes and "挑选衣服" not in asr_result:
        b64_images = _frames_from_video_file(TEMP_VIDEO_FILE, folder_path)

        # Keyword-specific prompts
        if "买菜" in asr_result:
            prompt = f"当前正在买菜，请描述图片中与买菜相关的信息，如摊位商品、价格标签、商品位置等。原始语音信息：{asr_result}"
        elif "走路（盲道）" in asr_result:
            prompt = f"当前正在盲道上走路，请描述图片中与盲道相关的信息，如盲道是否畅通；有无障碍物，障碍物有多远；距离多远出现拐弯等。原始语音信息：{asr_result}"
        else:
            prompt = asr_result

        # Fix: the original built `messages` only inside the final `else`
        # branch, so the keyword prompts crashed with NameError.
        messages = history_messages.copy()
        messages.append({"role": "user",
                         "content": _vision_content(prompt, b64_images)})
        output_text = _vlm_chat(messages)

        _append_history(prompt, output_text)
        _speak(output_text, folder_path)


# Entry point
if __name__ == "__main__":
    try:
        # Start the capture threads; they coordinate via module globals.
        audio_thread = threading.Thread(target=audio_recorder)
        video_thread = threading.Thread(target=video_recorder)
        for worker in (audio_thread, video_thread):
            worker.start()

        print("按 Ctrl+C 停止录制")
        while True:
            time.sleep(1)

    except KeyboardInterrupt:
        print("录制停止中...")
        recording_active = False  # signal both threads to exit their loops
        for worker in (audio_thread, video_thread):
            worker.join()
        print("录制已停止")