import cv2
import pyaudio
import wave
import time
from queue import Queue
import webrtcvad
import threading
import pygame
import edge_tts
import asyncio
from time import sleep
import base64
from openai import OpenAI
import langid
import numpy as np
from scipy.fft import fft, ifft
# --- Configure a domestic (China) HuggingFace mirror ---
import os

os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

# --- LLM API configuration ---
# NOTE(security): a real-looking API key is hard-coded as the env-var fallback;
# rotate it and remove the literal — rely on the environment variable only.
API_KEY = os.getenv("GenStudio_API_Key", "sk-jbf6xlaz7avigys3")
BASE_URL = os.getenv("DEFAULT_BASE_URL", "https://cloud.infini-ai.com/maas/v1")
client = OpenAI(api_key=API_KEY, base_url=BASE_URL)
model_name = "qwen2.5-vl-72b-instruct"  # Qwen2.5-VL multimodal model served by the MaaS endpoint

# --- Parameter settings ---
AUDIO_RATE = 16000  # audio sample rate: 16000 samples captured per second
AUDIO_CHANNELS = 1  # number of audio channels (mono)
CHUNK = 1024  # samples per stream read (1024 samples = 64 ms at 16 kHz)
VAD_MODE = 3  # WebRTC VAD aggressiveness (0-3; higher = more aggressive detection)
OUTPUT_DIR = "./output"  # directory where audio/video segments are saved
NO_SPEECH_THRESHOLD = 2  # seconds of silence before the current segment is flushed
audio_file_count = 0  # monotonically increasing index used in output file names
###
# Noise-detection parameters
NOISE_LEVEL_THRESHOLD = 0.1  # ambient-noise energy threshold (0-1, normalized amplitude)
INITIAL_CALIBRATION_DURATION = 2.0  # initial noise-calibration window (seconds)

rate = 0.4  # fraction of 20 ms frames that must be voiced for a chunk to count as speech

# Noise-reduction parameters
NOISE_ESTIMATION_FRAMES = 10  # frames used to estimate the noise spectrum (~200 ms)
NOISE_REDUCTION_FACTOR = 0.8  # spectral-subtraction strength (0-1; larger = stronger)

# Scene configuration: per-scene silence threshold and speech-frequency bound
SCENE_CONFIG = {
    "conversation": {"threshold": 1.0, "speech_freq_threshold": 0.5},  # fast back-and-forth dialog
    "quiet": {"threshold": 3.0, "speech_freq_threshold": 0.2},         # quiet environment
    "default": {"threshold": 2.0, "speech_freq_threshold": 0.3}        # default
}

# Currently active key into SCENE_CONFIG
CURRENT_SCENE = "default"
###
# Ensure the output directory exists
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Queues buffering audio and video for synchronized saving
audio_queue = Queue()
video_queue = Queue()

# Global state shared across recorder threads
last_active_time = time.time()  # timestamp of the most recent detected speech activity
recording_active = True  # flag controlling whether the recorder threads keep running
segments_to_save = []  # pending (raw_audio, timestamp) speech segments awaiting save
saved_intervals = []  # (start, end) time intervals of segments already saved
last_vad_end_time = 0  # end time of the last saved voiced segment

crossing_road = False  # True while the road-crossing monitor loop is active
start_crossing_time = 0  # when the current road-crossing session started

# Initialize WebRTC VAD
vad = webrtcvad.Vad()  # voice-activity-detection object
vad.set_mode(VAD_MODE)  # set detection aggressiveness

# Import the required classes and helpers from iat_ws_python3.py (iFLYTEK ASR client)
from iat_ws_python3 import Ws_Param, on_message, on_error, on_close, on_open, get_recognition_result
import websocket
import ssl

# iFLYTEK (xfyun) ASR credentials
# NOTE(security): credentials are hard-coded; move them to environment variables.
XF_APPID = '3c3833be'
XF_APIKey = 'ed5b5792eb1fc9cb3a5e1aeb5d0abb1c'
XF_APISecret = 'NTAxZjNmYzE5YzA2NjA3MTUwZjg3YzY0'


# 音频录制线程
# def audio_recorder():
#     global audio_queue, recording_active, last_active_time, segments_to_save, last_vad_end_time
#
#     p = pyaudio.PyAudio()
#     stream = p.open(format=pyaudio.paInt16,
#                     channels=AUDIO_CHANNELS,
#                     rate=AUDIO_RATE,
#                     input=True,
#                     frames_per_buffer=CHUNK)
#
#     audio_buffer = []
#     print("音频录制已开始")
#
#     while recording_active:
#         data = stream.read(CHUNK)
#         audio_buffer.append(data)
#
#         # 每 0.5 秒检测一次 VAD
#         if len(audio_buffer) * CHUNK / AUDIO_RATE >= 0.5:
#             # 拼接音频数据并检测 VAD
#             raw_audio = b''.join(audio_buffer)
#             vad_result = check_vad_activity(raw_audio)
#
#             if vad_result:
#                 print("检测到语音活动")
#                 last_active_time = time.time()
#                 segments_to_save.append((raw_audio, time.time()))
#             else:
#                 print("静音中...")
#
#             audio_buffer = []  # 清空缓冲区
#
#         # 检查无效语音时间
#         if time.time() - last_active_time > NO_SPEECH_THRESHOLD:
#             # 检查是否需要保存
#             if segments_to_save and segments_to_save[-1][1] > last_vad_end_time:
#                 save_audio_video()
#                 last_active_time = time.time()
#             else:
#                 pass
#                 # print("无新增语音段，跳过保存")
#
#     stream.stop_stream()
#     stream.close()
#     p.terminate()

def audio_recorder():
    """Audio-capture thread: record microphone audio, run VAD, trigger saves.

    Workflow:
      1. Calibrate ambient noise for INITIAL_CALIBRATION_DURATION seconds and
         pick the VAD aggressiveness / activation rate accordingly.
      2. Read CHUNK-sized frames; every ~0.5 s run VAD on the buffered audio.
      3. Voiced chunks are appended to segments_to_save; the speech frequency
         switches CURRENT_SCENE and adapts NO_SPEECH_THRESHOLD.
      4. After NO_SPEECH_THRESHOLD seconds of silence, flush pending segments
         via save_audio_video().

    Mutates module-level state (last_active_time, segments_to_save, VAD_MODE,
    rate, CURRENT_SCENE, NO_SPEECH_THRESHOLD); exits when recording_active
    becomes False.
    """
    global recording_active, last_active_time, segments_to_save, last_vad_end_time, VAD_MODE, rate, CURRENT_SCENE, NO_SPEECH_THRESHOLD

    try:
        p = pyaudio.PyAudio()
        stream = p.open(format=pyaudio.paInt16, channels=AUDIO_CHANNELS, rate=AUDIO_RATE, input=True, frames_per_buffer=CHUNK)
    except Exception as e:
        print(f"无法初始化音频设备: {e}")
        recording_active = False
        return

    try:
        # --- Ambient-noise calibration ---
        print("正在检测环境噪声水平...")
        calibration_samples = []
        start_time = time.time()
        while time.time() - start_time < INITIAL_CALIBRATION_DURATION:
            try:
                data = stream.read(CHUNK, exception_on_overflow=False)
                calibration_samples.append(data)
            except Exception as e:
                print(f"音频读取失败: {e}")
                break

        # Mean absolute amplitude normalized to [0, 1]. Guard against an empty
        # calibration buffer: np.mean of an empty array yields NaN, which would
        # silently poison the VAD-mode selection below.
        audio_data = b''.join(calibration_samples)
        audio_array = np.frombuffer(audio_data, dtype=np.int16)
        noise_level = np.mean(np.abs(audio_array)) / 32768.0 if audio_array.size else 0.0
        print(f"检测到噪声水平: {noise_level:.3f}")

        # Adapt VAD aggressiveness and activation rate to the measured noise floor
        if noise_level > NOISE_LEVEL_THRESHOLD:
            VAD_MODE, rate = 2, 0.5
        else:
            VAD_MODE, rate = 3, 0.3
        vad.set_mode(VAD_MODE)
        print(f"设置 VAD_MODE={VAD_MODE}, rate={rate}")

        audio_buffer = []
        speech_count = 0
        last_speech_time = time.time()
        print("音频录制已开始")

        while recording_active:
            try:
                data = stream.read(CHUNK, exception_on_overflow=False)
                audio_buffer.append(data)

                # Run VAD roughly every 0.5 s of buffered audio
                if len(audio_buffer) * CHUNK / AUDIO_RATE >= 0.5:
                    raw_audio = b''.join(audio_buffer)
                    vad_result = check_vad_activity(raw_audio, rate)

                    if vad_result:
                        print("检测到语音活动")
                        last_active_time = time.time()
                        segments_to_save.append((raw_audio, time.time()))
                        speech_count += 1
                        # Speech events per second since the last flush drive scene selection
                        speech_freq = speech_count / max(time.time() - last_speech_time, 1.0)
                        if speech_freq > SCENE_CONFIG["conversation"]["speech_freq_threshold"]:
                            CURRENT_SCENE = "conversation"
                        elif speech_freq < SCENE_CONFIG["quiet"]["speech_freq_threshold"]:
                            CURRENT_SCENE = "quiet"
                        else:
                            CURRENT_SCENE = "default"
                        NO_SPEECH_THRESHOLD = SCENE_CONFIG[CURRENT_SCENE]["threshold"]
                        print(f"当前场景: {CURRENT_SCENE}, NO_SPEECH_THRESHOLD={NO_SPEECH_THRESHOLD}")
                    else:
                        print("静音中...")

                    audio_buffer = []

                # Flush pending segments after a long enough silence
                if time.time() - last_active_time > NO_SPEECH_THRESHOLD:
                    if segments_to_save and segments_to_save[-1][1] > last_vad_end_time:
                        save_audio_video()
                        last_active_time = time.time()
                        speech_count = 0
                        last_speech_time = time.time()
            except Exception as e:
                print(f"音频录制错误: {e}")
                break
    finally:
        # Always release the audio device, even if the loop exits via an exception
        stream.stop_stream()
        stream.close()
        p.terminate()

# 视频录制线程
def video_recorder():
    """Video-capture thread: push (frame, timestamp) pairs onto video_queue.

    Runs until recording_active goes False. Frames are later consumed by
    save_audio_video() and crossing_road_recognition().
    """
    global video_queue, recording_active

    cap = cv2.VideoCapture(0)  # default camera
    if not cap.isOpened():
        # Fail fast instead of busy-spinning on a missing/busy camera device.
        print("无法获取摄像头画面")
        cap.release()
        return

    print("视频录制已开始")
    while recording_active:
        ret, frame = cap.read()
        if ret:
            video_queue.put((frame, time.time()))
        else:
            print("无法获取摄像头画面")

    cap.release()


# 检测 VAD 活动
# def check_vad_activity(audio_data):
#     # 将音频数据分块检测，设置有效激活率rate=40%，低于此比例当作静音段
#     num, rate = 0, 0.4
#     step = int(AUDIO_RATE * 0.02)  # 20ms 块大小
#     flag_rate = round(rate * len(audio_data) // step)
#
#     for i in range(0, len(audio_data), step):
#         chunk = audio_data[i:i + step]
#         if len(chunk) == step:
#             if vad.is_speech(chunk, sample_rate=AUDIO_RATE):
#                 num += 1
#
#     if num > flag_rate:
#         return True
#     return False

def check_vad_activity(audio_data, rate=0.4):
    """Return True if more than ``rate`` of the 20 ms frames in ``audio_data`` are voiced.

    Applies simple spectral-subtraction noise reduction (noise magnitude
    spectrum averaged over the first NOISE_ESTIMATION_FRAMES frames) before
    running WebRTC VAD on each 20 ms frame.

    Args:
        audio_data: raw 16-bit little-endian mono PCM bytes at AUDIO_RATE.
        rate: minimum fraction of voiced frames for the chunk to count as speech.
    """
    audio_array = np.frombuffer(audio_data, dtype=np.int16).astype(np.float32)
    step = int(AUDIO_RATE * 0.02)  # 20 ms frame size in samples (320 @ 16 kHz)
    total_frames = len(audio_array) // step
    # BUGFIX: threshold and loop previously used len(audio_data) (bytes, 2x the
    # sample count), which doubled the voiced-frame threshold — for rate >= 0.5
    # it became unreachable, so speech was never detected in noisy mode.
    flag_rate = round(rate * total_frames)

    # Estimate the average noise magnitude spectrum from the leading frames
    # (assumes the chunk starts with mostly noise — TODO confirm for
    # speech-initial chunks).
    # BUGFIX: fft(noise_frames, n=step) truncated all 10 frames to the first
    # one and then divided by 10, underestimating the noise floor ~10x; now the
    # per-frame spectra are actually averaged.
    noise_spectrum = None
    if total_frames >= NOISE_ESTIMATION_FRAMES:
        noise_frames = audio_array[:step * NOISE_ESTIMATION_FRAMES].reshape(NOISE_ESTIMATION_FRAMES, step)
        noise_spectrum = np.abs(fft(noise_frames, axis=1)).mean(axis=0)

    num = 0
    for frame_idx in range(total_frames):
        chunk = audio_array[frame_idx * step:(frame_idx + 1) * step]
        if noise_spectrum is not None:
            # Spectral subtraction: shrink the magnitude, keep the phase.
            chunk_fft = fft(chunk, n=step)
            magnitude = np.abs(chunk_fft)
            phase = np.angle(chunk_fft)
            reduced = np.maximum(magnitude - NOISE_REDUCTION_FACTOR * noise_spectrum, 0)
            denoised = np.real(ifft(reduced * np.exp(1j * phase)))
            # Clip before the int16 cast to avoid integer wrap-around on loud samples.
            chunk = np.clip(denoised, -32768, 32767).astype(np.int16)
        else:
            chunk = chunk.astype(np.int16)

        # WebRTC VAD expects 10/20/30 ms of 16-bit PCM; 320 samples -> 20 ms.
        if vad.is_speech(chunk.tobytes(), sample_rate=AUDIO_RATE):
            num += 1

    return num > flag_rate

# 保存音频和视频
def save_audio_video():
    """Flush pending speech segments: write a WAV + AVI pair, then run inference.

    Consumes segments_to_save and the frames in video_queue whose timestamps
    fall inside the segment's time interval. Skips saving when the interval
    overlaps the previously saved one. On success, spawns Inference() on a
    daemon thread with the saved file paths.
    """
    global segments_to_save, video_queue, last_vad_end_time, saved_intervals, crossing_road, start_crossing_time, audio_file_count

    pygame.mixer.init()
    audio_file_count += 1
    audio_output_path = f"{OUTPUT_DIR}/audio_{audio_file_count}.wav"
    video_output_path = f"{OUTPUT_DIR}/video_{audio_file_count}.avi"

    if not segments_to_save:
        return

    # A newly detected utterance interrupts any TTS playback still in progress.
    if pygame.mixer.music.get_busy():
        pygame.mixer.music.stop()
        print("检测到新的有效音，已停止当前音频播放")

    start_time = segments_to_save[0][1]
    end_time = segments_to_save[-1][1]

    if saved_intervals and saved_intervals[-1][1] >= start_time:
        print("当前片段与之前片段重叠，跳过保存")
        segments_to_save.clear()
        return

    try:
        audio_frames = [seg[0] for seg in segments_to_save]
        # Context manager guarantees the WAV file is closed even if a write fails.
        with wave.open(audio_output_path, 'wb') as wf:
            wf.setnchannels(AUDIO_CHANNELS)
            wf.setsampwidth(2)  # 16-bit samples
            wf.setframerate(AUDIO_RATE)
            wf.writeframes(b''.join(audio_frames))
        print(f"音频保存至 {audio_output_path}")
    except Exception as e:
        print(f"保存音频失败: {e}")
        return

    # Collect the camera frames captured during the speech interval.
    video_frames = []
    while not video_queue.empty():
        frame, timestamp = video_queue.get()
        if start_time <= timestamp <= end_time:
            video_frames.append(frame)

    if video_frames:
        try:
            # BUGFIX: size the writer from the actual frames instead of a
            # hard-coded 640x480 — a size mismatch produces a corrupt AVI.
            height, width = video_frames[0].shape[:2]
            out = cv2.VideoWriter(video_output_path, cv2.VideoWriter_fourcc(*'XVID'), 20.0, (width, height))
            for frame in video_frames:
                out.write(frame)
            out.release()
            print(f"视频保存至 {video_output_path}")
            threading.Thread(target=Inference, args=(video_output_path, audio_output_path), daemon=True).start()
        except Exception as e:
            print(f"保存视频失败: {e}")
            return
    else:
        print("无可保存的视频帧")

    saved_intervals.append((start_time, end_time))
    segments_to_save.clear()


# --- 播放音频 -
def play_audio(file_path):
    """Load *file_path* into pygame's music mixer and play it to completion.

    Blocks the calling thread until playback finishes; the mixer is always
    shut down afterwards, even on failure.
    """
    try:
        pygame.mixer.init()
        music = pygame.mixer.music
        music.load(file_path)
        music.play()
        while music.get_busy():
            time.sleep(1)  # poll once per second until the track finishes
        print("播放完成！")
    except Exception as e:
        print(f"播放失败: {e}")
    finally:
        pygame.mixer.quit()


async def amain(TEXT, VOICE, OUTPUT_FILE) -> None:
    """Synthesize TEXT with the edge-tts voice VOICE and save it to OUTPUT_FILE."""
    await edge_tts.Communicate(TEXT, VOICE).save(OUTPUT_FILE)


# Working directory for captured frames and synthesized TTS output files
folder_path = "./Test_QWen2_VL/"


# Function to encode the image
def encode_image(image_path):
    """Read the file at *image_path* and return its contents as base64 text."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")


def Inference(video_path, audio_path):
    """Run the full pipeline for one saved segment: ASR -> frame sampling -> VLM -> TTS.

    Steps:
      1. Transcribe *audio_path* via the iFLYTEK websocket ASR.
      2. If the transcript mentions crossing the road ("过马路"), start the
         background road-crossing monitor thread.
      3. Sample 4 frames (at 20/40/60/80% of the video) and base64-encode them.
      4. Build a scenario-specific prompt and query the vision-language model.
      5. Detect the reply's language, synthesize speech with edge-tts, play it.
    """
    global crossing_road, start_crossing_time, audio_file_count

    # --- 1. Speech recognition over the iFLYTEK websocket API ---
    try:
        wsParam = Ws_Param(APPID=XF_APPID, APISecret=XF_APISecret, APIKey=XF_APIKey, AudioFile=audio_path)
        websocket.enableTrace(False)
        wsUrl = wsParam.create_url()
        ws = websocket.WebSocketApp(wsUrl, on_message=on_message, on_error=on_error, on_close=on_close)
        ws.on_open = lambda w: on_open(w, wsParam)
        ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})  # blocks until the ASR session closes
        asr_result = get_recognition_result()
        print("讯飞语音识别结果:", asr_result)
    except Exception as e:
        print(f"语音识别失败: {e}")
        return

    # --- 2. Trigger the road-crossing monitor when the user mentions "过马路" ---
    if "过马路" in asr_result:
        crossing_road = True
        start_crossing_time = time.time()
        threading.Thread(target=crossing_road_recognition, daemon=True).start()

    # --- 3. Sample four evenly spaced frames from the saved video ---
    try:
        cap = cv2.VideoCapture(video_path)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        S_index = [0.2, 0.4, 0.6, 0.8]  # relative positions to sample
        frame_index = [int(total_frames * i) for i in S_index]
        base64_image = []
        for idx in frame_index:
            cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
            ret, frame = cap.read()
            if not ret:
                print(f"无法读取帧索引 {idx}")
                continue
            file_path = os.path.join(folder_path, f"captured_image{idx}.jpg")
            cv2.imwrite(file_path, frame)
            base64_image.append(encode_image(file_path))
        cap.release()
    except Exception as e:
        print(f"视频帧提取失败: {e}")
        return

    if not base64_image:
        print("无有效图像，跳过推理")
        return

    # --- 4. Build a scenario-specific prompt from the transcript ---
    prompt = asr_result
    if "过马路" in asr_result:
        prompt = f"当前正在过马路，请描述图片中与过马路相关的信息，如交通信号灯状态、有无车辆等。原始语音信息：{asr_result}"
    elif "买菜" in asr_result:
        prompt = f"当前正在买菜，请描述图片中与买菜相关的信息，如摊位商品、价格标签、商品位置等。原始语音信息：{asr_result}"
    elif "走路（盲道）" in asr_result:
        prompt = f"当前正在盲道上走路，请描述图片中与盲道相关的信息，如盲道是否畅通；有无障碍物，障碍物有多远；距离多远出现拐弯等。原始语音信息：{asr_result}"

    # NOTE(review): only the first sampled frame is sent to the model even
    # though four are captured — confirm whether multi-image input was intended.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{base64_image[0]}"}
                }
            ]
        }
    ]

    try:
        completion = client.chat.completions.create(model=model_name, messages=messages)
        output_text = completion.choices[0].message.content
        print("推理结果:", output_text)
    except Exception as e:
        print(f"API 调用失败: {e}")
        return

    # --- 5. Pick a TTS voice from the detected language and speak the reply ---
    language, _ = langid.classify(output_text)
    language_speaker = {
        "ja": "ja-JP-NanamiNeural",
        "fr": "fr-FR-DeniseNeural",
        "es": "ca-ES-JoanaNeural",
        "de": "de-DE-KatjaNeural",
        "zh": "zh-CN-XiaoyiNeural",
        "en": "en-US-AnaNeural"
    }
    used_speaker = language_speaker.get(language, "zh-CN-XiaoyiNeural")
    print(f"检测到语种: {language}, 使用音色: {used_speaker}")

    try:
        output_file = os.path.join(folder_path, f"sft_{audio_file_count}.mp3")
        asyncio.run(amain(output_text, used_speaker, output_file))
        play_audio(output_file)
    except Exception as e:
        print(f"语音合成或播放失败: {e}")

def crossing_road_recognition():
    """Background monitor while the user crosses the road (runs at most ~20 s).

    Every 5 seconds, grabs the next frame from video_queue, asks the VLM about
    traffic-light state / vehicles, and speaks the answer via edge-tts. Stops
    when crossing_road is cleared or the 20-second window elapses.
    """
    global crossing_road, start_crossing_time, audio_file_count
    while crossing_road and time.time() - start_crossing_time < 20:
        time.sleep(5)
        if not video_queue.empty():
            try:
                # NOTE(review): this consumes frames that save_audio_video()
                # may also want — confirm the queue contention is acceptable.
                frame, timestamp = video_queue.get()
                file_path = os.path.join(folder_path, f"crossing_road_image_{int(timestamp)}.jpg")
                cv2.imwrite(file_path, frame)
                base64_image = encode_image(file_path)
                prompt = "当前正在过马路，请描述图片中与过马路相关的信息，如交通信号灯状态、有无车辆等。"

                messages = [
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": prompt},
                            {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}}
                        ]
                    }
                ]

                completion = client.chat.completions.create(model=model_name, messages=messages)
                output_text = completion.choices[0].message.content
                print("过马路推理结果:", output_text)

                # Choose a TTS voice matching the detected reply language
                language, _ = langid.classify(output_text)
                language_speaker = {
                    "ja": "ja-JP-NanamiNeural",  # ok
                    "fr": "fr-FR-DeniseNeural",  # ok
                    "es": "ca-ES-JoanaNeural",  # ok
                    "de": "de-DE-KatjaNeural",  # ok
                    "zh": "zh-CN-XiaoyiNeural",  # ok
                    "en": "en-US-AnaNeural",  # ok
                }
                used_speaker = language_speaker.get(language, "zh-CN-XiaoyiNeural")
                print(f"检测到语种: {language}, 使用音色: {used_speaker}")

                audio_file_count += 1
                output_file = os.path.join(folder_path, f"sft_{audio_file_count}.mp3")
                asyncio.run(amain(output_text, used_speaker, output_file))
                play_audio(output_file)
            except Exception as e:
                print(f"过马路识别错误: {e}")
    crossing_road = False


# Main entry point: start the recorder threads and wait for Ctrl+C
if __name__ == "__main__":
    try:
        # Start the audio and video capture threads
        audio_thread = threading.Thread(target=audio_recorder)
        video_thread = threading.Thread(target=video_recorder)
        audio_thread.start()
        video_thread.start()

        print("按 Ctrl+C 停止录制")
        while True:
            time.sleep(1)  # keep the main thread alive; all work happens in the threads

    except KeyboardInterrupt:
        print("录制停止中...")
        recording_active = False  # signal both capture loops to exit
        audio_thread.join()
        video_thread.join()
        print("录制已停止")