from src.WenXin_GPT.audio_processing.audio_to_text import recognize_audio
import struct
from src.WenXin_GPT.audio_processing.text_to_audio import text_to_speech     # 导入文本转语音函数
# from src.MiniMax_GPT.audio_processing.text_to_audio import text_to_speech
from src.WenXin_GPT.chatgpt_processing.wenxin_gpt import ask_wenxin               # 导入问答函数
from src.MiniMax_GPT.chatgpt_processing.minimax_gpt import ask_minimax            
from src.Dify_GPT.chatgpt_processing.dify_gpt import ask_dify
import logging
import os
from dotenv import load_dotenv

# Locate the prompts.env file (lives in ./prompts next to this module).
env_path = os.path.join(os.path.dirname(__file__), './prompts/prompts.env')

# Load the .env file into the process environment.
load_dotenv(dotenv_path=env_path)

# Fetch the prompt variables; NOTE(review): these are None if the env file
# is missing or the keys are absent — downstream code should tolerate that.
prompt = os.getenv('PROMPT')
prompt_god = os.getenv('PROMPT_GOD')

def parse_wav_header(header_data):
    """Parse the canonical 44-byte RIFF/WAVE header into a dict.

    Args:
        header_data: bytes-like object holding at least the first 44 bytes
            of a WAV file (only the first 44 are examined).

    Returns:
        dict mapping the 13 standard header field names (ChunkID,
        ChunkSize, Format, ..., Subchunk2Size) to decoded values; the
        four-character identifiers are returned as str, the rest as int.

    Raises:
        ValueError: if fewer than 44 bytes are supplied.
    """
    if len(header_data) < 44:
        raise ValueError("Invalid WAV header length")

    field_names = (
        "ChunkID", "ChunkSize", "Format", "Subchunk1ID", "Subchunk1Size",
        "AudioFormat", "NumChannels", "SampleRate", "ByteRate",
        "BlockAlign", "BitsPerSample", "Subchunk2ID", "Subchunk2Size",
    )
    # '<4sL4s4sLHHLLHH4sL' = little-endian, 13 fields, exactly 44 bytes.
    values = struct.unpack('<4sL4s4sLHHLLHH4sL', header_data[:44])

    # The 4-character chunk IDs come back as bytes; decode those so the
    # resulting dict holds plain strings alongside the numeric fields.
    return {
        name: value.decode() if isinstance(value, bytes) else value
        for name, value in zip(field_names, values)
    }

def is_end_of_audio(message):
    """Return True if *message* is a textual end-of-audio marker.

    The device signals the end of an utterance with a text frame whose
    content contains "EOF" (e.g. "EOF" or "EOF_GOD").  Binary audio
    frames (bytes) are never end markers.

    Bug fix: the original implicitly returned None (not False) for
    non-string input; this makes the boolean contract explicit.
    """
    return isinstance(message, str) and "EOF" in message

async def process_and_send_audio(websocket, recognition_result, prompt):
    """Stream the model's spoken answer for *recognition_result* to the device.

    Each text chunk yielded by the chat backend is converted to speech and
    sent over *websocket* immediately, so playback can start before the
    full answer has been generated.  A terminating "EOF" text frame is
    always sent — even on error — so the device knows the stream ended.

    Args:
        websocket: connected websocket to the device.
        recognition_result: transcribed user speech (str).
        prompt: system prompt to steer the chat backend.
    """
    try:
        # NOTE(review): ask_wenxin / ask_dify are interchangeable backends
        # seen elsewhere in this module; ask_minimax is the active one.
        async for response_chunk in ask_minimax(recognition_result, prompt):
            response_audio = await text_to_speech(response_chunk)
            await websocket.send(response_audio)

    except Exception as e:
        # Bug fix: errors were logged at DEBUG level and were invisible
        # under the default logging config; log the full traceback instead.
        logging.exception(f"Error in process_and_send_audio: {e}")
    finally:
        # Always tell the device the audio stream is finished.
        await websocket.send("EOF")
        logging.info('给设备发送音频结束标志✅EOF')

async def handle_audio(websocket):
    """Per-connection handler: receive device audio, reply with spoken answer.

    Protocol (as implemented here):
      * first binary frame        -> 44-byte WAV header, parsed and logged
      * subsequent binary frames  -> raw audio chunks, buffered
      * text frame containing "EOF" -> end of one utterance: the buffered
        audio is transcribed and the answer is synthesized and streamed
        back.  "EOF" selects the default prompt, "EOF_GOD" the god prompt.

    Args:
        websocket: the device's websocket connection; iterated for frames.
    """
    logging.info('新的设备已连接到WebSocket')
    audio_chunks = []
    wav_header_received = False

    # TODO: pre-generate a short filler reply ("let me think...", "good
    # question", etc.) as soon as the connection opens, and send it the
    # moment EOF arrives so the answer feels seamless.
    try:
        async for message in websocket:
            if isinstance(message, bytes):
                # Binary frame: header first, then audio payload.
                if not wav_header_received:
                    wav_header = parse_wav_header(message)
                    logging.info(f"Received WAV header: {wav_header}")
                    wav_header_received = True
                else:
                    audio_chunks.append(message)
            elif is_end_of_audio(message):
                logging.info('接收到音频结束标志')
                if audio_chunks:
                    recognition_result = await recognize_audio(audio_chunks)
                    audio_chunks = []
                    wav_header_received = False
                    # Bug fix: the original assigned to a local named
                    # `prompt`, shadowing the module-level PROMPT value, so
                    # the default branch always used an empty prompt.  Use a
                    # distinct local name to read the module-level globals.
                    selected_prompt = ''
                    logging.warning("message：%s", message)
                    if message == 'EOF':
                        selected_prompt = prompt  # module-level default prompt
                        logging.warning("采用默认提示词")
                    elif message == 'EOF_GOD':
                        selected_prompt = prompt_god
                        logging.warning("采用财神提示词")
                    # Transcription done — synthesize and stream the answer.
                    await process_and_send_audio(websocket, recognition_result, selected_prompt)

    except Exception as e:
        logging.error(f"Error in handle_audio: {e}")

    logging.warning('设备已断开连接')  # logged when the device disconnects



