import asyncio
import os

import pyaudio
from fastapi import FastAPI, File, UploadFile, HTTPException, Response
import logging
import uvicorn

from FQV2.audio_playback import play_audio_from_stream
from FQV2.dialogue_management import qwen_intent_recognition
from utils.Config import AppConfig
from utils.funasr_client_api import Funasr_websocket_recognizer

# Application-wide configuration (hosts, ports, service URLs).
config = AppConfig()

# Logging configuration: timestamped INFO-level messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Speech-recognition helpers.
# NOTE(review): this import sits mid-file in the original; it is kept here
# to preserve import-time side-effect ordering (AppConfig/logging first).
from speech_recognition import init_funasr_recognizer, funasr_speech_to_text

# Shared FunASR recognizer instance used by the HTTP endpoint.
rcg = init_funasr_recognizer()

# Conversation history, module-global so it persists across requests.
# NOTE(review): shared by ALL clients of the endpoint — presumably intended
# for a single-user deployment; confirm before multi-user use.
conversation_history = []

app = FastAPI()
# Voice-AI endpoint: accepts an uploaded audio file (more natural for audio
# payloads than raw body params), runs ASR -> intent/dialogue -> TTS and
# returns the synthesized reply as WAV bytes.
@app.post("/voice-ai")
async def voice_ai_interaction(audio_file: UploadFile = File(...)):
    """Full voice interaction round-trip.

    Pipeline: uploaded audio -> FunASR speech-to-text -> Qwen intent
    recognition / dialogue management -> CosyVoice TTS -> WAV response.

    Raises:
        HTTPException 400: ASR timeout/empty text, malformed dialogue
            history, or empty TTS output.
        HTTPException 500: any unexpected error.
    """
    try:
        # Read the raw audio bytes from the uploaded file.
        audio_bytes = await audio_file.read()
        logging.info(f"接收到音频文件，大小为 {len(audio_bytes)} 字节")

        # Speech recognition (uses the module-level shared recognizer).
        try:
            text = funasr_speech_to_text(audio_bytes, rcg)
        except TimeoutError as e:
            raise HTTPException(status_code=400, detail=f"语音识别超时: {e}")
        if not text:
            raise HTTPException(status_code=400, detail="语音识别失败，未获取到有效文本内容")
        logging.info(f"语音识别成功，文本内容: {text}")

        # Intent recognition and dialogue management.
        local_conversation_history = qwen_intent_recognition(text, conversation_history, config)
        # BUG FIX: validate the returned history BEFORE indexing it. The
        # original read [-1]['content'] first, so an empty or malformed
        # history raised IndexError/KeyError and surfaced as a generic 500
        # instead of these intended 400 responses.
        if not local_conversation_history:
            raise HTTPException(status_code=400,
                                detail="意图识别返回了空的对话历史，可能出现严重错误，请检查相关配置和输入。")
        if 'content' not in local_conversation_history[-1]:
            raise HTTPException(status_code=400,
                                detail="意图识别返回的对话历史格式不正确，缺少回复内容字段，请检查相关逻辑。")
        response_text = local_conversation_history[-1]['content']
        logging.info(f"意图识别和对话管理成功，回复内容: {response_text}")

        # TTS synthesis via the CosyVoice streaming service.
        data = {
            'tts_text': response_text,
            'spk_id': '中文女'
        }
        audio_response = await play_audio_from_stream(config.COSY_VOICE_URL, data, config)
        if not audio_response:
            raise HTTPException(status_code=400, detail="音频生成失败，未获取到有效音频数据")
        # BUG FIX: log success only after confirming non-empty audio
        # (the original logged success before the check).
        logging.info('音频生成成功')

        # Return the synthesized audio as binary WAV data.
        return Response(content=audio_response, media_type="audio/wav")
    except HTTPException:
        # Re-raise intentional HTTP errors untouched.
        raise
    except Exception as e:
        logging.error(f"出现未知错误: {e}")
        raise HTTPException(status_code=500, detail=str(e))

def main():
    """Console (microphone) interaction loop.

    Captures ~10-second batches of 16 kHz mono audio from the default input
    device, transcribes each batch with a fresh FunASR websocket recognizer,
    feeds the text through Qwen dialogue management, and plays back the TTS
    reply. Runs until an error occurs or the process is interrupted.
    """
    history = []
    sample_format = pyaudio.paInt16       # 16-bit PCM samples
    n_channels = 1                        # mono
    sample_rate = 16000                   # Hz
    # Per-read frame count, derived from the recognizer stride settings.
    frames_per_read = int(60 * 10 / 10 / 1000 * sample_rate * 2)

    pa = pyaudio.PyAudio()
    mic_stream = pa.open(format=sample_format, channels=n_channels,
                         rate=sample_rate, input=True,
                         frames_per_buffer=frames_per_read)
    try:
        print("Start speaking...")
        # Dedicated event loop for awaiting the async playback helper.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        while True:
            # Gather roughly ten seconds of microphone audio.
            reads = int(sample_rate / frames_per_read * 10)
            captured = [mic_stream.read(frames_per_read, exception_on_overflow=False)
                        for _ in range(reads)]
            audio_bytes = b''.join(captured)

            # Fresh recognizer per batch, talking to the local FunASR server.
            recognizer = Funasr_websocket_recognizer(
                host="127.0.0.1", port="10095", is_ssl=True, mode="2pass", chunk_size="0,10,5"
            )
            text = funasr_speech_to_text(audio_bytes=audio_bytes, rcg=recognizer)
            print(f'用户：{text}')

            # Dialogue management, then synthesize and play the reply.
            reply = qwen_intent_recognition(text, history, config)
            print(f'Qwen_chat:{reply}')
            payload = {
                'tts_text': reply,
                'spk_id': '中文女'
            }
            playback = loop.create_task(
                play_audio_from_stream(url=config.COSY_VOICE_URL, data=payload, config=config))
            loop.run_until_complete(playback)
    except Exception as e:
        print("读取音频时出错:", e)
    finally:
        # Always release the audio device.
        mic_stream.stop_stream()
        mic_stream.close()
        pa.terminate()

if __name__ == "__main__":
    # main()  # console/microphone mode — disabled; the FastAPI service runs instead
    host = config.APP_HOST  # bind host (original note: env var preferred, default 0.0.0.0)
    port = config.APP_PORT  # bind port (original note: env var preferred, default 9000)
    try:
        uvicorn.run(app, host=host, port=port, log_level="info")
    except Exception as e:
        logging.error(f"启动 FastAPI 应用出错: {e}")