from fastapi import FastAPI, File, UploadFile, HTTPException, Response
import asyncio
import logging
import os
import time
import openai
import pyaudio
import requests
from pydantic import BaseModel
import uvicorn
from utils.funasr_client_api import Funasr_websocket_recognizer

# Runtime configuration constants
class Config:
    """Audio-capture settings and downstream-service endpoints shared app-wide."""
    FORMAT = pyaudio.paInt16  # audio sample format: 16-bit signed PCM
    CHANNELS = 1  # mono
    RATE = 16000  # sampling rate in Hz
    # Bytes per chunk fed to the recognizer: 60 ms of 16 kHz 16-bit mono audio
    # = (60 * 10 / 10 / 1000) s * RATE samples/s * 2 bytes/sample = 1920 bytes.
    CHUNK = int(60 * 10 / 10 / 1000 * RATE * 2)
    # Text-to-speech service endpoint (CosyVoice inference_sft server)
    COSY_VOICE_URL = 'http://localhost:50000/inference_sft'

config = Config()

# FunASR-backed speech recognition over a local websocket server
def funasr_speech_to_text(audio_bytes: bytes) -> str:
    """Transcribe raw audio bytes via the FunASR websocket recognizer.

    Feeds the audio in config.CHUNK-sized slices and keeps the text of the
    most recent final ('2pass-offline') response, matching the original
    behavior (earlier final segments are overwritten, not concatenated).

    Args:
        audio_bytes: raw audio buffer — assumed 16-bit mono PCM at
            config.RATE Hz to match the Config capture settings (TODO confirm).

    Returns:
        The recognized text, or '' when no final result was produced.
    """
    text = ''
    rcg = Funasr_websocket_recognizer(
        host="127.0.0.1", port="10095", is_ssl=True, mode="2pass", chunk_size="0,10,5"
    )
    try:
        # Ceiling division: number of CHUNK-sized slices covering the buffer
        # (0 slices for an empty buffer).
        chunk_num = (len(audio_bytes) - 1) // config.CHUNK + 1
        for i in range(chunk_num):
            beg = i * config.CHUNK
            data = audio_bytes[beg: beg + config.CHUNK]
            resp = rcg.feed_chunk(data, wait_time=0.02)
            # Only '2pass-offline' responses carry the final, corrected text.
            if len(resp) > 0 and resp['mode'] == '2pass-offline':
                text = resp['text']
    finally:
        # Release the websocket session — the original leaked the connection
        # on every call (and on any exception while feeding chunks).
        rcg.close()
    return text

# Process-wide dialogue history sent to the LLM on every request.
# NOTE(review): shared by all clients and grows without bound — confirm
# whether per-session history / truncation is intended.
conversation_history = []

# Qwen-backed intent recognition / dialogue management via an
# OpenAI-compatible local server
def qwen_intent_recognition(text: str) -> str:
    """Send the user's text to a local Qwen server and return its reply.

    Appends the user turn to the shared conversation_history and, on success,
    the assistant turn as well, so context persists across calls.

    Args:
        text: the user's utterance.

    Returns:
        The assistant's reply, or '' on any error.
    """
    openai.api_base = "http://localhost:8000/v1"
    openai.api_key = "none"
    conversation_history.append({"role": "user", "content": text})
    try:
        response = openai.ChatCompletion.create(
            model="Qwen",
            messages=conversation_history,
            stream=False,
            stop=[]
        )
        result = response.choices[0].message.content
        conversation_history.append({"role": "assistant", "content": result})
        return result
    except Exception as e:
        # Roll back the unanswered user turn so the history never contains a
        # user message without a matching assistant reply — the original left
        # it in, corrupting context for every subsequent call.
        conversation_history.pop()
        # Use the app's configured logging instead of print.
        logging.error(f"意图识别出现错误: {e}")
        return ""

# Stream TTS audio, play it locally, and return the raw bytes
async def play_audio_from_stream(url, data, chunk_size=8192):
    """Download streamed audio from `url`, play it, and return the bytes.

    NOTE(review): `requests` is blocking, so this coroutine stalls the event
    loop for the whole download/playback; consider run_in_executor or an async
    HTTP client if the server must stay responsive during playback.

    Args:
        url: TTS service endpoint (streaming response expected).
        data: request payload (e.g. {'tts_text': ..., 'spk_id': ...}).
        chunk_size: bytes fetched and played per iteration.

    Returns:
        The concatenated audio bytes on success, None on any failure.
    """
    # Playback parameters assumed to match the TTS output format —
    # 16-bit mono at 22050 Hz; TODO confirm against the CosyVoice server.
    n_channels = 1
    samp_width = 2
    framerate = 22050
    try:
        # `with` ensures the HTTP connection is released (original leaked it).
        with requests.get(url, data=data, stream=True) as response:
            if response.status_code != 200:
                print(f"请求失败: 状态码 {response.status_code}，无法获取到音频数据进行播放")
                return None
            chunks = []
            p = pyaudio.PyAudio()
            try:
                stream = p.open(format=p.get_format_from_width(samp_width),
                                channels=n_channels,
                                rate=framerate,
                                output=True)
                try:
                    for chunk in response.iter_content(chunk_size=chunk_size):
                        if chunk:
                            stream.write(chunk)
                            chunks.append(chunk)
                            # Yield to the event loop between chunks.
                            await asyncio.sleep(0)
                finally:
                    stream.stop_stream()
                    stream.close()
            finally:
                # Always release PortAudio — the original leaked it whenever
                # playback raised mid-stream.
                p.terminate()
            # Join once instead of repeated bytes += (which is quadratic).
            return b''.join(chunks)
    except Exception as e:
        print(f"音频播放出现错误: {e}")
        return None

# FastAPI application instance exposing the voice-interaction endpoint.
app = FastAPI()


# Voice-AI endpoint: accepts an uploaded audio file (matches how audio data
# actually arrives) and runs the full pipeline.
@app.post("/voice-ai")
async def voice_ai_interaction(audio_file: UploadFile = File(...)):
    """Full voice pipeline: speech-to-text -> LLM reply -> text-to-speech.

    Transcribes the uploaded audio with FunASR, generates a reply with Qwen,
    synthesizes/plays the reply audio, and returns that audio as the response.

    Raises:
        HTTPException: 400 when any pipeline stage yields no usable output,
            500 for unexpected errors.
    """
    try:
        # Raw bytes of the uploaded audio (format assumed to match Config —
        # TODO confirm with clients).
        audio_bytes = await audio_file.read()

        # Speech recognition
        text = funasr_speech_to_text(audio_bytes)
        if not text:
            raise HTTPException(status_code=400, detail="语音识别失败，未获取到有效文本内容")

        # Intent recognition and dialogue management
        response_text = qwen_intent_recognition(text)
        if not response_text:
            raise HTTPException(status_code=400, detail="意图识别或对话管理失败，未获取到有效回复内容")

        # Audio synthesis (also plays the audio locally)
        data = {
            'tts_text': response_text,
            'spk_id': '中文男'
        }
        audio_response = await play_audio_from_stream(config.COSY_VOICE_URL, data)
        if not audio_response:
            raise HTTPException(status_code=400, detail="音频生成失败，未获取到有效音频数据")
        # Report success only after validating the synthesized audio —
        # the original printed this before the check.
        print('音频生成成功')
        # Return the reply audio as binary WAV content.
        return Response(content=audio_response, media_type="audio/wav")
    except HTTPException:
        # Re-raise unchanged: the original's blanket handler below converted
        # the deliberate 400 responses above into generic 500 errors.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# Script entry point: configure logging and serve the app with uvicorn.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
    # Bind address and port come from the environment when set,
    # falling back to 0.0.0.0:9000.
    app_host = os.getenv('APP_HOST', '0.0.0.0')
    app_port = int(os.getenv('APP_PORT', 9000))
    try:
        uvicorn.run(app, host=app_host, port=app_port, log_level="info")
    except Exception as e:
        logging.error(f"启动 FastAPI 应用出错: {e}")