import asyncio
import logging
import os
import time
import openai
import pyaudio
import requests
from fastapi import FastAPI, File, UploadFile
from pydantic import BaseModel
import uvicorn
from utils.funasr_client_api import Funasr_websocket_recognizer

# Configuration parameters collected into a single config class (kept simple
# here; config-file loading etc. could be added later).
class Config:
    """Static configuration for microphone capture and the TTS backend."""

    FORMAT = pyaudio.paInt16    # 16-bit PCM capture format
    CHANNELS = 1                # mono input
    RATE = 16000                # sample rate in Hz
    # Bytes per 60 ms frame at 16 kHz, 16-bit mono (matches the ASR stride):
    # 16000 samples/s * 2 bytes/sample * 0.060 s = 1920.
    CHUNK = RATE * 2 * 60 // 1000
    COSY_VOICE_URL = 'http://localhost:50000/inference_sft'


config = Config()


# FunASR model hook: speech-to-text over the websocket client.
def funasr_speech_to_text(audio_bytes: bytes) -> str:
    """Transcribe raw PCM bytes via the FunASR websocket service.

    Feeds the audio in CHUNK-sized slices and returns the last finalized
    second-pass ("2pass-offline") transcript, or '' if none was produced.
    """
    recognizer = Funasr_websocket_recognizer(
        host="127.0.0.1", port="10095", is_ssl=True, mode="2pass", chunk_size="0,10,5"
    )
    transcript = ''
    total_chunks = (len(audio_bytes) - 1) // config.CHUNK + 1
    for idx in range(total_chunks):
        start = idx * config.CHUNK
        piece = audio_bytes[start:start + config.CHUNK]
        result = recognizer.feed_chunk(piece, wait_time=0.02)
        # Only finalized offline-pass responses overwrite the transcript.
        if len(result) > 0 and result['mode'] == '2pass-offline':
            transcript = result['text']
    return transcript


# Running dialogue history shared with qwen_intent_recognition(); each entry
# is an OpenAI-style {"role": ..., "content": ...} message dict.
conversation_history = []


# Qwen model hook: intent recognition and dialogue management.
def qwen_intent_recognition(text: str) -> str:
    """Send `text` to the local Qwen chat endpoint and return its reply.

    Appends the user turn and the assistant reply to the module-level
    `conversation_history` so the model sees the full dialogue.
    Returns "" on failure.
    """
    openai.api_base = "http://localhost:8000/v1"  # local OpenAI-compatible server
    openai.api_key = "none"                       # server does not validate the key
    conversation_history.append({"role": "user", "content": text})
    try:
        response = openai.ChatCompletion.create(
            model="Qwen",
            messages=conversation_history,
            stream=False,
            stop=[]
        )
        result = response.choices[0].message.content
        conversation_history.append({"role": "assistant", "content": result})
        return result
    except Exception as e:
        # Bug fix: remove the unanswered user turn so a failed call does not
        # leave a dangling user message in the shared history, which would
        # skew every subsequent request.
        conversation_history.pop()
        print(f"意图识别出现错误: {e}")
        return ""


async def play_audio_from_stream(url, data, chunk_size=8192):
    """Fetch streamed TTS audio from `url` and play it on the default output.

    The response is assumed to be raw 16-bit mono PCM at 22050 Hz
    (NOTE(review): confirm against the CosyVoice server's output format).
    Prints diagnostics on HTTP or playback failure instead of raising.
    """
    # NOTE(review): requests is blocking; apart from the sleep(0) yields below
    # this coroutine holds the event loop — fine for this script's usage where
    # playback is awaited to completion, but not for concurrent tasks.
    response = requests.get(url, data=data, stream=True)
    n_channels = 1    # mono
    samp_width = 2    # 16-bit samples
    framerate = 22050
    try:
        if response.status_code == 200:
            p = pyaudio.PyAudio()
            stream = p.open(format=p.get_format_from_width(samp_width),
                            channels=n_channels,
                            rate=framerate,
                            output=True)
            try:
                for chunk in response.iter_content(chunk_size=chunk_size):
                    if chunk:
                        stream.write(chunk)
                        await asyncio.sleep(0)  # let other tasks run between chunks
            finally:
                # Bug fix: always release PyAudio resources, even when
                # stream.write()/iter_content() raises mid-playback
                # (previously the cleanup ran only on the success path).
                stream.stop_stream()
                stream.close()
                p.terminate()
        else:
            print(f"请求失败: 状态码 {response.status_code}，无法获取到音频数据进行播放")
    except Exception as e:
        print(f"音频播放出现错误: {e}")
    finally:
        # Bug fix: close the streamed HTTP response to return the connection.
        response.close()

# FastAPI application instance (endpoints are registered below).
app = FastAPI()


# HTTP endpoint for the voice AI interaction system (not yet implemented).
@app.get("/voice-ai")
async def voice_ai_interaction():
    """Placeholder for an HTTP-driven voice interaction flow; returns null."""
    pass

# Script entry point: capture microphone audio in a loop, transcribe it,
# get a chat reply from Qwen, and speak the reply via the CosyVoice TTS
# service.  NOTE(review): despite the comment-era intent, uvicorn/FastAPI is
# never started here — the HTTP endpoint above is effectively unused.
if __name__ == "__main__":
    audio = pyaudio.PyAudio()
    # NOTE(review): stream.read() and frames_per_buffer take a *frame* count,
    # but config.CHUNK was computed as a byte count (samples * 2) — confirm
    # the intended capture window size.
    stream = audio.open(format=config.FORMAT, channels=config.CHANNELS,
                        rate=config.RATE, input=True,
                        frames_per_buffer=config.CHUNK)
    try:
        print("Start speaking...")
        # One event loop is created up front and reused for every playback.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        while True:
            # Record a fixed-length window of audio (~10 s worth of chunks).
            frames = []
            for i in range(0, int(config.RATE / config.CHUNK * 10)):
                data = stream.read(config.CHUNK, exception_on_overflow=False)
                frames.append(data)
            audio_bytes = b''.join(frames)
            # Speech -> text via FunASR.
            text = funasr_speech_to_text(audio_bytes=audio_bytes)
            print(f'用户：{text}')
            # Text -> reply via Qwen.
            content = qwen_intent_recognition(text)
            print(f'Qwen_chat:{content}')
            data = {
                'tts_text': content,
                'spk_id': '中文女'
            }
            # Reply -> speech: play the streamed TTS audio to completion
            # before recording the next window.
            audio_task = loop.create_task(play_audio_from_stream(url=config.COSY_VOICE_URL, data=data))
            loop.run_until_complete(audio_task)
    except Exception as e:
        print("读取音频时出错:", e)
    finally:
        # Always release the capture stream and the PyAudio instance.
        stream.stop_stream()
        stream.close()
        audio.terminate()