import time

import openai
import pyaudio
import requests
import uvicorn
from fastapi import FastAPI, File, UploadFile, BackgroundTasks
from huggingface_hub._webhooks_server import JSONResponse
# Prefer the public FastAPI export (same object); shadows the fragile
# private-module import above without deleting it.
from fastapi.responses import JSONResponse  # noqa: F811
from pydantic import BaseModel

from utils.funasr_client_api import Funasr_websocket_recognizer

FORMAT = pyaudio.paInt16  # PCM sample format: 16-bit signed integers
CHANNELS = 1  # mono capture
RATE = 16000  # sample rate in Hz
# Bytes in one 60 ms frame of 16-bit mono audio, derived from the model stride.
# NOTE(review): this is a BYTE count, but it is later passed to PyAudio as a
# frame count (frames_per_buffer / stream.read) — confirm which is intended.
CHUNK = int(60 * 10 / 10 / 1000 * RATE * 2)  # 每帧的大小，根据你的stride计算
COSY_VOICE_URL = 'http://localhost:50000/inference_zero_shot'  # CosyVoice zero-shot TTS endpoint
# Prompt-speaker reference WAV for CosyVoice zero-shot cloning.
# NOTE(review): the file handle is opened at import time, never closed, and can
# only be streamed once per process; the absolute Windows path will break on
# other machines — consider opening it lazily per request. TODO confirm.
COSY_VOICE_FILES = [('prompt_wav', ('prompt_wav', open('F:\\夸克下载\\李琦语音包.wav', 'rb'), 'application/octet-stream'))]


# Speech recognition through a (assumed) FunASR websocket server.
def funasr_speech_to_text(audio_bytes: bytes) -> str:
    """Transcribe raw PCM audio bytes via the local FunASR websocket API.

    The audio is streamed to the server in CHUNK-sized slices; the last
    '2pass-offline' (second-pass, final) result observed is returned.

    :param audio_bytes: raw audio to transcribe
    :return: recognized text, or '' if no offline result was produced
    """
    # Connect a recognizer session to the local FunASR server.
    recognizer = Funasr_websocket_recognizer(
        host="127.0.0.1", port="10095", is_ssl=True, mode="2pass", chunk_size="0,10,5"
    )
    transcript = ''
    # Feed the audio in fixed-size slices, pacing uploads slightly.
    for offset in range(0, len(audio_bytes), CHUNK):
        reply = recognizer.feed_chunk(audio_bytes[offset:offset + CHUNK], wait_time=0.02)
        # Only the offline (second-pass) message carries the final text.
        if len(reply) > 0 and reply['mode'] == '2pass-offline':
            transcript = reply['text']
        time.sleep(0.05)
    return transcript

# Dialogue history shared across calls to qwen_intent_recognition().
# NOTE(review): grows without bound for the lifetime of the process.
conversation_history = []
# Qwen chat helper: intent recognition + dialogue management over an
# OpenAI-compatible local endpoint.
def qwen_intent_recognition(text: str) -> str:
    """Send *text* to the local Qwen model and return its reply.

    Maintains the module-level ``conversation_history`` so the model sees
    the full dialogue context on every call.

    :param text: the user's input text
    :return: the model's reply content
    """
    # Point the (legacy, pre-1.0) openai client at the local server.
    openai.api_base = "http://localhost:8000/v1"
    openai.api_key = "none"

    # Record the user turn first so it is part of the request context.
    conversation_history.append({"role": "user", "content": text})

    try:
        # Non-streaming completion request.
        response = openai.ChatCompletion.create(
            model="Qwen",
            messages=conversation_history,
            stream=False,
            stop=[]  # 在此处添加自定义的stop words
        )
    except Exception:
        # Keep the history consistent: don't leave a dangling user turn
        # with no assistant reply if the request fails.
        conversation_history.pop()
        raise

    reply = response.choices[0].message.content
    # Record the assistant turn so later calls keep the full context.
    conversation_history.append({"role": "assistant", "content": reply})
    return reply


def play_audio_from_stream(url, data, files):
    """Stream audio from a TTS endpoint and play it on the default output.

    :param url: endpoint expected to return raw PCM audio as a streaming body
    :param data: form fields for the request
    :param files: file payload for the request
    :return: None (audio is played in place)
    """
    # NOTE(review): a GET with a body and files is unusual but matches the
    # original client — confirm the server route before changing to POST.
    response = requests.get(url, data=data, files=files, stream=True)
    try:
        if response.status_code == 200:
            # Playback parameters — must match what the server actually emits.
            n_channels = 1  # mono
            samp_width = 2  # 16-bit audio -> 2 bytes per sample
            framerate = 22050  # assumed output sample rate — TODO confirm

            p = pyaudio.PyAudio()
            try:
                stream = p.open(format=p.get_format_from_width(samp_width),
                                channels=n_channels,
                                rate=framerate,
                                output=True)
                try:
                    # Play each streamed chunk as it arrives.
                    for chunk in response.iter_content(chunk_size=8192):
                        if chunk:  # skip keep-alive/empty chunks
                            stream.write(chunk)
                finally:
                    # Always release the output stream, even if playback fails.
                    stream.stop_stream()
                    stream.close()
            finally:
                # Always release the PyAudio instance.
                p.terminate()
            print("音频播放完成")
        else:
            print(f"请求失败: 状态码 {response.status_code}")
    finally:
        # Close the HTTP connection so streamed responses don't leak sockets.
        response.close()

# FastAPI application object (routes are registered on it below).
app = FastAPI()

# Pydantic request model describing the /voice-ai request body.
# NOTE(review): `UploadFile = File(...)` inside a Pydantic model is not the
# usual FastAPI pattern for multipart uploads (File(...) normally appears as
# an endpoint parameter default) — confirm uploads parse as intended.
class AudioRequest(BaseModel):
    audio_file: UploadFile = File(...)  # the uploaded audio payload

# Voice-AI interaction endpoint: audio in -> ASR -> chat -> deferred TTS playback.
@app.post("/voice-ai")
async def voice_ai_interaction(request: AudioRequest, background_tasks: BackgroundTasks):
    """Accept an uploaded audio file, transcribe it, generate a Qwen reply,
    and schedule CosyVoice playback of that reply in the background.

    Returns 202 immediately; synthesis/playback happens after the response.
    """
    # 1. Pull the uploaded audio file out of the request model.
    audio_file = request.audio_file

    # 2. Speech-to-text with FunASR.  NOTE: funasr_speech_to_text and
    # qwen_intent_recognition are plain synchronous functions — awaiting
    # their str return values (as the original code did) raises TypeError.
    audio_data = await audio_file.read()
    text = funasr_speech_to_text(audio_data)

    # 3. Intent recognition / dialogue management with Qwen.
    response_text = qwen_intent_recognition(text)

    # 4. Defer TTS + playback so the HTTP response can return immediately.
    def process_and_play_audio():
        data = {
            'tts_text': response_text,
            'prompt_text': '我们目前召集了几个人了？'
        }
        # Plays through the local audio device; returns None.
        play_audio_from_stream(url=COSY_VOICE_URL, data=data, files=COSY_VOICE_FILES)

    background_tasks.add_task(process_and_play_audio)

    # Respond right away; the audio work continues in the background task.
    return JSONResponse(content={"message": "Audio processing started"}, status_code=202)


# Entry point: currently runs a local microphone-driven loop instead of the API.
if __name__ == "__main__":
    # uvicorn.run(app, host="0.0.0.0", port=8000)

    # Initialise PyAudio for microphone capture.
    audio = pyaudio.PyAudio()

    # Open the default input device.
    # NOTE(review): CHUNK was computed above as a BYTE count (60 ms of 16-bit
    # mono), but frames_per_buffer and stream.read() expect a FRAME count, so
    # each read actually covers ~120 ms of audio — confirm intended duration.
    stream = audio.open(format=FORMAT, channels=CHANNELS,
                    rate=RATE, input=True,
                    frames_per_buffer=CHUNK)
    try:
        # Capture-and-respond loop: record, transcribe, chat, speak.
        print("Start speaking...")
        while True:
            # Read a fixed number of buffers from the microphone.
            frames = []
            # Original comment claimed "5 seconds"; the real duration depends
            # on the CHUNK frame/byte ambiguity noted above — TODO confirm.
            for i in range(0, int(RATE / CHUNK * 10)):
                data = stream.read(CHUNK, exception_on_overflow=False)
                frames.append(data)
            # Concatenate the buffers into one audio byte string.
            audio_bytes = b''.join(frames)
            text = funasr_speech_to_text(audio_bytes=audio_bytes)
            print(f'用户：{text}')
            content = qwen_intent_recognition(text)
            print(f'Qwen_chat:{content}')
            data = {
                'tts_text': content,
                'prompt_text': '我们目前召集了几个人了？'
            }
            # Synthesize the reply with CosyVoice and play it locally.
            # (play_audio_from_stream plays in place and returns None.)
            response_audio = play_audio_from_stream(url=COSY_VOICE_URL, data=data, files=COSY_VOICE_FILES)
    except Exception as e:
        print("读取音频时出错:", e)
    finally:
        # Always release the input stream and the PyAudio instance.
        stream.stop_stream()
        stream.close()
        audio.terminate()