import asyncio
import functools
import logging
import os
import wave
from array import array
from io import BytesIO

import openai
import pyaudio
import requests
import uvicorn
from fastapi import FastAPI, File, UploadFile, HTTPException, Response

from utils.Config import AppConfig


# 初始化语音识别相关配置和对象
from utils.funasr_client_api import Funasr_websocket_recognizer


# Global application configuration (hosts, ports, audio parameters, API keys).
config = AppConfig()

# Configure root logging: timestamped, INFO-level messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def handle_request_exceptions(func):
    """Decorator for coroutine functions: log errors and return a user-facing
    Chinese message string instead of raising.

    NOTE(review): this is only valid for plain ``async def`` coroutines.
    Wrapping an async *generator* (a function that uses ``yield``) breaks it,
    because calling an async generator returns a non-awaitable object and
    ``await func(...)`` raises TypeError.
    """
    @functools.wraps(func)  # preserve __name__/__doc__ for logging and FastAPI introspection
    async def wrapper(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except requests.exceptions.RequestException as e:
            logging.error(f"网络连接出现问题: {e}")
            return "网络连接出现问题，请稍后再试"
        except Exception as e:
            logging.error(f"出现其他错误: {e}")
            return "出现未知错误，请检查输入内容"
    return wrapper


def init_funasr_recognizer():
    """Build a FunASR websocket recognizer from the global AppConfig settings."""
    options = {
        "host": config.FUNASR_HOST,
        "port": config.FUNASR_PORT,
        "is_ssl": config.FUNASR_IS_SSL,
        "mode": config.FUNASR_MODE,
        "chunk_size": config.FUNASR_CHUNK_SIZE,
    }
    return Funasr_websocket_recognizer(**options)


def funasr_speech_to_text(audio_bytes: bytes, rcg, config) -> str:
    """Feed raw audio bytes to the FunASR recognizer chunk by chunk and return
    the concatenated offline-pass transcript.

    Args:
        audio_bytes: raw audio payload to recognize.
        rcg: recognizer exposing ``feed_chunk(data, wait_time)`` that returns a
            dict (possibly empty) with 'mode' and 'text' keys.
        config: object providing AUDIO_CHUNK (bytes per chunk).

    Returns:
        The joined recognized text; empty string when nothing was recognized.
    """
    # Ceiling division; empty input yields zero chunks.
    chunk_num = (len(audio_bytes) - 1) // config.AUDIO_CHUNK + 1
    recognized_texts = []
    for i in range(chunk_num):
        begin_index = i * config.AUDIO_CHUNK
        data_chunk = audio_bytes[begin_index: begin_index + config.AUDIO_CHUNK]
        response_chunk = rcg.feed_chunk(data_chunk, wait_time=0.02)
        # Only the final '2pass-offline' results carry stable text.
        # Use .get so a non-empty response without 'mode'/'text' cannot raise KeyError.
        if response_chunk and response_chunk.get('mode') == '2pass-offline':
            recognized_texts.append(response_chunk.get('text', ''))
    if not recognized_texts:
        logging.warning("语音识别未获取到有效文本内容，可能是音频数据问题或识别条件未满足。")
    return "".join(recognized_texts)


# Streaming intent recognition: yields reply text chunks as the model produces them.
async def qwen_intent_recognition(text: str, conversation_history):
    """Send *text* to the Qwen chat endpoint and stream the reply back.

    Appends the user turn — and, on success, the full assistant turn — to
    *conversation_history* in place, yielding each content delta so the caller
    can synthesize and play audio incrementally.

    NOTE(review): the original applied @handle_request_exceptions (twice!);
    that decorator awaits its target, which breaks async generators, so every
    call would fail. Errors are handled inline here instead, yielding a
    user-facing message as the commented-out original intended.
    """
    openai.api_base = config.QWEN_API_BASE
    openai.api_key = config.QWEN_API_KEY
    conversation_history.append({"role": "user", "content": text})
    try:
        response = openai.ChatCompletion.create(
            model="Qwen",
            messages=conversation_history,
            stream=True,  # stream deltas instead of waiting for the full reply
            stop=[]
        )
        result = ""
        async for chunk in response:
            # Forward only chunks that actually carry new reply text.
            if (
                'choices' in chunk
                and len(chunk['choices']) > 0
                and 'delta' in chunk['choices'][0]
                and 'content' in chunk['choices'][0]['delta']
            ):
                content = chunk['choices'][0]['delta']['content']
                result += content
                yield content  # caller can generate/play audio per chunk
        conversation_history.append({"role": "assistant", "content": result})
    except requests.exceptions.RequestException as e:
        logging.error(f"网络连接出现问题导致意图识别失败: {e}")
        yield "网络连接出现问题，请稍后再试"
    except Exception as e:
        logging.error(f"意图识别出现其他错误: {e}")
        yield "意图识别出现错误，请检查输入内容"


async def play_audio_from_stream(url, data, chunk_size=8192):
    """Fetch synthesized audio from *url*, play it through the local speakers
    while it streams, and return the full audio bytes.

    Returns None on any failure so callers can distinguish errors from audio.
    (The original's @handle_request_exceptions decorator returned an error
    *string* on failure, which callers would mistake for truthy audio data —
    errors are handled inline here instead.)

    NOTE(review): a GET with a request body is unusual; the TTS service may
    expect POST — confirm against the CosyVoice API. Also, ``requests`` blocks
    the event loop; only the per-chunk ``asyncio.sleep(0)`` yields control.
    """
    try:
        response = requests.get(url, data=data, stream=True)
    except requests.exceptions.RequestException as e:
        logging.error(f"网络连接出现问题: {e}")
        return None
    if response.status_code != 200:
        logging.error(f"请求失败: 状态码 {response.status_code}，无法获取到音频数据进行播放")
        response.close()
        return None
    audio_data = bytearray()
    p = pyaudio.PyAudio()
    stream = p.open(format=p.get_format_from_width(config.AUDIO_SAMP_WIDTH),
                    channels=config.AUDIO_CHANNELS,
                    rate=config.AUDIO_RATE,
                    output=True)
    try:
        for chunk in response.iter_content(chunk_size=chunk_size):
            if chunk:
                stream.write(chunk)
                audio_data.extend(chunk)
                await asyncio.sleep(0)  # yield to the event loop between chunks
    finally:
        # Always release audio device and HTTP connection, even on error.
        stream.stop_stream()
        stream.close()
        p.terminate()
        response.close()
    return bytes(audio_data)


# Crude voice-activity check: samples the microphone briefly and compares
# total amplitude against a threshold (simple heuristic, can be refined).
def some_condition_to_detect_new_command():
    """Return True if the microphone input volume suggests a new voice command.

    Reads a few frames, sums absolute sample amplitudes, and compares against
    a fixed threshold. The threshold is an example value; tune in deployment.
    """
    p = pyaudio.PyAudio()
    stream = p.open(format=config.AUDIO_FORMAT,
                    channels=config.AUDIO_CHANNELS,
                    rate=config.AUDIO_RATE,
                    input=True,
                    frames_per_buffer=config.AUDIO_CHUNK)
    num_frames_to_check = 5
    audio_frames = []
    try:
        for _ in range(num_frames_to_check):
            audio_frames.append(stream.read(config.AUDIO_CHUNK))
    finally:
        # Release the capture device even if stream.read fails.
        stream.stop_stream()
        stream.close()
        p.terminate()
    # BUG FIX: the original computed sum(abs(x)) over bytearrays, and abs() of
    # a bytearray raises TypeError — this function always crashed. Decode the
    # frames as signed 16-bit samples and sum their magnitudes instead.
    # (Assumes AUDIO_FORMAT is 16-bit PCM, e.g. pyaudio.paInt16 — TODO confirm.)
    volume_sum = sum(abs(sample) for frame in audio_frames for sample in array("h", frame))
    # Example threshold; needs tuning for the actual microphone/environment.
    volume_threshold = 10000
    return volume_sum > volume_threshold


app = FastAPI()
# Shared module-level state: one recognizer connection and one conversation
# history reused across ALL requests. NOTE(review): concurrent clients will
# interleave into the same history — confirm whether per-session state is needed.
rcg = init_funasr_recognizer()
conversation_history = []


# Voice-AI endpoint: accepts an uploaded audio file, recognizes speech,
# streams an LLM reply through TTS, and returns the generated audio.
@app.post("/voice-ai")
async def voice_ai_interaction(audio_file: UploadFile = File(...)):
    """Run the full voice pipeline for one uploaded audio file.

    Steps: speech-to-text → streamed intent/chat reply → TTS synthesis and
    local playback per reply chunk → one aggregated audio response.

    BUG FIX: the original used ``yield Response(...)`` inside the route,
    turning the handler into an async generator — FastAPI cannot return that,
    and raising HTTPException after a yield is invalid. Audio is now
    accumulated and returned as a single Response.

    Raises:
        HTTPException 400: recognition produced no text, or TTS returned no audio.
        HTTPException 500: any unexpected error.
    """
    try:
        audio_bytes = await audio_file.read()

        # Speech recognition.
        text = funasr_speech_to_text(audio_bytes, rcg, config)
        if not text:
            raise HTTPException(status_code=400, detail="语音识别失败，未获取到有效文本内容")
        logging.info(f"语音识别成功，文本内容: {text}")

        # Stream the reply; synthesize and play each chunk, keeping the bytes.
        combined_audio = bytearray()
        async for response_text in qwen_intent_recognition(text, conversation_history):
            data = {
                'tts_text': response_text,
                'spk_id': '中文男'
            }
            audio_response = await play_audio_from_stream(config.COSY_VOICE_URL, data)
            if not audio_response:
                raise HTTPException(status_code=400, detail="音频生成失败，未获取到有效音频数据")
            combined_audio.extend(audio_response)
            # Stop replying early if the user starts speaking a new command.
            if some_condition_to_detect_new_command():
                break

        # NOTE(review): naive byte concatenation of multiple WAV segments keeps
        # each segment's header; if the TTS returns full WAV files, the chunks
        # should be re-muxed into one container — confirm the service's format.
        return Response(content=bytes(combined_audio), media_type="audio/wav")

    except HTTPException:
        raise
    except Exception as e:
        logging.error(f"出现未知错误: {e}")
        raise HTTPException(status_code=500, detail=str(e))


if __name__ == "__main__":
    # Launch the FastAPI app with uvicorn. Host/port come from AppConfig
    # (presumably env-var overrides with defaults 0.0.0.0:9000 — confirm in
    # utils.Config).
    host = config.APP_HOST
    port = config.APP_PORT
    try:
        uvicorn.run(app, host=host, port=port, log_level="info")
    except Exception as e:
        logging.error(f"启动 FastAPI 应用出错: {e}")