import os
import asyncio
import uvicorn
from fastapi import FastAPI, WebSocket
from fastapi_cache import FastAPICache
from fastapi_cache.backends.inmemory import InMemoryBackend
from fastapi_cache.decorator import cache

from audio_utils import TTSManager, STTManager
from llm_service import MockLLMService, OpenAIService

app = FastAPI()
# Initialize fastapi-cache with an in-memory backend; must run before any
# @cache-decorated handler is called.
FastAPICache.init(InMemoryBackend())

# Upper bound for a single WebSocket binary frame sent to clients (1 MiB).
MAX_FRAME_SIZE = 1024 * 1024
# Sentinel appended to the final audio chunk so the client knows the stream ended.
END_MARKER = b'__END__'


@app.websocket("/ws/voice_to_text")
async def voice_to_text(websocket: WebSocket):
    """Single-shot speech-to-text Q&A endpoint.

    Receives one binary audio frame, transcribes it, asks the mock LLM, and
    replies with "<recognized text>||<answer>" before closing the socket.

    NOTE: the fastapi-cache ``@cache`` decorator was removed — it does not
    support WebSocket handlers (it would cache the first call's ``None``
    return and skip the handler entirely for later connections).
    """
    await websocket.accept()
    try:
        audio_data = await websocket.receive_bytes()
        text = STTManager.speech_to_text(audio_data)
        if not text.strip():
            # Nothing recognizable: prompt the client and stop here.
            # (Previously the code fell through and queried the LLM with "".)
            await websocket.send_text("请清晰提问")
            return
        answer = MockLLMService.get_answer(text)
        result = text + '||' + answer
        await websocket.send_text(result)
    except Exception as e:
        # Best-effort: a failed/closed client should not crash the server.
        print(f"Error: {e}")
    finally:
        await websocket.close()

@app.websocket("/ws/text_to_text")
async def text_to_text(websocket: WebSocket):
    """Single-shot text Q&A endpoint.

    Receives one text question, answers it via ``call_qwen_anwser`` (mock
    knowledge base with Qwen fallback), and replies with
    "<question>||<answer>" before closing the socket.

    NOTE: the fastapi-cache ``@cache`` decorator was removed — it does not
    support WebSocket handlers (it would cache the first call's ``None``
    return and skip the handler entirely for later connections).
    """
    await websocket.accept()
    try:
        text_data = await websocket.receive_text()
        answer = call_qwen_anwser(text_data)
        result = text_data + '||' + answer
        await websocket.send_text(result)
    except Exception as e:
        # Best-effort: a failed/closed client should not crash the server.
        print(f"Error: {e}")
    finally:
        await websocket.close()


@app.websocket("/ws/text_to_voice")
async def text_to_voice(websocket: WebSocket):
    """Single-shot text-to-speech endpoint.

    Receives one text question, synthesizes the answer as a WAV file, and
    streams the file back in binary frames of at most MAX_FRAME_SIZE bytes,
    with END_MARKER appended to the final frame.

    NOTE: the fastapi-cache ``@cache`` decorator was removed — it does not
    support WebSocket handlers (it would cache the first call's ``None``
    return and skip the handler entirely for later connections).
    """
    await websocket.accept()
    # Single definition of the temp path, shared by the try and finally blocks
    # (previously hard-coded twice).
    tmp_file = "output/tmp_audio.wav"
    try:
        text_data = await websocket.receive_text()
        answer = call_qwen_anwser(text_data)
        # Ensure the target directory exists before TTS writes the file
        # (was missing here, unlike /ws/voice_to_voice).
        os.makedirs("output", exist_ok=True)
        TTSManager.save_to_file(answer, tmp_file)

        # Read the synthesized audio back for streaming.
        with open(tmp_file, 'rb') as f:
            voice_data = f.read()

        # Reserve exactly len(END_MARKER) bytes so chunk + marker never
        # exceeds MAX_FRAME_SIZE (the old code reserved a hard-coded 10 and
        # shadowed the module constant with a duplicate local marker).
        chunk_size = MAX_FRAME_SIZE - len(END_MARKER)
        for i in range(0, len(voice_data), chunk_size):
            chunk = voice_data[i:i + chunk_size]
            if i + chunk_size >= len(voice_data):
                chunk += END_MARKER  # last chunk carries the end marker
            await websocket.send_bytes(chunk)
    except Exception as e:
        # Best-effort: a failed/closed client should not crash the server.
        print(f"Error: {e}")
    finally:
        await websocket.close()
        # Best-effort cleanup of the temporary audio file.
        if os.path.exists(tmp_file):
            os.remove(tmp_file)


@app.websocket("/ws/voice_to_voice")
async def voice_to_voice(websocket: WebSocket):
    """Multi-turn voice chat endpoint.

    Loops forever on one connection: receive an audio question (60 s idle
    timeout), transcribe it, get an answer from the mock LLM, synthesize the
    reply to a WAV file, and stream it back in frames of at most
    MAX_FRAME_SIZE bytes with END_MARKER appended to the final frame.
    Per-step failures skip the current turn; receive/send failures end the
    connection.
    """
    await websocket.accept()
    print(f"新连接：{websocket.client}")
    # Per-connection temp file name: the previous shared "output/tmp_audio.wav"
    # let concurrent connections clobber and delete each other's audio mid-send.
    tmp_file = f"output/tmp_audio_{id(websocket)}.wav"
    try:
        while True:  # core: keep serving questions until the client goes away
            # 1. Receive the client's audio, with an idle timeout.
            try:
                audio_data = await asyncio.wait_for(
                    websocket.receive_bytes(),
                    timeout=60  # disconnect after 60 s without input
                )
            except asyncio.TimeoutError:
                print("客户端超时，断开连接")
                break
            except Exception:
                # receive_bytes raises once the client closes the socket
                print("客户端主动关闭连接")
                break

            # 2. Speech-to-text; an empty transcript prompts a retry.
            try:
                text = STTManager.speech_to_text(audio_data)
                if not text.strip():
                    await websocket.send_text("请清晰提问")
                    continue
            except Exception as e:
                print(f"语音识别失败: {e}")
                continue

            # 3. Ask the LLM for an answer.
            try:
                answer = MockLLMService.get_answer(text)
            except Exception as e:
                print(f"获取大模型答案失败: {e}")
                continue

            # 4. Synthesize the reply audio to the per-connection temp file.
            try:
                os.makedirs("output", exist_ok=True)
                TTSManager.save_to_file(answer, tmp_file)
            except Exception as e:
                print(f"文本转语音失败: {e}")
                continue

            # 5. Stream the audio back; END_MARKER rides on the final frame so
            #    chunk + marker never exceeds MAX_FRAME_SIZE.
            try:
                with open(tmp_file, 'rb') as f:
                    voice_data = f.read()
                chunk_size = MAX_FRAME_SIZE - len(END_MARKER)
                for i in range(0, len(voice_data), chunk_size):
                    chunk = voice_data[i:i + chunk_size]
                    if i + chunk_size < len(voice_data):
                        await websocket.send_bytes(chunk)
                    else:
                        await websocket.send_bytes(chunk + END_MARKER)
            except Exception:
                print("发送过程中客户端断开")
                break
            finally:
                # Remove the temp file after every turn, success or not.
                if os.path.exists(tmp_file):
                    os.remove(tmp_file)
    except Exception as e:
        print(f"服务端致命错误: {e}")
    finally:
        print(f"连接关闭：{websocket.client}")


def call_qwen_anwser(question):
    """Answer *question* via the mock knowledge base, falling back to the
    remote Qwen model when the mock has nothing.

    Questions of 3 characters or fewer are rejected with a fixed
    "didn't hear you" message without touching any model.
    """
    # Guard clause: too short to be a real question.
    if len(question) <= 3:
        return "对不起，没有听见您的声音。"

    answer = MockLLMService.get_answer(question)
    if answer == "对不起，没有找到答案。":
        # Mock had no answer — escalate to the OpenAI-compatible Qwen API.
        answer = OpenAIService().call_api("qwen2.5/qwen7b", question)
    return answer


if __name__ == "__main__":
    # Launch the ASGI app with uvicorn, listening on all interfaces, port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)
