# main.py
import asyncio
from concurrent.futures import ThreadPoolExecutor
import time
from fastapi import FastAPI, Form, HTTPException, Request, UploadFile, File
from fastapi.staticfiles import StaticFiles
from fastapi.responses import JSONResponse
import os
import uuid
from config import app_config 
from pydub import AudioSegment
import numpy as np

from fastapi.templating import Jinja2Templates
from fastapi.middleware.cors import CORSMiddleware

# 导入模块
from src.asr.whisper_asr import transcribe_audio
from src.asr.sense_voice_asr import sv_transcribe_audio
from src.asr.qwen3_asr import q3asr_transcribe_audio_file

from src.llm.ollama import call_ollama
from src.llm.qwen import call_qwen
from src.tts.edge_tts_tts import text_to_speech_edge
from src.tts.gpt_sovits_tts import call_gpt_sovits
from src.init import init_tts

from src.util.response import error_response

app = FastAPI()

# Thread pool for offloading blocking file I/O from the event loop.
thread_pool = ThreadPoolExecutor(max_workers=4)

# Allow cross-origin requests.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # or pin to your frontend, e.g. ["http://localhost:5173"]
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Mount the static file directory (audio, CSS, JS, ...).
# The chat handlers write under static/audio_files/{input,output}; create those
# up front so the first request does not fail with FileNotFoundError.
os.makedirs("static", exist_ok=True)
os.makedirs(os.path.join("static", "audio_files", "input"), exist_ok=True)
os.makedirs(os.path.join("static", "audio_files", "output"), exist_ok=True)
app.mount("/static", StaticFiles(directory="static"), name="static")

# Template directory for HTML pages.
templates = Jinja2Templates(directory="templates")

# Routers split out per feature area.
from src.api import character_api
app.include_router(character_api.router)
from src.api import sessions_api
app.include_router(sessions_api.router)

# Initialize the TTS model once at startup.
init_tts()


# ✅ 4. 添加根路由，返回 HTML 页面
@app.get("/")
async def get_page(request: Request):
    """Serve the chat UI's index page."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)


@app.post("/chat/text")
async def chat_text(text: str = Form(...)):
    """
    Handle a text chat request.
    :param text: the user's input text
    :return: JSON with the pipeline result, or a 500 error response
    """
    task_id = create_task_id()

    # Run the LLM + TTS pipeline on the text.
    try:
        payload = await process_text(text, task_id)
    except Exception as e:
        return JSONResponse({"error": f"处理失败: {str(e)}"}, status_code=500)
    return JSONResponse({"msg": "ok", "data": payload})
    

@app.post("/chat/audio/file")
async def chat_audio_file(file: UploadFile = File(None)):
    """
    Handle a chat request carrying an uploaded audio file.
    Pipeline: save upload -> ASR -> LLM -> TTS.
    :param file: the uploaded audio file (WAV expected)
    :return: JSON with the recognized text, LLM reply and audio URL
    """
    task_id = create_task_id()

    # The parameter default is None, so guard before dereferencing it;
    # previously a missing file crashed with AttributeError (HTTP 500).
    if file is None:
        return JSONResponse({"error": "缺少音频文件"}, status_code=400)

    print(f"收到音频: {file.filename}")

    # Persist the upload so the file-based ASR backends can read it.
    input_path = os.path.join("static", "audio_files", "input", f"{task_id}_input.wav")
    os.makedirs(os.path.dirname(input_path), exist_ok=True)
    try:
        with open(input_path, "wb") as f:
            f.write(await file.read())
    finally:
        # Release the upload's temp resources even if the write fails.
        await file.close()

    # ASR: speech to text.
    try:
        start_time = time.monotonic()
        if app_config.ASR_TYPE == 'sense_voice_asr':
            text = sv_transcribe_audio(audio_path=input_path)
        elif app_config.ASR_TYPE == 'qwen3_asr':
            text = q3asr_transcribe_audio_file(input_path)
        else:
            text = transcribe_audio(audio_file=input_path)
        end_time = time.monotonic()
        print(f"[ASR] 识别结果: {text}")
        print(f"转录完成，耗时 {end_time - start_time:.2f} 秒")
    except Exception as e:
        return JSONResponse({"error": f"ASR失败: {str(e)}"}, status_code=500)

    # LLM + TTS on the recognized text.
    try:
        result = await process_text(text, task_id)
        return JSONResponse({"msg": "ok", "data": result})
    except Exception as e:
        return JSONResponse({"error": f"处理失败: {str(e)}"}, status_code=500)

@app.post("/chat/audio/data")
async def chat_audio_data(request: Request):
    """
    Handle a chat request carrying framed raw audio in the request body.
    Body layout: 4-byte header length + JSON header + float32 PCM samples.
    :param request: incoming request whose raw body holds the framed audio
    :return: JSON with the recognized text, LLM reply and audio URL
    """
    task_id = create_task_id()

    # Split the raw body into the float32 sample buffer and its JSON header.
    data = await request.body()
    audio_buff, header = process_audio(data)

    input_path = os.path.join("static", "audio_files", "input", f"{task_id}_input.wav")
    # Ensure the target directory exists before any save attempt.
    os.makedirs(os.path.dirname(input_path), exist_ok=True)

    if app_config.ASR_TYPE == 'qwen3_asr':
        # qwen3 ASR reads from disk, so the file must exist before transcribing.
        await save_wav(audio_buff, header, input_path)
    else:
        # Fire-and-forget save so the response is not blocked on disk I/O.
        # NOTE(review): the task reference is dropped immediately; asyncio keeps
        # only weak references to tasks, so it can in principle be GC'd before
        # completion — consider holding these in a module-level set.
        asyncio.create_task(
            save_wav(audio_buff, header, input_path)
        )

    # ASR: speech to text.
    try:
        start_time = time.monotonic()
        if app_config.ASR_TYPE == 'sense_voice_asr':
            text = sv_transcribe_audio(audio_data=audio_buff)
        elif app_config.ASR_TYPE == 'qwen3_asr':
            text = q3asr_transcribe_audio_file(input_path)
        else:
            text = transcribe_audio(audio_data=audio_buff)
        end_time = time.monotonic()
        print(f"转录完成，耗时 {end_time - start_time:.2f} 秒")
        print(f"[ASR] 识别结果: {text}")
    except Exception as e:
        return JSONResponse({"error": f"ASR失败: {str(e)}"}, status_code=500)

    # LLM + TTS on the recognized text.
    try:
        result = await process_text(text, task_id)
        return JSONResponse({"msg": "ok", "data": result})
    except Exception as e:
        return JSONResponse({"error": f"处理失败: {str(e)}"}, status_code=500)


def process_audio(audio_bytes: bytes):
    """
    Split a framed binary payload into a float32 sample buffer and its header.

    Payload layout: [4-byte little-endian uint32 header length]
                    [UTF-8 JSON header][raw float32 PCM samples].
    :param audio_bytes: the raw request body
    :return: tuple of (numpy float32 sample array, parsed header dict)
    """
    import json
    import struct

    # First 4 bytes: little-endian uint32 giving the JSON header's byte length.
    (header_size,) = struct.unpack_from('<I', audio_bytes, 0)

    # Decode the JSON header, e.g. {"sampleRate": 16000, ...}.
    header = json.loads(audio_bytes[4:4 + header_size].decode('utf-8'))

    # Everything after the header is raw float32 sample data.
    samples = np.frombuffer(audio_bytes[4 + header_size:], dtype=np.float32)
    return samples, header

async def save_wav(audio_buff, header, path):
    """
    Encode a float32 sample buffer as a 16-bit mono WAV file.
    :param audio_buff: numpy float32 samples, assumed in [-1.0, 1.0] — TODO confirm
    :param header: dict with at least 'sampleRate' (e.g. 16000)
    :param path: destination file path
    """
    # Scale floats to 16-bit PCM.
    audio_int16 = (audio_buff * 32767).astype(np.int16)
    audio_segment = AudioSegment(
        data=audio_int16.tobytes(),
        sample_width=2,                     # 2 bytes per sample == int16
        frame_rate=header['sampleRate'],    # e.g. 16000
        channels=1,                         # mono
    )
    # Ensure the target directory exists, then run the blocking pydub export on
    # the module's thread pool so this coroutine does not stall the event loop.
    os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
    loop = asyncio.get_running_loop()
    await loop.run_in_executor(
        thread_pool, lambda: audio_segment.export(path, format="wav")
    )
    print(f"已保存音频文件: {path}")

async def process_text(text: str, task_id: str):
    """
    Run the LLM + TTS stages for one request.
    :param text: the user's (typed or recognized) input text
    :param task_id: unique id used to name the generated audio file
    :return: dict with the input text, the LLM reply and the audio URL
    :raises Exception: when the LLM returns None or TTS fails
    """
    # LLM: generate the reply.
    start_time = time.monotonic()
    if app_config.LLM_TYPE == 'ollama':
        reply_text = call_ollama(user_input=text)
    else:
        reply_text = call_qwen(user_input=text)
    end_time = time.monotonic()
    print(f"LLM完成，耗时 {end_time - start_time:.2f} 秒")
    print(f"[LLM] 回复: {reply_text}")

    if reply_text is None:
        raise Exception("LLM未返回有效回复，无法进行TTS")

    # TTS: synthesize the reply into an audio file.
    output_path = os.path.join("static", "audio_files", "output", f"{task_id}_output.mp3")
    # Create the output directory up front so the TTS backend can write to it.
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    print(f"[TTS] 生成音频文件路径: {output_path}")
    try:
        print(f"[TTS] 使用TTS引擎: {app_config.TTS_TYPE}")
        start_time = time.monotonic()
        if app_config.TTS_TYPE == 'gpt_sovits':
            call_gpt_sovits(text=reply_text, lang="zh", output_file=output_path)
        else:
            await text_to_speech_edge(reply_text, output_path)
        end_time = time.monotonic()
        print(f"TTS完成，耗时 {end_time - start_time:.2f} 秒")
    except Exception as e:
        # Chain the original error so its traceback survives.
        raise Exception(f"TTS失败: {str(e)}") from e

    # Return the relative audio path; it is served under /static.
    return {
        "text": text,
        "reply": reply_text,
        "audio_url": output_path
    }

# Task-id generation
def create_task_id():
    """Generate and log a unique identifier for one chat request."""
    new_id = str(uuid.uuid4())
    print(f"task_id:{new_id}")
    return new_id


# Global exception handler
@app.exception_handler(Exception)
async def global_exception_handler(request: Request, exc: Exception):
    """
    Catch-all handler for uncaught exceptions.
    Fix: status_code was previously omitted, so every failure was returned
    as HTTP 200 and clients could not detect errors from the status line.
    """
    return JSONResponse(
        status_code=500,
        content=error_response(message=f"操作失败: {str(exc)}", code=500)
    )

@app.exception_handler(HTTPException)
async def http_exception_handler(request: Request, exc: HTTPException):
    """
    Handler for FastAPI's built-in HTTPException.
    Fix: propagate exc.status_code to the HTTP response; previously the
    status_code was omitted, so e.g. a 404 was delivered as HTTP 200.
    """
    return JSONResponse(
        status_code=exc.status_code,
        content=error_response(message=exc.detail, code=exc.status_code)
    )



# Support launching directly via `python main.py` (dev server with reload).
if __name__ == "__main__":
    import uvicorn
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)