# main.py
import io
import os
from contextlib import asynccontextmanager

import cld2
import soundfile as sf
import torch
from fastapi import FastAPI, File, Form, HTTPException, UploadFile
from fastapi.responses import StreamingResponse
from kokoro import KModel, KPipeline
from pydantic import BaseModel
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline


class TTSRequest(BaseModel):
    """Request body for the /v1/audio/speech endpoint."""

    input: str  # text to synthesize
    voice: str = "af_alloy"  # default voice; must match a .pt file under TTS_MODEL_PATH/voices


class Voice(BaseModel):
    """A single selectable TTS voice."""

    id: str  # voice file stem, e.g. "af_alloy"
    name: str  # human-readable display name derived from the id


class VoicesList(BaseModel):
    """Response body for /v1/audio/voices."""

    voices: list[Voice]


class ModelInfo(BaseModel):
    """Metadata for one served model (OpenAI-style model record)."""

    id: str
    object: str = "model"
    owned_by: str = "local"
    type: str = "tts"  # "tts" or "stt"


class ModelsList(BaseModel):
    """Response body for /v1/audio/models."""

    object: str = "list"
    models: list[ModelInfo]


class TranscriptionResponse(BaseModel):
    """Response body for /v1/audio/transcriptions."""

    text: str  # transcribed text, stripped of surrounding whitespace


# --- Configuration ---
# TTS model configuration: Kokoro config + weights live under this directory.
TTS_MODEL_PATH = os.environ.get("TTS_MODEL_PATH", "/data/work/HUGGINGFACE/model/Kokoro-82M")
CONFIG_PATH = os.path.join(TTS_MODEL_PATH, "config.json")
MODEL_WEIGHTS_PATH = os.path.join(TTS_MODEL_PATH, "kokoro-v1_0.pth")

# STT model configuration: local Whisper checkpoint directory.
STT_MODEL_PATH = os.environ.get("STT_MODEL_PATH", "/data/work/HUGGINGFACE/model/whisper-large-v3")

# Prefer the first CUDA device (with float16) when available; otherwise CPU/float32.
DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
TORCH_DTYPE = torch.float16 if torch.cuda.is_available() else torch.float32

# Startup diagnostics (operator-facing messages are in Chinese; kept as-is).
print(f"使用的 TTS 模型路径: {TTS_MODEL_PATH}")
print(f"使用的 STT 模型路径: {STT_MODEL_PATH}")
print(f"使用的设备: {DEVICE}")


# --- Application lifespan: load models at startup, release them at shutdown ---


@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: load the TTS and STT models once at startup,
    publish them on ``app.state``, and free them on shutdown."""
    # --- TTS model loading ---
    print("正在加载 Kokoro TTS 模型...")
    tts_model = KModel(config=CONFIG_PATH, model=MODEL_WEIGHTS_PATH).to(DEVICE).eval()
    # One KPipeline per supported language, all sharing the same KModel instance.
    app.state.tts_pipelines = {
        "zh": KPipeline(lang_code="z", model=tts_model, repo_id="hexgrad/Kokoro-82M"),
        "en": KPipeline(lang_code="a", model=tts_model, repo_id="hexgrad/Kokoro-82M"),
    }
    print("TTS 中文和英文语言管道加载完成。")

    # --- STT model loading ---
    print("正在加载 Whisper STT 模型...")
    # Build the automatic-speech-recognition pipeline from the local checkpoint.
    stt_model = AutoModelForSpeechSeq2Seq.from_pretrained(
        STT_MODEL_PATH,
        torch_dtype=TORCH_DTYPE,
        low_cpu_mem_usage=True,
        use_safetensors=True,
    )
    stt_model.to(DEVICE)

    processor = AutoProcessor.from_pretrained(STT_MODEL_PATH)

    app.state.stt_pipeline = pipeline(
        "automatic-speech-recognition",
        model=stt_model,
        tokenizer=processor.tokenizer,
        feature_extractor=processor.feature_extractor,
        torch_dtype=TORCH_DTYPE,
        device=DEVICE,
    )
    print("STT 模型加载完成。")

    yield
    # Clean up and release resources when the application shuts down.
    print("正在清理和释放资源...")
    del app.state.tts_pipelines
    del app.state.stt_pipeline
    if torch.cuda.is_available():
        torch.cuda.empty_cache()


# --- FastAPI application instance (models are wired in by the lifespan hook) ---
app = FastAPI(lifespan=lifespan)


# --- API endpoints ---
@app.get("/v1/audio/voices", response_model=VoicesList)
async def list_voices():
    """List all available voices (one per ``.pt`` file in the model's voices/ dir).

    Returns:
        VoicesList: voices sorted by id for a deterministic listing.

    Raises:
        HTTPException 500: when the voices directory is missing or unreadable.
    """
    voices_dir = os.path.join(TTS_MODEL_PATH, "voices")
    try:
        if not os.path.isdir(voices_dir):
            raise FileNotFoundError("声音目录未找到")

        # Sort: os.listdir order is arbitrary, which made the listing nondeterministic.
        voice_files = sorted(f for f in os.listdir(voices_dir) if f.endswith(".pt"))
        voices = [
            Voice(id=voice_id, name=voice_id.replace("_", " ").title())
            for voice_id in (os.path.splitext(f)[0] for f in voice_files)
        ]
        return VoicesList(voices=voices)
    except Exception as e:
        # Boundary handler: surface any failure as a 500 with the original message.
        print(f"列出声音时出错: {e}")
        raise HTTPException(status_code=500, detail=f"无法列出声音: {str(e)}")


@app.get("/v1/audio/models", response_model=ModelsList)
async def list_models():
    """Enumerate the models served by this API."""
    # Static catalog of (model id, model type) pairs.
    catalog = [
        ("kokoro-tts", "tts"),
        ("whisper-large-v3", "stt"),
    ]
    return ModelsList(
        models=[
            ModelInfo(id=model_id, object="model", owned_by="local", type=model_type)
            for model_id, model_type in catalog
        ]
    )


@app.post("/v1/audio/transcriptions", response_model=TranscriptionResponse)
async def create_transcription(file: UploadFile = File(...), language: str | None = Form(None)):
    """Transcribe an uploaded audio file to text using the Whisper STT pipeline.

    Raises:
        HTTPException 500: when reading the upload or running the pipeline fails.
    """
    try:
        # Read the entire uploaded audio file into memory.
        audio_bytes = await file.read()

        print(f"收到文件 '{file.filename}' ({file.content_type}), 大小: {len(audio_bytes)} bytes. 开始转写...")

        # Always run the "transcribe" task; optionally pin the language from the form field.
        generate_kwargs = {"task": "transcribe"}
        if language:
            generate_kwargs["language"] = language

        transcription = app.state.stt_pipeline(audio_bytes, generate_kwargs=generate_kwargs)

        text = transcription.get("text", "").strip()
        # NOTE(review): the ASR pipeline output may not include a "language" key
        # depending on the transformers version — the "未知" (unknown) default covers
        # that case; confirm against the installed version.
        whisper_detected_language = transcription.get("language", "未知")  # language reported by Whisper, if any
        print(f"转写完成. Whisper 检测到语言: {whisper_detected_language}. 结果: '{text[:50]}...'")

        return TranscriptionResponse(text=text)
    except Exception as e:
        # Boundary handler: any failure becomes a 500 carrying the original message.
        print(f"处理音频转写时出错: {e}")
        raise HTTPException(status_code=500, detail=f"无法处理音频文件: {str(e)}")


@app.post("/v1/audio/speech")
async def generate_speech(request_data: TTSRequest):
    """Synthesize speech for the request text and stream it back as WAV audio.

    Raises:
        HTTPException 404: when the requested voice file does not exist.
        HTTPException 500: when synthesis produces no audio or any step fails.
    """
    try:
        text_input = request_data.input

        # 1. Auto-detect the language with cld2; default to English.
        lang = "en"
        detection_result = cld2.detect(text_input)
        is_reliable = detection_result.is_reliable
        details = detection_result.details

        # details[0][1] is the top detected language code (e.g. "zh", "zh-Hant").
        if is_reliable and details and details[0][1].startswith("zh"):
            lang = "zh"

        print(f"最终使用的语言: {lang}")

        # 2. Pick the pipeline matching the detected language.
        tts_pipeline_instance = app.state.tts_pipelines.get(lang)

        # Build the full local path to the requested voice embedding file.
        voice_name = request_data.voice
        voice_path = os.path.join(TTS_MODEL_PATH, "voices", f"{voice_name}.pt")

        # Reject unknown voices with a 404 before running the model.
        if not os.path.exists(voice_path):
            raise HTTPException(
                status_code=404,
                detail=f"声音文件未找到: {voice_path}. 可用的声音可能在 '{os.path.join(TTS_MODEL_PATH, 'voices')}' 目录下。",
            )

        print(f"正在为文本生成语音: '{text_input[:30]}...' 使用声音: {voice_path}")

        # KPipeline returns a generator yielding per-segment results.
        generator = tts_pipeline_instance(text_input, voice=voice_path)

        # Collect every non-empty audio chunk.
        audio_chunks = []
        for result in generator:
            if result.audio is not None:
                audio_chunks.append(result.audio)

        if not audio_chunks:
            raise HTTPException(status_code=500, detail="模型未能生成任何音频数据。")

        # Concatenate all chunks into one waveform.
        full_audio = torch.cat(audio_chunks)

        # Encode the waveform as a WAV byte stream. Kokoro's default sample rate is 24000.
        # .detach().cpu() guards against a CUDA tensor: Tensor.numpy() requires a CPU tensor
        # (no-op when the audio is already on the CPU).
        buffer = io.BytesIO()
        sf.write(buffer, full_audio.detach().cpu().numpy(), samplerate=24000, format="WAV")
        buffer.seek(0)

        print("语音生成成功，正在返回音频流。")
        return StreamingResponse(buffer, media_type="audio/wav")

    except HTTPException:
        # Bug fix: let deliberate HTTP errors (404 missing voice, 500 empty audio)
        # propagate with their status codes instead of being re-wrapped as a
        # generic 500 by the handler below.
        raise
    except Exception as e:
        print(f"错误: {e}")
        raise HTTPException(status_code=500, detail=str(e))


# Root endpoint used for health checks.
@app.get("/")
def read_root():
    """Report service status and the effective model/device configuration."""
    return dict(
        status="语音服务正在运行",
        tts_model_path=TTS_MODEL_PATH,
        stt_model_path=STT_MODEL_PATH,
        device=DEVICE,
    )
