import json
import os
import logging
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Optional, Dict, Any

import librosa
import soundfile
import uvicorn
from fastapi import FastAPI, UploadFile, File, HTTPException, Depends
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse, JSONResponse
from funasr import AutoModel

# Configuration management
class Settings:
    """Static configuration for the ASR service: model paths, audio targets,
    streaming chunk geometry and the upload-format whitelist."""
    # Model configuration
    MODEL_PATH: Optional[str] = None  # local model path; None falls back to the default model
    STREAM_MODEL_PATH: Optional[str] = None  # streaming model path; None falls back to the default
    
    # Audio processing configuration
    TARGET_SAMPLE_RATE: int = 16000
    TARGET_CHANNELS: int = 1  # mono
    
    # Chunking configuration (FunASR streaming parameters)
    CHUNK_SIZE: list = [0, 10, 5]  # [0, 10, 5] corresponds to 600 ms chunks
    ENCODER_CHUNK_LOOK_BACK: int = 4
    DECODER_CHUNK_LOOK_BACK: int = 1
    
    # Supported audio upload formats (lower-case extensions)
    ALLOWED_EXTENSIONS: set = {".wav", ".flac", ".ogg", ".mp3", ".m4a", ".aac"}

# Shared settings instance used throughout the module
settings = Settings()

# Logging: emit to both the console and a local log file
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler("asr_service.log")
    ]
)
logger = logging.getLogger("FunASR-API")

# Model loading (singleton pattern)
class ModelManager:
    """Caches FunASR models so each one is instantiated at most once.

    The class is a classic singleton: every ``ModelManager()`` call returns
    the same instance, and loaded models are shared via the class-level
    ``_models`` dict.
    """
    _instance = None
    _models: Dict[str, Any] = {}
    
    def __new__(cls):
        # Singleton: reuse the first instance ever created.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance
    
    def load_model(self, model_name: str, **kwargs):
        """Return the cached model for *model_name*, loading it on first use.

        Args:
            model_name: ``"streaming"`` selects the streaming paraformer
                model; any other value loads the full offline pipeline
                (ASR + VAD + punctuation + speaker model).
            **kwargs: extra keyword arguments forwarded to ``AutoModel``
                for the full pipeline only.

        Raises:
            RuntimeError: if the underlying model fails to load.
        """
        if model_name not in self._models:
            try:
                logger.info(f"开始加载模型: {model_name}")
                if model_name == "streaming":
                    model_path = settings.STREAM_MODEL_PATH or "paraformer-zh-streaming"
                    self._models[model_name] = AutoModel(model=model_path)
                else:
                    model_path = settings.MODEL_PATH or "paraformer-zh"
                    self._models[model_name] = AutoModel(
                        model=model_path,
                        vad_model="fsmn-vad",
                        punc_model="ct-punc",
                        spk_model="cam++",
                        **kwargs
                    )
                logger.info(f"模型 {model_name} 加载成功")
            except Exception as e:
                logger.error(f"模型 {model_name} 加载失败: {str(e)}", exc_info=True)
                # Chain the original exception so the root cause is preserved
                # in the traceback (the original raise dropped it).
                raise RuntimeError(f"模型加载失败: {str(e)}") from e
        return self._models[model_name]

# Shared model manager (singleton)
model_manager = ModelManager()

# FastAPI application instance
app = FastAPI(
    title="语音识别API服务",
    description="基于FunASR的语音识别API，支持多种音频格式和流式识别",
    version="1.0.0"
)

# CORS configuration — currently wide open
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # production deployments should restrict this to known domains
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

def allowed_file(filename: str) -> bool:
    """Return True when *filename* carries a whitelisted audio extension."""
    return Path(filename).suffix.lower() in settings.ALLOWED_EXTENSIONS

def convert_audio(input_path: str, output_path: str) -> bool:
    """Convert an audio file to the service's target format.

    Reads *input_path*, downmixes to mono, resamples to the configured
    target rate and writes a 16-bit PCM WAV to *output_path*.

    Returns:
        True on success, False on any failure (the error is logged).
    """
    try:
        # Load with the file's native sample rate and channel layout.
        samples, native_sr = librosa.load(input_path, sr=None, mono=False)

        # Downmix multi-channel audio to a single channel.
        if samples.ndim > 1 and samples.shape[0] > 1:
            samples = librosa.to_mono(samples)

        # Resample only when the native rate differs from the target.
        if native_sr != settings.TARGET_SAMPLE_RATE:
            samples = librosa.resample(
                samples,
                orig_sr=native_sr,
                target_sr=settings.TARGET_SAMPLE_RATE,
            )

        # Persist as 16-bit PCM WAV at the target rate.
        soundfile.write(
            output_path,
            samples,
            settings.TARGET_SAMPLE_RATE,
            subtype='PCM_16',
        )
    except Exception as e:
        logger.error(f"音频转换失败: {str(e)}", exc_info=True)
        return False
    return True

async def process_audio_file(file: UploadFile) -> str:
    """Validate and convert an uploaded audio file.

    The upload is written to a temporary file and converted to the target
    WAV format. The caller owns — and must delete — the returned file.

    Args:
        file: the uploaded audio file.

    Returns:
        Path of the converted temporary WAV file.

    Raises:
        HTTPException: 400 for an unsupported extension, 500 if the
            audio conversion fails.
    """
    # Reject unsupported extensions before touching the filesystem.
    if not allowed_file(file.filename):
        raise HTTPException(
            status_code=400, 
            detail=f"不支持的文件格式。支持的格式: {', '.join(settings.ALLOWED_EXTENSIONS)}"
        )
    
    # Persist the upload so the conversion library can read it from disk.
    with NamedTemporaryFile(delete=False, suffix=Path(file.filename).suffix) as temp_input:
        temp_input.write(await file.read())
        temp_input_path = temp_input.name
    
    try:
        # Reserve a temporary path for the converted WAV output.
        with NamedTemporaryFile(delete=False, suffix=".wav") as temp_output:
            temp_output_path = temp_output.name
        
        if not convert_audio(temp_input_path, temp_output_path):
            # Bug fix: delete the reserved output file before raising,
            # otherwise it leaks every time a conversion fails.
            if os.path.exists(temp_output_path):
                os.remove(temp_output_path)
            raise HTTPException(status_code=500, detail="音频格式转换失败")
            
        return temp_output_path
        
    finally:
        # Always discard the raw upload's temporary copy.
        if os.path.exists(temp_input_path):
            os.remove(temp_input_path)

def get_stream_model():
    """FastAPI dependency: hand out the shared streaming ASR model."""
    return model_manager.load_model("streaming")

def get_full_model():
    """FastAPI dependency: hand out the shared full-pipeline ASR model."""
    return model_manager.load_model("full")

@app.get("/health", summary="健康检查")
async def health_check():
    """Liveness probe: report that the service is up and reachable."""
    return dict(status="healthy", service="FunASR API")

@app.post("/transcribeStream", summary="流式语音识别")
async def asr_stream_endpoint(
    file: UploadFile = File(...),
    stream_model = Depends(get_stream_model)
):
    """Stream recognition results for an uploaded audio file.

    The upload is converted to the target WAV format, split into
    fixed-stride chunks and fed through the streaming model; each
    partial result is emitted as one JSON line.

    Raises:
        HTTPException: 400/500 from upload processing, 500 on any
            other recognition failure.
    """
    try:
        # Validate/convert the upload; we own the returned temp file.
        converted_path = await process_audio_file(file)
        logger.info(f"开始流式识别: {file.filename}")
        
        try:
            # Load the converted audio and derive the chunking layout.
            speech, sample_rate = soundfile.read(converted_path)
            # 960 samples per chunk-size unit (60 ms frames at 16 kHz).
            chunk_stride = settings.CHUNK_SIZE[1] * 960
            total_chunk_num = int((len(speech) - 1) / chunk_stride + 1)
            cache = {}
        except Exception:
            # Bug fix: if setup fails here, the generator below is never
            # created, so its finally-cleanup never runs and the converted
            # temp file would leak. Delete it before propagating.
            if os.path.exists(converted_path):
                os.remove(converted_path)
            raise
        
        # Streaming response body: one JSON line per decoded chunk.
        async def generate():
            try:
                for i in range(total_chunk_num):
                    start = i * chunk_stride
                    end = (i + 1) * chunk_stride
                    speech_chunk = speech[start:end]
                    is_final = (i == total_chunk_num - 1)
                    
                    # Incremental decode; `cache` carries model state across chunks.
                    res = stream_model.generate(
                        input=speech_chunk,
                        cache=cache,
                        is_final=is_final,
                        chunk_size=settings.CHUNK_SIZE,
                        encoder_chunk_look_back=settings.ENCODER_CHUNK_LOOK_BACK,
                        decoder_chunk_look_back=settings.DECODER_CHUNK_LOOK_BACK
                    )
                    
                    # Concatenate the text of all result items for this chunk.
                    if res and isinstance(res, list):
                        texts = [item.get("text", "") for item in res]
                        full_text = "".join(texts).strip()
                    else:
                        full_text = ""
                        
                    logger.debug(f"流式识别片段 {i+1}/{total_chunk_num}: {full_text}")
                    yield json.dumps({"text": full_text, "final": is_final}, ensure_ascii=False) + "\n"
                    
            finally:
                # The generator owns the converted temp file from here on;
                # delete it when streaming ends (or the client disconnects).
                if os.path.exists(converted_path):
                    os.remove(converted_path)
        
        return StreamingResponse(
            generate(), 
            media_type="application/json",
            headers={"X-Content-Type-Options": "nosniff"}
        )
        
    except HTTPException:
        # Already-shaped HTTP errors pass through untouched.
        raise
    except Exception as e:
        logger.error(f"流式识别失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"流式识别服务错误: {str(e)}")

@app.post("/transcribe", summary="完整语音识别")
async def asr_endpoint(
    file: UploadFile = File(...),
    hotword: str = "魔搭",
    full_model = Depends(get_full_model)
):
    """Run full (non-streaming) recognition on an uploaded audio file.

    Args:
        file: audio upload in any supported format (converted automatically).
        hotword: hotword hint forwarded to the model.

    Returns:
        JSON response with the filename, the raw model output under
        ``content`` and a ``status`` field.

    Raises:
        HTTPException: 400/500 from upload processing, 500 on any other
            recognition failure.
    """
    # Pre-initialize so the finally-block can test it unconditionally
    # (replaces the fragile `'converted_path' in locals()` check).
    converted_path = None
    try:
        # Validate/convert the upload; we own the returned temp file.
        converted_path = await process_audio_file(file)
        logger.info(f"开始完整识别: {file.filename}")
        
        # Full-pipeline recognition on the converted file.
        res = full_model.generate(
            input=converted_path,
            batch_size_s=300,
            hotword=hotword
        )
        
        result = {
            "filename": file.filename,
            "content": res,
            "status": "success"
        }
        
        return JSONResponse(result, status_code=200)
        
    except HTTPException:
        # Already-shaped HTTP errors pass through untouched.
        raise
    except Exception as e:
        logger.error(f"完整识别失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"完整识别服务错误: {str(e)}")
    finally:
        # Clean up the converted temp file whenever one was created.
        if converted_path and os.path.exists(converted_path):
            os.remove(converted_path)

if __name__ == "__main__":
    # Host and port are overridable through environment variables.
    host = os.environ.get("API_HOST", "0.0.0.0")
    port = int(os.environ.get("API_PORT", "8081"))

    logger.info(f"启动语音识别API服务，地址: http://{host}:{port}")
    uvicorn.run(app, host=host, port=port)