from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
import os
import tempfile
import shutil
from typing import Optional
import time
import logging
from datetime import datetime
from faster_whisper import WhisperModel
import soundfile as sf

# Logging configuration: INFO level, duplicated to the console and to
# asr_api.log in the process working directory.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),  # console output
        logging.FileHandler("asr_api.log")  # file output
    ]
)
logger = logging.getLogger("asr_api")

# Create the FastAPI application.
app = FastAPI(
    title="语音识别API",
    description="基于Faster Whisper的语音识别API",
    version="1.0.0"
)

# CORS middleware.
# NOTE(review): per the CORS specification a wildcard origin cannot be
# combined with credentials — browsers reject "*" when
# allow_credentials=True. Confirm whether credentialed cross-origin
# requests are actually required; if not, drop allow_credentials.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # allow all origins
    allow_credentials=True,
    allow_methods=["*"],  # allow all HTTP methods
    allow_headers=["*"],  # allow all request headers
)

# Global model handle; populated once by the startup hook and shared by
# all request handlers. Stays None until loading completes, which the
# /health endpoint and the /asr/ handler both check.
whisper_model = None

@app.on_event("startup")
async def startup_event():
    """Load the faster-whisper model once when the service starts.

    The model size can be overridden through the ``WHISPER_MODEL_SIZE``
    environment variable; the previous hard-coded default ("large-v3")
    is preserved, so existing deployments are unaffected.
    """
    global whisper_model
    logger.info("正在加载Faster Whisper模型...")
    # Generalized from a hard-coded constant to an env-var override,
    # as the original inline comment intended.
    model_size = os.environ.get("WHISPER_MODEL_SIZE", "large-v3")
    whisper_model = WhisperModel(model_size)
    logger.info(f"Faster Whisper模型 {model_size} 加载完成")

def get_audio_info(audio_path):
    """Probe an audio file and return its basic properties.

    On success returns a dict with duration (seconds, rounded to two
    decimals), sample rate, channel count, container format and subtype.
    On failure the error is logged and a dict with a single "error" key
    holding the exception message is returned instead of raising.
    """
    try:
        meta = sf.info(audio_path)
    except Exception as exc:
        logger.error(f"获取音频信息时出错: {str(exc)}")
        return {"error": str(exc)}
    return {
        "duration": round(meta.duration, 2),
        "samplerate": meta.samplerate,
        "channels": meta.channels,
        "format": meta.format,
        "subtype": meta.subtype,
    }

@app.post("/asr/", response_class=JSONResponse)
async def transcribe_audio(file: UploadFile = File(...), model_size: Optional[str] = None):
    """
    Transcribe an uploaded audio file.

    - **file**: audio file to transcribe (.wav/.mp3/.flac/.m4a/.ogg)
    - **model_size**: optional label echoed back in the response's
      ``model_id``; the globally pre-loaded model always performs the
      actual inference (loading a model per request would be far too slow).

    Returns a JSON dict with the full transcript, per-segment timestamps,
    detected language/probability, audio metadata and timing figures.

    Raises HTTPException 400 for unsupported file types and 500 when the
    model is not yet loaded or transcription fails.
    """
    global whisper_model

    # file.filename is Optional in Starlette; normalize to "" so the
    # extension check below cannot crash with a TypeError on None.
    filename = file.filename or ""

    # Log the request immediately so even early failures are traceable.
    request_id = f"{int(time.time())}-{filename}"
    logger.info(f"收到ASR请求 [ID:{request_id}]: 文件名='{filename}', 模型={model_size if model_size else 'large-v3'}")

    # Start of the end-to-end timing window (upload, probing, inference,
    # result assembly).
    total_start_time = time.time()

    # Reject requests that arrive before the startup hook finished.
    if whisper_model is None:
        error_msg = "ASR模型尚未加载，请稍后再试"
        logger.error(f"请求 [ID:{request_id}] 失败: {error_msg}")
        raise HTTPException(status_code=500, detail=error_msg)

    # Reject unsupported containers up front, before touching the disk.
    allowed_extensions = [".wav", ".mp3", ".flac", ".m4a", ".ogg"]
    file_ext = os.path.splitext(filename)[1].lower()
    if file_ext not in allowed_extensions:
        error_msg = f"不支持的文件类型。允许的类型: {', '.join(allowed_extensions)}"
        logger.error(f"请求 [ID:{request_id}] 失败: {error_msg}")
        raise HTTPException(status_code=400, detail=error_msg)

    if model_size:
        # NOTE(review): model_size only affects the "model_id" string in
        # the response — the pre-loaded global model does the work.
        logger.warning(f"请求 [ID:{request_id}] 指定的model_size仅用于响应标识，实际使用全局已加载模型")

    logger.info(f"请求 [ID:{request_id}] 文件类型检查通过，开始保存临时文件")

    temp_file_path = None
    try:
        # Spool the upload to disk: faster-whisper expects a file path.
        # The path is captured before copying so the finally-block can
        # clean up even if the copy itself fails mid-way.
        with tempfile.NamedTemporaryFile(delete=False, suffix=file_ext) as temp_file:
            temp_file_path = temp_file.name
            shutil.copyfileobj(file.file, temp_file)

        logger.info(f"请求 [ID:{request_id}] 临时文件已保存: {temp_file_path}")

        # Probe duration/sample-rate metadata for the response payload.
        logger.info(f"请求 [ID:{request_id}] 开始获取音频信息")
        audio_info = get_audio_info(temp_file_path)
        logger.info(f"请求 [ID:{request_id}] 音频时长: {audio_info.get('duration', 0):.2f}秒")

        # Time only the model inference separately from the total.
        model_start_time = time.time()
        logger.info(f"请求 [ID:{request_id}] 开始转写音频")

        segments, info = whisper_model.transcribe(temp_file_path)

        model_processing_time = time.time() - model_start_time
        logger.info(f"请求 [ID:{request_id}] 模型处理完成，耗时: {model_processing_time:.2f}秒")

        # transcribe() yields segments lazily; materialize once, reuse twice.
        segments_list = list(segments)
        all_text = " ".join(segment.text for segment in segments_list)

        result = {
            "audio_file": file.filename,
            "audio_duration_seconds": audio_info.get("duration", 0),
            "audio_info": audio_info,
            "model_processing_time_seconds": round(model_processing_time, 2),
            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "model_id": f"faster-whisper-{model_size if model_size else 'large-v3'}",
            "detected_language": info.language,
            "language_probability": info.language_probability,
            "text": all_text,
            "segments": [
                {"start": segment.start, "end": segment.end, "text": segment.text}
                for segment in segments_list
            ],
        }

        # Total time is computed once, after result assembly, so it covers
        # the whole request (the original computed it twice).
        result["total_processing_time_seconds"] = round(time.time() - total_start_time, 2)

        logger.info(f"请求 [ID:{request_id}] 处理成功: 检测语言={info.language}, 总耗时={result['total_processing_time_seconds']}秒")

        return result

    except Exception as e:
        error_msg = f"处理音频时出错: {str(e)}"
        # logger.exception records the full traceback, not just the message.
        logger.exception(f"请求 [ID:{request_id}] 处理失败: {error_msg}")
        raise HTTPException(status_code=500, detail=error_msg)

    finally:
        # Release the upload stream and remove the spooled temp file,
        # whether the request succeeded or not.
        await file.close()
        if temp_file_path and os.path.exists(temp_file_path):
            os.unlink(temp_file_path)
            logger.info(f"请求 [ID:{request_id}] 临时文件已删除")

@app.get("/")
async def root():
    """Root endpoint: log the hit and return a short welcome payload."""
    logger.info("访问根路径")
    payload = {
        "message": "欢迎使用语音识别API",
        "usage": "请使用POST方法发送音频文件到/asr/端点",
    }
    return payload

@app.get("/health")
async def health_check():
    """Health probe: reports whether the global model has finished loading."""
    # Reading a module-level global needs no `global` declaration.
    loaded = whisper_model is not None
    status = "ok" if loaded else "loading"
    logger.info(f"健康检查: {status}")
    return {"status": status, "model_loaded": loaded}

if __name__ == "__main__":
    # Launch the server directly (no auto-reload) on all interfaces.
    # NOTE(review): the "main:app" import string assumes this module is
    # saved as main.py — confirm against the actual filename.
    logger.info("启动ASR API服务器，端口: 8001")
    uvicorn.run("main:app", host="0.0.0.0", port=8001, reload=False)