from fastapi import APIRouter, UploadFile, File, HTTPException, Depends, Request,Form
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from typing import Optional, Any, Dict
import numpy as np
import soundfile as sf
from io import BytesIO
import os
from utils.file_handler import save_temp_file, remove_temp_file
from utils.logger import setup_logger
from config import MODEL_CONFIG
from models.dependencies import get_dependencies
import whisper

# ModelHandler and ModelConfig are assumed to be defined in the project code
from data_handler.data_model_process import ModelHandler
from config import ModelConfig  # 替换为实际模块路径

# Whisper router
router_whisper = APIRouter(prefix="/whisper", tags=["Whisper"])
logger = setup_logger()

class WhisperConfig(BaseModel):
    """Schema of the JSON string sent in the ``config`` form field of the
    Whisper transcription endpoint.

    All fields have defaults, so an empty JSON object ``{}`` is a valid config.
    """
    # Target language, or "auto" — presumably automatic detection; confirm downstream.
    language: Optional[str] = "auto"
    # Batch size forwarded to the transcription backend.
    batch_size: int = 16
    # Whether to align the output (NOTE(review): not currently forwarded — see transcribe).
    align_output: bool = True
    # Optional custom alignment model identifier (NOTE(review): not currently forwarded).
    custom_alignment_model: Optional[str] = None
    # Extra free-form parameters; pydantic copies field defaults per instance,
    # so the shared ``{}`` literal is safe here.
    additional_params: Dict[str, Any] = {}
    stream: bool = False  # stream field added to support streaming control


import json
from types import SimpleNamespace
import whisper

@router_whisper.post("/transcribe")
async def transcribe(
    audio: UploadFile = File(...),
    config: str = Form(...),
    dependencies: Dict = Depends(get_dependencies)
):
    """
    Whisper speech transcription endpoint (non-streaming).

    - Input: an audio file (wav/mp3) plus a JSON-encoded ``WhisperConfig``
      in the ``config`` form field.
    - Output: the result of ``ModelHandler.call_model``, i.e.
      {"text": full text, "segments": [{"start": t0, "end": t1, "text": segment}]}

    Raises:
        HTTPException 422: ``config`` is not valid JSON for ``WhisperConfig``.
        HTTPException 500: transcription failed.
    """
    logger.info("语音转录开始")

    # Validate the config through the pydantic model so missing fields fall
    # back to their declared defaults. The previous SimpleNamespace approach
    # raised AttributeError (-> 500) whenever the client omitted a field,
    # and invalid JSON escaped as an unhandled JSONDecodeError.
    try:
        cfg = WhisperConfig(**json.loads(config))
    except (TypeError, ValueError) as e:
        # json.JSONDecodeError and pydantic's ValidationError both subclass ValueError;
        # TypeError covers a JSON payload that is not an object (e.g. a list).
        raise HTTPException(status_code=422, detail=f"无效的 config 参数: {str(e)}") from e

    logger.debug(f"Whisper 配置: {cfg.language} {cfg.stream}")

    temp_file_path = None  # initialized up front so the finally-clause can test it safely
    try:
        # Persist the uploaded audio to a temporary file for the model runner.
        temp_file_path = await save_temp_file(audio)

        handler = ModelHandler()
        # Map the request-level WhisperConfig onto the internal ModelConfig.
        model_config = ModelConfig(
            model="whisper",       # fixed model name
            model_name="whisper",  # fixed model name
            temperature=0.0,       # Whisper does not use sampling temperature here
            max_tokens=512,        # default
            stream=cfg.stream,
            language=cfg.language,    # extension field
            batch_size=cfg.batch_size,  # extension field
            # align_output / custom_alignment_model intentionally not forwarded (as before)
        )
        # Delegate the actual transcription to ModelHandler.
        result = await handler.call_model(
            model="whisper",
            model_type="whisperx",  # keep type "whisperx", consistent with ModelHandler
            data=temp_file_path,    # pass the audio file path
            streaming=False,        # this endpoint is always non-streaming
            config=model_config,
            dependencies=dependencies
        )
        return result
    except Exception as e:
        logger.error(f"Whisper 转录失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"转录失败: {str(e)}") from e
    finally:
        # Best-effort cleanup: drop the temp file and release the model usage slot.
        if temp_file_path:
            remove_temp_file(temp_file_path)
        model_manager = dependencies.get("model_manager")
        if model_manager:
            model_manager.decrement_usage("whisper")