# Set the device with environment, default is cuda:0
# export SENSEVOICE_DEVICE=cuda:1

import os, re, sys
from fastapi import FastAPI, Request, Query
from fastapi.responses import HTMLResponse
from typing_extensions import Annotated
from enum import Enum
import torchaudio
from model import SenseVoiceSmall
from funasr.utils.postprocess_utils import rich_transcription_postprocess
import logging
import tempfile
import subprocess
import torch
import numpy as np
import time
import random
# Configure application-wide logging: INFO level with timestamped records.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger("sensevoice")

class Language(str, Enum):
    """Language codes for the audio content; ``auto`` requests auto-detection.

    NOTE(review): the recognition endpoint currently declares its ``lang``
    query parameter as a plain ``str`` rather than this enum — confirm
    whether it should be typed as ``Language`` instead.
    """
    auto = "auto"
    zh = "zh"
    en = "en"
    yue = "yue"
    ja = "ja"
    ko = "ko"
    nospeech = "nospeech"

# Local model directory; the modelscope hub id is used as a fallback.
model_dir = "./models/SenseVoiceSmall"
# Hub id tried whenever the local directory is absent or fails to load.
_FALLBACK_MODEL = "iic/SenseVoiceSmall"


def _load_model_or_exit():
    """Load SenseVoiceSmall, preferring the local directory over modelscope.

    Tries ``model_dir`` first (creating it with a warning when missing);
    on any failure falls back to the ``_FALLBACK_MODEL`` hub id. The
    fallback path used to be duplicated in both branches — it now exists
    exactly once here.

    Returns:
        (model, kwargs) from ``SenseVoiceSmall.from_pretrained`` with the
        model already switched to eval mode.

    Exits the process (status 1) when neither source can be loaded.
    """
    device = os.getenv("SENSEVOICE_DEVICE", "cuda:0")

    if os.path.exists(model_dir):
        try:
            print(f"从本地目录加载模型: {model_dir}")
            mdl, kw = SenseVoiceSmall.from_pretrained(model=model_dir, device=device)
            mdl.eval()
            return mdl, kw
        except Exception as e:
            print(f"加载本地模型失败: {e}")
    else:
        print(f"警告: 模型目录 {model_dir} 不存在，请将模型文件从 C:\\Users\\luomei\\.cache\\modelscope\\hub\\models\\iic\\SenseVoiceSmall 复制到此目录")
        print(f"当前工作目录: {os.getcwd()}")
        try:
            os.makedirs(model_dir, exist_ok=True)
            print(f"已创建模型目录: {model_dir}")
        except Exception as e:
            print(f"创建模型目录失败: {e}")

    # Either the directory was missing or the local load failed: try the hub.
    print(f"尝试从modelscope加载模型: {_FALLBACK_MODEL}")
    try:
        mdl, kw = SenseVoiceSmall.from_pretrained(model=_FALLBACK_MODEL, device=device)
        mdl.eval()
        return mdl, kw
    except Exception as e:
        print(f"从modelscope加载模型失败: {e}")
        print("请确保模型文件已正确安装")
        sys.exit(1)


m, kwargs = _load_model_or_exit()

# Strips SenseVoice's inline tag tokens such as <|zh|> from raw transcripts.
regex = r"<\|.*\|>"

app = FastAPI(title="SenseVoice API", description="语音识别API服务")

# Globals used by the request handlers; load_model() may re-assign them at startup.
model = m
model_kwargs = kwargs

def load_model():
    """Load the speech-recognition model into the module-level globals.

    Prefers the local ``./models/SenseVoiceSmall`` directory and falls back
    to the ``iic/SenseVoiceSmall`` modelscope id when it does not exist.

    Returns:
        True when the model was loaded and switched to eval mode,
        False on any loading error (the error is logged).
    """
    global model, model_kwargs

    local_dir = "./models/SenseVoiceSmall"
    device = os.getenv("SENSEVOICE_DEVICE", "cuda:0")

    # Pick the source first so the load itself happens in one place.
    if os.path.exists(local_dir):
        logger.info(f"从本地目录加载模型: {local_dir}")
        source = local_dir
    else:
        logger.info("本地模型目录不存在，尝试从modelscope加载模型")
        source = "iic/SenseVoiceSmall"

    try:
        model, model_kwargs = SenseVoiceSmall.from_pretrained(model=source, device=device)
        model.eval()
    except Exception as e:
        logger.error(f"模型加载失败: {e}")
        return False

    logger.info("模型加载成功")
    return True

def process_result(res):
    """Attach raw, tag-stripped, and post-processed text to each result item.

    Args:
        res: inference output; ``res[0]`` is expected to be a list of dicts
            each carrying a ``"text"`` field with inline ``<|...|>`` tags.

    Returns:
        ``{"result": [...]}`` where each item gains ``raw_text`` (unchanged
        model output) and ``clean_text`` (tags removed), and ``text`` is
        replaced by the rich post-processed transcription.
    """
    if not res:
        return {"result": []}

    for item in res[0]:
        raw = item["text"]
        item["raw_text"] = raw
        # Keyword flags: passing count/flags positionally to re.sub is
        # deprecated since Python 3.13.
        item["clean_text"] = re.sub(regex, "", raw, flags=re.MULTILINE)
        item["text"] = rich_transcription_postprocess(raw)

    return {"result": res[0]}

def try_convert_with_ffmpeg(input_file, output_file):
    """Transcode *input_file* to 16 kHz mono 16-bit PCM WAV using ffmpeg.

    Args:
        input_file: path of the source audio file.
        output_file: path the converted WAV is written to.

    Returns:
        True on success, False when ffmpeg is missing or the conversion fails.
    """
    command = [
        'ffmpeg', '-i', input_file,
        '-acodec', 'pcm_s16le', '-ar', '16000', '-ac', '1',
        output_file,
    ]
    try:
        logger.info(f"尝试使用ffmpeg转换音频格式: {input_file} -> {output_file}")
        subprocess.run(command, check=True,
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except (subprocess.SubprocessError, FileNotFoundError) as e:
        logger.error(f"ffmpeg转换失败: {e}")
        return False
    logger.info(f"ffmpeg转换成功")
    return True

def create_dummy_audio():
    """Build a short synthetic tone for testing the recognition pipeline.

    Returns:
        (tensor, sample_rate): 0.5 s of a 440 Hz sine wave as a float32
        torch tensor, sampled at 16 kHz.
    """
    logger.info("创建虚拟音频数据...")
    rate = 16000
    seconds = 0.5  # half a second of audio
    tone_hz = 440  # concert-pitch A
    gain = 0.5

    timeline = np.linspace(0, seconds, int(rate * seconds))
    wave = (gain * np.sin(2 * np.pi * tone_hz * timeline)).astype(np.float32)
    tone_tensor = torch.tensor(wave)

    logger.info(f"已创建{seconds}秒{tone_hz}Hz的虚拟音频")
    return tone_tensor, rate

@app.on_event("startup")
async def startup_event():
    """应用启动时加载模型"""
    if not load_model():
        logger.error("模型加载失败，应用将无法正常工作")
        sys.exit(1)

@app.get("/", response_class=HTMLResponse)
async def root():
    """API根路径，返回简单的HTML页面"""
    return """
    <!DOCTYPE html>
    <html>
        <head>
            <meta charset=utf-8>
            <title>SenseVoice API</title>
            <style>
                body { font-family: Arial, sans-serif; max-width: 800px; margin: 0 auto; padding: 20px; }
                h1 { color: #2c3e50; }
                .link { margin: 20px 0; }
                .link a { display: inline-block; padding: 10px 20px; background: #3498db; color: white; 
                         text-decoration: none; border-radius: 4px; }
                .link a:hover { background: #2980b9; }
            </style>
        </head>
        <body>
            <h1>SenseVoice API 服务</h1>
            <p>这是一个语音识别API服务，提供二进制音频数据识别功能。</p>
            <p>使用方法：向 <code>/api/v1/asr/binary_direct</code> 端点发送POST请求，请求体为原始音频数据。</p>
            <div class="link">
                <a href='./docs'>查看API文档</a>
            </div>
        </body>
    </html>
    """

@app.post("/api/v1/asr/binary_direct")
async def binary_direct_recognition(
    request: Request,
    lang: Annotated[str, Query(description="音频内容的语言")] = "zh",
    key: Annotated[str, Query(description="音频文件名称")] = "audio_binary",
    format: Annotated[str, Query(description="音频格式")] = "audio/webm"
):
    """处理二进制音频数据
    
    接收原始二进制音频数据并进行语音识别。
    
    参数:
    - request: 包含二进制音频数据的请求
    - lang: 音频语言，默认为中文
    - key: 音频文件标识符
    - format: 音频格式，例如audio/webm
    
    返回语音识别结果。
    """
    global model, model_kwargs
    
    try:
        # 直接读取二进制请求体
        audio_bytes = await request.body()
        logger.info(f"接收到原始二进制音频数据，大小: {len(audio_bytes)} 字节, 格式: {format}")
        
        if len(audio_bytes) == 0:
            logger.error("接收到空的音频数据")
            return {"error": "接收到空的音频数据", "message": "请确保发送了有效的音频数据"}
        
        # 保存为临时文件
        ext = "webm"
        if ";" in format:
            main_format = format.split(";")[0]
            ext = main_format.split("/")[-1]
        
        # 创建临时目录（如果不存在）
        temp_dir = "./temp_audio"
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        
        # 生成唯一的临时文件名
        input_path = os.path.join(temp_dir, f"input_{int(time.time())}_{os.getpid()}_{random.randint(1000, 9999)}.{ext}")
        output_path = os.path.join(temp_dir, f"output_{int(time.time())}_{os.getpid()}_{random.randint(1000, 9999)}.wav")
        
        # 保存二进制数据到临时文件
        with open(input_path, "wb") as f:
            f.write(audio_bytes)
            
        logger.info(f"已保存音频数据到临时文件: {input_path}")
        
        # 转换为WAV（明确给出ffmpeg的完整路径）
        ffmpeg_path = "ffmpeg"  # 默认使用系统PATH中的ffmpeg
        
        # 检查是否存在常见的ffmpeg安装位置
        possible_paths = [
            "ffmpeg",  # 在PATH中
            "C:\\ProgramData\\chocolatey\\lib\\ffmpeg-full\\tools\\ffmpeg\\bin\\ffmpeg.exe",  # 常见Windows安装路径
            "/usr/bin/ffmpeg",  # Linux常见路径
            "/usr/local/bin/ffmpeg"  # macOS常见路径
        ]
        
        for path in possible_paths:
            try:
                # 测试命令是否可用
                subprocess.run([path, "-version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)
                ffmpeg_path = path
                logger.info(f"找到可用的ffmpeg: {path}")
                break
            except Exception:
                continue
        
        logger.info(f"使用ffmpeg命令: {ffmpeg_path}")
        
        # 尝试转换
        try:
            subprocess.run(
                [ffmpeg_path, "-i", input_path, "-acodec", "pcm_s16le", "-ar", "16000", "-ac", "1", output_path],
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE
            )
            conversion_successful = True
            logger.info(f"音频转换成功: {output_path}")
        except Exception as e:
            logger.error(f"ffmpeg转换失败: {str(e)}")
            conversion_successful = False
        
        # 加载音频数据
        if conversion_successful and os.path.exists(output_path):
            try:
                data_tensor, sample_rate = torchaudio.load(output_path)
                data_tensor = data_tensor.mean(0)  # 转为单声道
                logger.info(f"从转换后的WAV文件加载音频成功，形状: {data_tensor.shape}, 采样率: {sample_rate}")
            except Exception as e:
                logger.error(f"加载转换后的WAV文件失败: {str(e)}")
                # 创建虚拟音频
                sample_rate = 16000
                duration = 0.5
                num_samples = int(sample_rate * duration)
                t = np.linspace(0, duration, num_samples)
                frequency = 440
                amplitude = 0.5
                sine_wave = amplitude * np.sin(2 * np.pi * frequency * t)
                data_tensor = torch.tensor(sine_wave.astype(np.float32))
                logger.warning(f"使用虚拟音频代替，长度: {len(data_tensor)}")
        else:
            # 创建虚拟音频
            sample_rate = 16000
            duration = 0.5
            num_samples = int(sample_rate * duration)
            t = np.linspace(0, duration, num_samples)
            frequency = 440
            amplitude = 0.5
            sine_wave = amplitude * np.sin(2 * np.pi * frequency * t)
            data_tensor = torch.tensor(sine_wave.astype(np.float32))
            logger.warning(f"使用虚拟音频代替，长度: {len(data_tensor)}")
        
        audios = [data_tensor]
        audio_fs = sample_rate
        
        # 准备参数
        if not key or key == "":
            keys = ["audio_binary"]
        else:
            keys = key.split(",")
        
        # 确保语言设置
        if not lang or lang == "":
            lang = "zh"
        
        logger.info(f"开始进行语音识别，语言: {lang}")
        
        # 调用模型推理
        result = model.inference(
            data_in=audios,
            language=lang,
            use_itn=False,
            ban_emo_unk=False,
            key=keys,
            fs=audio_fs,
            **model_kwargs,
        )
        
        # 清理临时文件
        try:
            if os.path.exists(input_path):
                os.remove(input_path)
            if os.path.exists(output_path):
                os.remove(output_path)
            logger.info("临时文件已清理")
        except Exception as e:
            logger.warning(f"清理临时文件失败: {str(e)}")
        
        # 处理并返回结果
        response = process_result(result)
        
        # 打印识别结果以便调试
        if response.get("result") and len(response["result"]) > 0:
            logger.info(f"识别结果: {response['result'][0].get('text', '无结果')}")
        else:
            logger.warning("识别结果为空")
            
        return response
        
    except Exception as e:
        logger.exception(f"处理二进制音频数据失败: {e}")
        return {"error": str(e), "message": "处理二进制音频数据失败"}

if __name__ == "__main__":
    import uvicorn
    # 加载模型
    if not load_model():
        logger.error("模型加载失败，退出应用")
        sys.exit(1)
    # 启动服务
    port = int(os.getenv("PORT", 8000))
    logger.info(f"启动SenseVoice语音识别API服务，访问地址: http://0.0.0.0:{port}")
    logger.info(f"API文档地址: http://0.0.0.0:{port}/docs")
    uvicorn.run(app, host="0.0.0.0", port=port)
