import time
import os
import whisper
from fastapi import FastAPI, UploadFile, File, Query
from fastapi.responses import JSONResponse
import uvicorn
import soundfile as sf
from typing import Optional
import numpy as np
import torch

app = FastAPI(title="Whisper ASR API", description="使用OpenAI Whisper进行语音识别的API")

# Load the ASR model once at import time so every request shares it.
print("正在加载Whisper模型...")
model = whisper.load_model("large-v3")
print("Whisper模型加载完成！")

# Ensure the temp folder for uploaded files exists. exist_ok=True closes the
# race between the exists() check and makedirs() when several workers start
# at the same time (the original could crash with FileExistsError here).
temp_dir = "temp"
if not os.path.exists(temp_dir):
    os.makedirs(temp_dir, exist_ok=True)
    print(f"创建临时文件夹: {temp_dir}")

def process_audio_in_chunks(audio_path, model, chunk_length_s=5, overlap_length_s=2.0):
    """Transcribe a long audio file by splitting it into overlapping chunks.

    Args:
        audio_path: Path to the audio file on disk.
        model: A loaded Whisper model exposing ``transcribe``.
        chunk_length_s: Length of each chunk, in seconds.
        overlap_length_s: Overlap between consecutive chunks, in seconds.
            Must be smaller than ``chunk_length_s``.

    Returns:
        A dict with ``text`` (chunk transcripts joined with spaces),
        ``segments`` (timestamps shifted to absolute positions) and
        ``language``, mirroring the shape of ``model.transcribe``.

    Raises:
        ValueError: If ``overlap_length_s >= chunk_length_s`` — the chunk
            start positions would not advance.

    NOTE(review): audio in the overlapping regions is transcribed twice and
    the texts are simply concatenated, so some duplicated words around chunk
    boundaries are expected.
    """
    # Load audio; Whisper resamples everything to 16 kHz mono.
    audio = whisper.load_audio(audio_path)
    sample_rate = 16000

    # Convert chunk/overlap durations to sample counts.
    chunk_length = int(chunk_length_s * sample_rate)
    overlap_length = int(overlap_length_s * sample_rate)
    if overlap_length >= chunk_length:
        # A non-positive step would make range() raise or silently yield
        # nothing; fail loudly instead.
        raise ValueError("overlap_length_s must be smaller than chunk_length_s")

    # Short audio: no chunking needed, delegate directly to the model.
    if len(audio) <= chunk_length:
        return model.transcribe(audio_path)

    all_texts = []
    all_segments = []
    language = None  # detected on the first chunk, then pinned for the rest

    # Chunk start positions, advancing by (chunk - overlap) samples.
    starts = list(range(0, len(audio), chunk_length - overlap_length))
    covered_until = 0  # sample index up to which audio has been transcribed

    for i, start in enumerate(starts):
        end = min(start + chunk_length, len(audio))

        # Skip a tiny final chunk, but ONLY if the previous chunk already
        # covered it. (The original dropped the tail unconditionally, losing
        # up to chunk_length/2 - overlap seconds of audio.)
        if i == len(starts) - 1 and end - start < chunk_length / 2 and end <= covered_until:
            break

        print(f"处理音频块 {i+1}/{len(starts)}，时间范围: {start/sample_rate:.2f}s - {end/sample_rate:.2f}s")

        # Extract and transcribe the current chunk.
        chunk = audio[start:end]

        options = {"task": "transcribe"}
        if language:
            # Reuse the language detected on the first chunk so all chunks
            # are decoded consistently.
            options["language"] = language

        result = model.transcribe(chunk, **options)

        if language is None:
            language = result["language"]

        # Shift per-chunk timestamps to absolute positions in the full audio.
        time_offset = start / sample_rate
        for segment in result["segments"]:
            segment["start"] += time_offset
            segment["end"] += time_offset
            all_segments.append(segment)

        all_texts.append(result["text"])
        covered_until = end

    # Merge the per-chunk results into one transcribe()-shaped dict.
    return {
        "text": " ".join(all_texts),
        "segments": all_segments,
        "language": language,
    }

@app.get("/")
async def root():
    """Health-check endpoint: confirms the ASR service is running."""
    payload = {"message": "Whisper ASR API 已启动"}
    return payload

@app.post("/transcribe/")
async def transcribe_audio(
    file: UploadFile = File(...),
    use_stream: Optional[bool] = Query(True, description="是否使用流式处理，默认为True"),
    chunk_length_s: Optional[float] = Query(5.0, description="流式处理时每个音频块的长度(秒)，默认5秒"),
    overlap_length_s: Optional[float] = Query(2.0, description="流式处理时音频块之间的重叠长度(秒)，默认2秒")
):
    """Transcribe an uploaded audio file with the globally loaded Whisper model.

    When ``use_stream`` is enabled and the audio is longer than
    ``chunk_length_s``, the file is processed in overlapping chunks via
    ``process_audio_in_chunks``; otherwise it is transcribed in one pass.
    The temporary copy of the upload is always deleted afterwards.

    Returns:
        JSONResponse with the transcript text, detected language, processing
        time, audio metadata, and whether chunked processing was used.
    """
    # Record start time so we can report end-to-end latency.
    start_time = time.time()

    # Sanitize the client-supplied filename: keep only the base name so a
    # crafted name like "../../etc/x" cannot escape the temp directory
    # (path traversal). Also tolerate a missing filename.
    safe_name = os.path.basename(file.filename or "upload")
    temp_file_path = os.path.join(temp_dir, f"temp_{safe_name}")
    try:
        # Persist the upload to disk — Whisper and soundfile read from paths.
        with open(temp_file_path, "wb") as buffer:
            content = await file.read()
            buffer.write(content)

        # Probe the file for metadata; degrade gracefully if soundfile
        # cannot parse the container.
        try:
            info = sf.info(temp_file_path)
            audio_info = {
                "duration": info.duration,
                "samplerate": info.samplerate,
                "channels": info.channels,
                "format": info.format,
                "subtype": info.subtype
            }
            audio_duration = info.duration
        except Exception as e:
            audio_info = {"error": str(e)}
            audio_duration = 0
            # Without a known duration we cannot decide how to chunk; fall
            # back to single-pass transcription.
            use_stream = False

        used_streaming = False
        if use_stream and audio_duration > chunk_length_s:
            print(f"使用流式处理音频 {file.filename}，总时长: {audio_duration}秒")
            # Chunked processing for long audio.
            result = process_audio_in_chunks(
                temp_file_path,
                model,
                chunk_length_s=chunk_length_s,
                overlap_length_s=overlap_length_s
            )
            used_streaming = True
        else:
            print(f"使用标准处理音频 {file.filename}")
            # Single-pass transcription of the whole file.
            result = model.transcribe(temp_file_path)

        # Total wall-clock time including upload handling.
        processing_time = time.time() - start_time

        response_data = {
            "text": result["text"],
            "language": result["language"],
            "processing_time_seconds": processing_time,
            "audio_file": file.filename,
            "audio_duration_seconds": audio_duration,
            "audio_info": audio_info,
            "used_streaming": used_streaming,
            "segments_count": len(result["segments"]) if "segments" in result else 0
        }

        return JSONResponse(content=response_data)

    finally:
        # Always remove the temporary upload, even if transcription failed.
        if os.path.exists(temp_file_path):
            os.remove(temp_file_path)
            print(f"已删除临时文件: {temp_file_path}")

if __name__ == "__main__":
    # Serve the API on all interfaces, port 8000 (development entry point).
    uvicorn.run(app, host="0.0.0.0", port=8000) 