from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import subprocess
import uuid
import aiohttp  # aiohttp for async HTTP requests
import asyncio
import ffmpeg
from pyannote.audio import Pipeline
import torch
import torchaudio
import torchaudio.transforms as transforms
from datetime import timedelta
from pyannote.core import Timeline, Segment
import os

app = FastAPI()

# Select the PyTorch device (use CUDA if available, otherwise CPU).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): hard-coded, Windows-specific model path — consider moving to
# configuration / an environment variable so deployments elsewhere work.
overlap_pipeline = Pipeline.from_pretrained(r"D:\python\torchaudio\models\overlappedSpeechDetection\config.yaml")

overlap_pipeline.instantiate({
    "min_duration_on": 0.0,   # minimum duration of an overlapped-speech segment
    "min_duration_off": 0.0   # minimum gap between two overlapped segments
})

overlap_pipeline.to(device)

def format_time(seconds):
    """Render a duration given in seconds as ``str(datetime.timedelta)``
    (e.g. ``65 -> '0:01:05'``; fractional seconds keep microseconds)."""
    duration = timedelta(seconds=seconds)
    return str(duration)

# Asynchronously download the source audio and convert it to WAV.
async def download_and_convert_audio(url, unique_id):
    """Download an audio file and convert it to WAV with ffmpeg.

    Args:
        url: HTTP(S) URL of the source audio.
        unique_id: suffix used to build unique temporary filenames.

    Returns:
        Tuple ``(local_filename, wav_filename)`` — the downloaded source
        file and the converted WAV file (both left on disk for the caller
        to consume and delete).

    Raises:
        HTTPException: 400 when the download does not return HTTP 200.
    """
    local_filename = f"audio_{unique_id}.aac"
    wav_filename = f"audio_{unique_id}.wav"

    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            if response.status != 200:
                raise HTTPException(status_code=400, detail="Failed to download audio file")
            # Stream the body to disk in chunks instead of buffering the
            # entire file in memory with response.read().
            with open(local_filename, 'wb') as f:
                async for chunk in response.content.iter_chunked(64 * 1024):
                    f.write(chunk)

    # Run the blocking ffmpeg conversion in a worker thread so the event
    # loop stays responsive. get_running_loop() is the non-deprecated way
    # to obtain the loop from inside a coroutine.
    loop = asyncio.get_running_loop()
    try:
        await loop.run_in_executor(None, lambda: ffmpeg.input(local_filename).output(wav_filename).run(
            capture_stdout=True, capture_stderr=True, overwrite_output=True
        ))
    except Exception:
        # Don't leak the downloaded temp file when the conversion fails.
        if os.path.exists(local_filename):
            os.remove(local_filename)
        raise
    return local_filename, wav_filename

# Overlapped-speech detection on a full waveform.
async def process_segments(overlap_pipeline, waveform):
    """Run overlapped-speech detection and return merged overlap segments.

    Args:
        overlap_pipeline: a pyannote pipeline callable accepting a dict with
            ``uri``/``waveform``/``sample_rate`` keys.
        waveform: the (already resampled to 16 kHz) audio tensor.

    Returns:
        List of ``{"startTime": float, "endTime": float}`` dicts, one per
        merged overlapped-speech segment.

    NOTE(review): this coroutine never awaits — the pipeline inference runs
    synchronously on the event loop; consider run_in_executor if latency of
    other requests matters.
    """
    # Run detection over the whole audio in one pass.
    overlap_result = overlap_pipeline({"uri": "audio", "waveform": waveform, "sample_rate": 16000})

    # get_timeline().support() already yields the merged, non-overlapping
    # extent of the annotation, so the previous set-of-Segment + Timeline
    # re-merge pass was a redundant no-op and has been removed.
    return [
        {"startTime": segment.start, "endTime": segment.end}
        for segment in overlap_result.get_timeline().support()
    ]

# Request-body data model for /process_audio.
class AudioUrlRequest(BaseModel):
    """Request body for ``POST /process_audio``."""
    audioUrl: str  # URL of the audio file to analyze

@app.post('/process_audio')
async def process_audio(request: AudioUrlRequest):
    """Download an audio file, detect overlapped speech, return time segments.

    Pipeline: download + convert to WAV → resample to 16 kHz → VAD trim →
    overlapped-speech detection. Temp files are always removed, even when a
    processing step raises.

    Raises:
        HTTPException: 400 when ``audioUrl`` is missing or the download fails.
    """
    audio_url = request.audioUrl

    if not audio_url:
        raise HTTPException(status_code=400, detail="audioUrl is required")

    # Short unique ID so concurrent requests don't clobber each other's files.
    unique_id = str(uuid.uuid4())[:8]

    # Download and convert the audio asynchronously.
    local_filename, wav_filename = await download_and_convert_audio(audio_url, unique_id)

    try:
        # Load, resample to the 16 kHz rate the pipeline expects, and trim
        # leading silence with VAD.
        waveform, sample_rate = torchaudio.load(wav_filename)
        waveform = transforms.Resample(orig_freq=sample_rate, new_freq=16000)(waveform)
        waveform = transforms.Vad(sample_rate=16000)(waveform)

        # Run overlapped-speech detection.
        time_segments = await process_segments(overlap_pipeline, waveform)
    finally:
        # Always clean up temp files — previously they leaked whenever
        # loading/resampling/inference raised.
        for path in (local_filename, wav_filename):
            if os.path.exists(path):
                os.remove(path)

    return {
        "code": 10000,
        "msg": "success",
        "data": {
            "timeList": time_segments
        }
    }

if __name__ == '__main__':
    # Development entry point: serve the API with uvicorn on all interfaces.
    import uvicorn
    uvicorn.run(app, host='0.0.0.0', port=5000)
