from flask import Flask, request, jsonify
import subprocess
import uuid
import requests
import ffmpeg
from pyannote.audio import Model, Pipeline
from pyannote.audio.pipelines import VoiceActivityDetection
import torch
import torchaudio
import torchaudio.transforms as transforms
from datetime import timedelta
from pyannote.core import Timeline, Segment
import os
from concurrent.futures import ThreadPoolExecutor
import time

#os.environ['HTTP_PROXY'] = 'http://127.0.0.1:15236'
#os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:15236'

app = Flask(__name__)

# Select the PyTorch compute device (CUDA when available).
# NOTE(review): `device` is never passed to the pipelines or tensors below —
# confirm whether inference is actually intended to run on GPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the segmentation model from a local checkpoint.
# NOTE(review): the commented-out remote-load lines originally embedded a
# hardcoded Hugging Face auth token (redacted here); that token should be
# revoked and supplied via an environment variable if remote loading is
# ever re-enabled.
# segmentation_model = Model.from_pretrained("pyannote/segmentation-3.0", use_auth_token="<redacted>")

segmentation_model = Model.from_pretrained(r"D:\python\torchaudio\models\segmentation3\pytorch_model.bin")
vad_pipeline = VoiceActivityDetection(segmentation=segmentation_model)
# overlap_pipeline = Pipeline.from_pretrained("pyannote/overlapped-speech-detection", use_auth_token="<redacted>")

overlap_pipeline = Pipeline.from_pretrained(r"D:\python\torchaudio\models\overlappedSpeechDetection\config.yaml")


# Shared pipeline hyper-parameters: zero values disable duration filtering,
# i.e. keep even very short active regions and do not bridge short gaps.
HYPER_PARAMETERS = {
    "min_duration_on": 0.0,
    "min_duration_off": 0.0
}
vad_pipeline.instantiate(HYPER_PARAMETERS)
overlap_pipeline.instantiate(HYPER_PARAMETERS)


# Helper to render a duration as a human-readable time string.
def format_time(seconds):
    """Format a duration in seconds as ``H:MM:SS[.ffffff]`` (timedelta's str form)."""
    duration = timedelta(seconds=seconds)
    return f"{duration}"


# Download the remote audio and convert it to WAV for torchaudio.
def download_and_convert_audio(url, unique_id):
    """Download an audio file and convert it to WAV via ffmpeg.

    Args:
        url: HTTP(S) URL of the source audio (e.g. an .aac file).
        unique_id: short identifier used to build unique local filenames,
            so concurrent runs do not clobber each other's files.

    Returns:
        Tuple ``(local_filename, wav_filename)`` — the downloaded source
        file and the converted WAV file, both in the working directory.

    Raises:
        requests.HTTPError: if the server returns an error status.
        requests.Timeout: if the download stalls beyond the timeout.
        ffmpeg.Error: if the conversion fails (stderr is captured).
    """
    local_filename = f"audio_{unique_id}.aac"
    wav_filename = f"audio_{unique_id}.wav"

    # Stream the body to disk instead of buffering it in memory, fail fast
    # on HTTP errors (the original silently wrote error pages to disk and
    # fed them to ffmpeg), and bound the wait with a timeout.
    response = requests.get(url, stream=True, timeout=60)
    response.raise_for_status()
    with open(local_filename, 'wb') as f:
        for chunk in response.iter_content(chunk_size=65536):
            f.write(chunk)

    ffmpeg.input(local_filename).output(wav_filename).run(
        capture_stdout=True, capture_stderr=True, overwrite_output=True
    )
    return local_filename, wav_filename


# Run VAD first, then overlap detection per speech segment (two-stage variant).
def process_segments_double(vad_pipeline, overlap_pipeline, waveform):
    """Detect overlapped speech inside VAD-detected speech segments.

    Args:
        vad_pipeline: instantiated voice-activity-detection pipeline.
        overlap_pipeline: instantiated overlapped-speech-detection pipeline.
        waveform: audio tensor; assumed to already be resampled to 16 kHz
            (the sample_rate passed to both pipelines is hard-coded) —
            TODO confirm with callers.

    Returns:
        List of ``{"startTime": float, "endTime": float}`` dicts for the
        merged overlapping-speech regions, in timeline order.
    """
    speech_segments = vad_pipeline({"uri": "audio", "waveform": waveform, "sample_rate": 16000})

    def detect_overlaps(segment):
        # Return results instead of mutating shared state from worker threads.
        overlap_result = overlap_pipeline(
            {"uri": "audio", "waveform": waveform, "sample_rate": 16000, "segment": segment})
        return [Segment(o.start, o.end) for o in overlap_result.get_timeline().support()]

    unique_overlapping_segments = set()
    with ThreadPoolExecutor() as executor:
        # Consuming the map iterator is essential: the original discarded it,
        # so any exception raised inside a worker was silently swallowed.
        for overlaps in executor.map(detect_overlaps, speech_segments.get_timeline().support()):
            unique_overlapping_segments.update(overlaps)

    # Merge adjacent/overlapping regions into a minimal set of spans.
    merged_timeline = Timeline(segments=unique_overlapping_segments).support()

    return [{"startTime": seg.start, "endTime": seg.end} for seg in merged_timeline]

# Overlap detection (single-pass variant).
def process_segments(overlap_pipeline, waveform):
    """Detect overlapped speech across the whole waveform in one pass.

    Args:
        overlap_pipeline: instantiated overlapped-speech-detection pipeline.
        waveform: audio tensor; the pipeline is told it is 16 kHz —
            presumably resampled by the caller, verify.

    Returns:
        List of ``{"startTime": float, "endTime": float}`` dicts describing
        the merged overlapping-speech regions.
    """
    # Run overlap detection over the entire audio at once.
    annotation = overlap_pipeline({"uri": "audio", "waveform": waveform, "sample_rate": 16000})

    # Collect the detected overlap spans, deduplicated.
    regions = {Segment(span.start, span.end)
               for span in annotation.get_timeline().support()}

    # Merge adjacent/overlapping spans into a minimal timeline.
    merged = Timeline(segments=regions).support()

    # Emit plain dicts so the result is JSON-serializable.
    return [{"startTime": span.start, "endTime": span.end} for span in merged]



#audio_url = "https://hl-test-ca.obs.cn-east-3.myhuaweicloud.com/hrjy/lztest/ll.aac"
audio_url = "https://hlyz-test.obs.cn-east-3.myhuaweicloud.com/hrjy/1723533449352-70bc2067-dc89-4fb6-9801-579015ce6acd.aac"

# Short unique id so parallel runs don't clobber each other's temp files.
unique_id = str(uuid.uuid4())[:8]

local_filename, wav_filename = download_and_convert_audio(audio_url, unique_id)

try:
    # Load the converted audio and normalize it to the 16 kHz rate the
    # pipelines expect; torchaudio's Vad transform trims leading silence.
    waveform, sample_rate = torchaudio.load(wav_filename)
    waveform = transforms.Resample(orig_freq=sample_rate, new_freq=16000)(waveform)
    waveform = transforms.Vad(sample_rate=16000)(waveform)

    # Time only the detection step (the original also timed file deletion,
    # inflating the reported processing time).
    start_time = time.time()

    #time_segments = process_segments_double(vad_pipeline, overlap_pipeline, waveform)
    time_segments = process_segments(overlap_pipeline, waveform)

    end_time = time.time()
finally:
    # Always clean up the temp files — the original removed them only on
    # success, leaking files whenever loading or detection raised.
    for path in (local_filename, wav_filename):
        if os.path.exists(path):
            os.remove(path)

# Compute and print the processing time and detected overlap segments.
processing_time = end_time - start_time
print(f"处理时间: {processing_time:.2f} 秒")
print(time_segments)
