import os
# import whisper
from pyannote.audio import Pipeline
from splitUtils import diarize_text
import concurrent.futures
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks


# Directory where per-file transcription results are written.
output_dir = './pyannote-whisper'

# ASR (speech-to-text) pipeline: Paraformer-large model for 16 kHz Mandarin,
# with built-in VAD and punctuation restoration.
# (The duplicate `pipeline`/`Tasks` imports that used to sit here are already
# provided by the top-of-file import block.)
inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
    model_revision="v1.2.4")

def process_audio(file_path):
    """Transcribe one WAV file, attribute speech to speakers, and write the
    merged result to a ``.txt`` file in ``output_dir``.

    Each output line has the form ``<start> <end> <speaker> <text>`` with
    times in seconds (two decimals).

    Args:
        file_path: Path to the input ``.wav`` file.
    """
    # Speech-to-text over the whole file.
    asr_result = inference_pipeline(audio_in=file_path)

    # Speaker-diarization pipeline.  Renamed from `pipeline` — the original
    # name shadowed the imported modelscope `pipeline` factory function.
    # SECURITY: a Hugging Face token was hard-coded here and committed to
    # source — rotate it, and prefer supplying it via the HF_TOKEN env var
    # (the literal is kept only as a backward-compatible fallback).
    diarization_pipeline = Pipeline.from_pretrained(
        "pyannote/speaker-diarization",
        use_auth_token=os.environ.get(
            "HF_TOKEN", "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"))

    # Use GPU acceleration when available instead of crashing on CPU-only
    # machines with an unconditional .to("cuda").
    import torch
    if torch.cuda.is_available():
        diarization_pipeline.to(torch.device("cuda"))

    # num_speakers is optional; omit it to let the model estimate the count.
    diarization_result = diarization_pipeline(file_path, num_speakers=2)
    print(diarization_result)

    # Merge ASR text with the diarization segments.
    final_result = diarize_text(asr_result, diarization_result)

    # Derive the output name with splitext instead of the fragile `[:-4]`
    # slice (which assumed a 3-character extension).
    base_name = os.path.splitext(os.path.basename(file_path))[0]
    output_file = os.path.join(output_dir, base_name + '.txt')
    # Explicit encoding: the transcript is Chinese text.
    with open(output_file, 'w', encoding='utf-8') as f:
        for seg, spk, sent in final_result:
            line = f'{seg.start:.2f} {seg.end:.2f} {spk} {sent}\n'
            f.write(line)
            print(line)

# Create the output directory if needed (exist_ok avoids the check-then-create
# race of the previous `if not os.path.exists(...)` guard).
os.makedirs(output_dir, exist_ok=True)

# Directory scanned for input WAV files.
# NOTE(review): this is the same directory as output_dir, so the .txt results
# land next to the .wav inputs — confirm that is intended.
wave_dir = './pyannote-whisper'

# Collect every .wav file in the input directory.
wav_files = [os.path.join(wave_dir, file)
             for file in os.listdir(wave_dir) if file.endswith('.wav')]

# Process the files concurrently.  Consuming the map iterator (via list())
# makes worker exceptions propagate here instead of being silently discarded
# when the unread iterator is garbage-collected.
with concurrent.futures.ThreadPoolExecutor() as executor:
    list(executor.map(process_audio, wav_files))

print('处理完成！')