"""
分离音视频的说话人语音
"""

from threading import RLock
from pyannote.audio import Model
from pyannote.audio.pipelines import SpeakerDiarization
from typing import Union, List, Dict, Optional
from pathlib import Path
from moviepy.editor import VideoFileClip, AudioFileClip
from utils.hash import sha1_hash_data
import tempfile
import shutil
import torch
import os
from loguru import logger


class Diarization(object):
    """Thread-safe singleton wrapper around a pyannote ``SpeakerDiarization`` pipeline.

    Loads the segmentation and speaker-embedding models once (on first
    construction) and exposes :meth:`split` to cut a video/audio file into
    per-speaker wav clips.
    """

    lock = RLock()

    def __new__(cls, *args, **kwargs):
        # Lazy singleton: the heavy model loading happens exactly once,
        # guarded by an RLock so concurrent first constructions are safe.
        with Diarization.lock:
            if not hasattr(Diarization, "_instance"):
                Diarization._instance = object.__new__(cls)
                emb_model = '/root/autodl-tmp/wespeaker-voxceleb-resnet34-LM/pytorch_model.bin'
                embedding = Model.from_pretrained(emb_model)
                seg_model = '/root/autodl-tmp/segmentation-3.0/pytorch_model.bin'
                segmentation = Model.from_pretrained(seg_model)
                device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
                cls.diarization_pipeline = SpeakerDiarization(
                    segmentation=segmentation, embedding=embedding
                ).to(device)
        return Diarization._instance

    def __init__(self):
        # __init__ runs on every Diarization() call even though __new__
        # returns the shared instance; guard so the pipeline
        # hyper-parameters are only instantiated once.
        with Diarization.lock:
            if getattr(Diarization, "_initialized", False):
                return
            # Clustering/segmentation hyper-parameters for the diarization pipeline.
            hyper_parameters = {
                "clustering": {
                    "method": "centroid",
                    "min_cluster_size": 12,
                    "threshold": 0.7045654963945799
                },
                "segmentation": {
                    "min_duration_off": 0.0
                }
            }
            self.diarization_pipeline.instantiate(hyper_parameters)
            Diarization._initialized = True

    @staticmethod
    def video2audio(video_file: Union[Path, str], save_file: Optional[str] = None) -> str:
        """Extract the audio track of a video into a wav file.

        :param video_file: path of the source video.
        :param save_file: target wav path; a temp file is created when None.
        :return: path of the written wav file.
        """
        video_clip = VideoFileClip(video_file)
        try:
            if save_file is None:
                # Close the fd returned by mkstemp immediately — moviepy
                # reopens the file by path; keeping the fd open leaks it.
                fd, save_file = tempfile.mkstemp(suffix='.wav')
                os.close(fd)
            video_clip.audio.write_audiofile(save_file)
        finally:
            video_clip.close()
        return save_file

    @staticmethod
    def audio_transform(audio_file: Union[Path, str], save_file: Optional[str] = None) -> str:
        """Convert an audio file to wav format.

        :param audio_file: path of the source audio.
        :param save_file: target wav path; a temp file is created when None.
        :return: path of the written wav file.
        """
        audio = AudioFileClip(audio_file)
        try:
            if save_file is None:
                # Same fd-leak fix as in video2audio.
                fd, save_file = tempfile.mkstemp(suffix='.wav')
                os.close(fd)
            audio.write_audiofile(save_file)
        finally:
            audio.close()
        return save_file

    def split(self, video_or_audio_file: Union[Path, str],
              save_dir: Optional[Union[Path, str]] = None) -> List[Dict]:
        """Diarize an audio/video file and split it into per-speaker clips.

        :param video_or_audio_file: source media file (.mp4 video, wav, or
            any audio format moviepy can read).
        :param save_dir: directory for the per-segment wav files; when None
            the clips are discarded and ``wav_file`` is None in each result.
        :return: list of dicts with keys ``source``, ``start_time``,
            ``end_time``, ``speaker`` and ``wav_file``.
        """
        suffix = Path(video_or_audio_file).suffix.lower()
        if suffix == '.mp4':
            audio_file = self.video2audio(video_or_audio_file)
        elif suffix != '.wav':
            audio_file = self.audio_transform(video_or_audio_file)
        else:
            audio_file = video_or_audio_file

        # Fix: only create the directory when one was requested; the original
        # crashed with Path(None) even though save_dir defaults to None.
        if save_dir:
            Path(save_dir).mkdir(parents=True, exist_ok=True)

        results = []
        diarization = self.diarization_pipeline(audio_file)
        audio = AudioFileClip(audio_file)
        try:
            for segment, _, label in diarization.itertracks(yield_label=True):
                # Close the temp fd right away; write_audiofile reopens by path.
                fd, clip_file = tempfile.mkstemp(suffix='.wav')
                os.close(fd)
                clip_audio = audio.subclip(segment.start, segment.end)
                clip_audio.write_audiofile(clip_file)

                res_item = {
                    'source': video_or_audio_file,
                    'start_time': segment.start,
                    'end_time': segment.end,
                    'speaker': label
                }

                # Persist the clip under a content-derived name, or drop it.
                if save_dir:
                    speech_name = sha1_hash_data(res_item)
                    target_file = (Path(save_dir) / f'{speech_name}.wav').absolute().as_posix()
                    shutil.move(clip_file, target_file)
                else:
                    target_file = None
                    Path(clip_file).unlink()

                res_item['wav_file'] = target_file
                results.append(res_item)
        finally:
            audio.close()
        return results


if __name__ == '__main__':
    import json

    diarizer = Diarization()
    logger.info("start split")
    split_results = diarizer.split('/root/test.mp4', save_dir='../datas/speech')
    logger.info("end split")
    # Dump the segment metadata alongside the extracted clips.
    with open('./split_result.json', 'w', encoding='utf-8', newline='') as fw:
        json.dump(split_results, fw, ensure_ascii=False, indent=4)