# coding=utf-8
import time
from faster_whisper import WhisperModel
import os

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


class FasterWhisperModel:
    """Thin wrapper around :class:`faster_whisper.WhisperModel`.

    Stores the construction parameters and eagerly loads the underlying
    model in ``__init__`` so a constructed instance is ready to transcribe.
    """

    def __init__(self, model_path, device="cpu", compute_type="int8", local_files_only=False, num_workers=5, download_root=None):
        """Remember the model configuration and load the model immediately.

        Args:
            model_path: Model size name or local directory of a converted model.
            device: Compute device passed to faster-whisper (e.g. ``"cpu"``).
            compute_type: Quantization/compute type (e.g. ``"int8"``).
            local_files_only: When True, never download — load strictly from disk.
            num_workers: Number of workers for the underlying model.
            download_root: Cache directory for downloads; only forwarded when
                ``local_files_only`` is False (matching the download path).
        """
        self.model_path = model_path
        self.device = device
        self.compute_type = compute_type
        self.local_files_only = local_files_only
        self.num_workers = num_workers
        self.download_root = download_root
        self.model = None
        self.load_model()

    def load_model(self):
        """Instantiate the underlying WhisperModel and store it on ``self.model``.

        The two call sites differed only in which optional kwargs were
        forwarded, so build the kwargs once instead of duplicating the call:
        ``local_files_only`` is passed when loading strictly from disk, and
        ``download_root`` only when downloading is allowed — identical
        effective arguments to the original branches.
        """
        kwargs = dict(
            device=self.device,
            compute_type=self.compute_type,
            num_workers=self.num_workers,
        )
        if self.local_files_only:
            kwargs["local_files_only"] = True
        else:
            kwargs["download_root"] = self.download_root
        self.model = WhisperModel(self.model_path, **kwargs)

    def transcribe(self, audio_path, language='zh', beam_size=5, vad_filter=True, vad_parameters=None):
        """Delegate to the underlying model's ``transcribe``.

        Returns the ``(segments, info)`` pair produced by faster-whisper;
        ``segments`` is a lazy generator — iterate it to run the decoding.
        """
        return self.model.transcribe(audio_path, language=language, beam_size=beam_size, vad_filter=vad_filter,
                                     vad_parameters=vad_parameters)


def main():
    """Benchmark a local faster-whisper model on one sample WAV file.

    Loads the model from a hard-coded local path (no download), transcribes
    a single file with VAD filtering enabled, and prints the detected
    language, the timed segments, and the total elapsed time.
    """
    # NOTE: hard-coded local model directory ("base" size); adjust as needed.
    model_path = r"D:\WorkSpace\version_controller\audio-to-text\sources\models\faster-whisper\base"
    st = time.perf_counter()
    model = WhisperModel(model_path, device="cpu", compute_type="int8", local_files_only=True, num_workers=10)

    segments, info = model.transcribe(
        "sources/006.2.2 视觉套装硬件介绍.wav",
        language='zh',
        beam_size=5,
        vad_filter=True,
        # Merge segments separated by <1s of silence.
        vad_parameters=dict(min_silence_duration_ms=1000),
    )
    print(f"Detected language '{info.language}' with probability {info.language_probability:f}")

    # segments is lazy — iterating here is what actually runs the decoding.
    for segment in segments:
        print(f"[{segment.start:.2f}s -> {segment.end:.2f}s] {segment.text}")

    print(f"Elapsed time: {time.perf_counter() - st:.2f}s")

    # Benchmark notes (same audio, "base" model, cpu/int8):
    # num_workers = 5:   28.20s
    # no num_workers:    25.17s
    # num_workers = 10:  27.32s


# Run the benchmark only when executed as a script, not on import.
if __name__ == '__main__':
    main()
