import numpy as np
import wave
import os

from whisper_live import utils
from whisper_live.transcriber import WhisperModel
from whisper_live.client import TranscriptionClient
def bytes_to_float_array(audio_bytes):
    """
    Convert 16-bit PCM audio bytes into a normalized NumPy float array.

    The buffer is interpreted as signed 16-bit samples and scaled so the
    resulting values lie between -1 and 1.

    Args:
        audio_bytes (bytes): Raw audio data in 16-bit PCM format.

    Returns:
        np.ndarray: float32 samples normalized to the [-1, 1) range.
    """
    samples = np.frombuffer(audio_bytes, dtype=np.int16).astype(np.float32)
    # Multiplying by the exact power-of-two reciprocal is equivalent to
    # dividing by 32768.0 (1/32768 is exactly representable in float32).
    return samples * (1.0 / 32768.0)

# # TensorRT model
# # transcriber = WhisperTRTLLM(
# #             engine_dir="small.en",
# #             assets_dir="assets",
# #             device="cuda",
# #             is_multilingual=True, # whether multilingual transcription is supported
# #             language=None, # target transcription language; if transcription is enabled but this is unset, defaults to en: language if multilingual else "en"
# #             task="transcribe" # task type, defaults to transcribe
# #         )

# Allow duplicate OpenMP runtimes to coexist in this process — a common
# workaround on Windows when several libraries each bundle their own libiomp;
# without it the Intel OpenMP runtime can abort the process at startup.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# faster-whisper model
transcriber = WhisperModel(
            model_size_or_path= "C:/Users/HP/.cache/huggingface/hub/faster_whisper",  # NOTE(review): hard-coded local path — confirm it exists on the target machine
            device="cuda",
            compute_type="float16",  # half precision on GPU
            local_files_only=False,  # allow downloading the model if not found locally
        )
# Sample input audio used by the (commented-out) examples below.
audio='examples/vad_example.wav'
# Offline version
# result, info = transcriber.transcribe(
#             audio,
#             initial_prompt=None,
#             language='zh',
#             task="transcribe",
#             vad_filter=True,
#             word_timestamps=True, # timestamps needed
#             vad_parameters={"threshold": 0.5})
# print(result)

# Streaming version
# filename = utils.resample(audio)
# chunk = 4096 * 4
# with wave.open(filename, "rb") as wavfile:
#     while True:
#         data = wavfile.readframes(chunk)
#         print(len(data))
#         if data == b"":
#             break
#         result, info = transcriber.transcribe(
#             bytes_to_float_array(data),
#             initial_prompt=None,
#             language='zh',
#             task="transcribe",
#             vad_filter=True,
#             word_timestamps=True,
#             vad_parameters={"threshold": 0.5})
#         print(result)
#
#     wavfile.close()

