# Model card: https://huggingface.co/openai/whisper-large-v3-turbo

# Local filesystem path to the pre-downloaded Whisper large-v3-turbo weights
# (used both for the tokenizer and the pipeline model below).
MODEL_PATH = '/home/jie.wei/audio_test/whisper-large-v3-turbo/'

from transformers import pipeline
import torch
import time
from transformers import WhisperTokenizer as original_WhisperTokenizer
from transformers.models.whisper.modeling_whisper import WhisperForConditionalGeneration

import ixformer
import ixformer.functions as ixf_F

# Iluvatar CoreX (天数) acceleration library: monkey-patch torch's
# scaled-dot-product attention and linear ops process-wide so every model
# loaded afterwards (including the pipeline below) runs the ixformer kernels.
# NOTE(review): this must run BEFORE the pipeline is constructed to take effect.
torch.nn.functional.scaled_dot_product_attention = ixf_F.scaled_dot_product_attention
torch.nn.functional.linear = ixf_F.linear

# Tokenizer configured up-front for Chinese transcription so the pipeline
# decodes with the matching language/task defaults.
tokenizer = original_WhisperTokenizer.from_pretrained(
    MODEL_PATH, language="chinese", task="transcribe"
)

# fp16 ASR pipeline pinned to the first CUDA device. flash-attention-2 is
# explicitly disabled here — presumably because the ixformer SDPA patch is
# meant to handle attention instead (confirm with the vendor docs).
_pipeline_config = dict(
    tokenizer=tokenizer,
    model=MODEL_PATH,
    torch_dtype=torch.float16,
    device="cuda:0",
    model_kwargs={"use_flash_attention_2": False},
)
pipe = pipeline("automatic-speech-recognition", **_pipeline_config)

def transcribe_audio(audio_name, save_path=None):
    start_time = time.time()
    torch.cuda.profiler.start()
    outputs = pipe(
        audio_name,
        # chunk_length_s=30,
        chunk_length_s=30,
        batch_size=8,
        return_timestamps=True,
        generate_kwargs={"task": 'transcribe', 'language': 'chinese'}
    )
    torch.cuda.profiler.stop()
    print('运行时长:', time.time() - start_time)
    for row in outputs['chunks']:
        print(row)


if __name__ == "__main__":
    # Run the one-off transcription of the local test clip only when executed
    # as a script, so importing this module does not trigger a GPU run.
    path = "./test.mp3"
    transcribe_audio(path, None)
