import torch
import pdb
from transformers import AutoModelForSpeechSeq2Seq
import numpy as np

# Pick the compute device and a matching precision: fp16 on GPU, fp32 on CPU.
cuda_available = torch.cuda.is_available()
device = "cuda:0" if cuda_available else "cpu"
# device = 'cpu'  # uncomment to force CPU execution
torch_dtype = torch.float16 if cuda_available else torch.float32

# Local checkpoint of Whisper large-v3.
model_id = "/apdcephfs_qy3/share_976139/users/joyounglv/pretrained_ckpt/whisper-large-v3"

# Load the seq2seq speech model and move it onto the selected device
# (`.to` returns the module itself, so the chained call is equivalent to a
# separate `model.to(device)` statement).
model = AutoModelForSpeechSeq2Seq.from_pretrained(
    model_id,
    torch_dtype=torch_dtype,
    use_safetensors=True,
).to(device)

# Back-compat shim: `torch.library.register_fake` does not exist on older
# torch builds, but some transformers versions reference it at import time.
# The previous placeholder returned None, which is wrong for a decorator
# factory — anything decorated with it would be replaced by None. Install an
# identity-decorator stand-in instead so decorated functions are left intact.
if not hasattr(torch.library, 'register_fake'):
    def register_fake(*args, **kwargs):
        """No-op stand-in for torch.library.register_fake (identity decorator)."""
        def _decorator(fn):
            return fn
        return _decorator
    torch.library.register_fake = register_fake

from transformers import WhisperProcessor 

# Processor bundles the feature extractor (audio -> log-mel features) and the
# tokenizer, both loaded from the same local checkpoint as the model.
processor = WhisperProcessor.from_pretrained(model_id)

# pipe = pipeline(
#     "automatic-speech-recognition",
#     model=model,
#     tokenizer=processor.tokenizer,
#     feature_extractor=processor.feature_extractor,
#     torch_dtype=torch_dtype,
#     device=device,
# )
# Decoding options forwarded to model.generate(); these mirror Whisper's
# long-form transcription heuristics (fallback thresholds etc.).
generate_kwargs = dict(
    num_beams=1,                         # greedy decoding
    condition_on_prev_tokens=True,       # condition each chunk on prior text
    compression_ratio_threshold=1.35,    # zlib compression ratio threshold (in token space)
    temperature=0.0,                     # deterministic decoding
    logprob_threshold=-1.0,              # average log-prob cutoff for fallback
    no_speech_threshold=0.6,             # silence-detection threshold
    return_timestamps="word",            # request word-level timestamps
    language="chinese",                  # force Chinese transcription
)



import wave

# Read raw PCM samples from an AISHELL test utterance. The file is opened in
# a `with` block so the handle is released even if reading raises; the sample
# dtype is derived from the actual sample width instead of being hard-coded
# to int16 (the original would misinterpret 8-/32-bit PCM; 24-bit PCM has no
# native numpy dtype and is rejected explicitly).
_WAV_PATH = '/apdcephfs_qy3/share_976139/users/joyounglv/asr_data/data_aishell/test/S0770/BAC009S0770W0310.wav'
with wave.open(_WAV_PATH, 'rb') as wav_file:
    n_channels = wav_file.getnchannels()
    sampwidth = wav_file.getsampwidth()
    framerate = wav_file.getframerate()
    n_frames = wav_file.getnframes()
    frames = wav_file.readframes(n_frames)

# Map PCM sample width (bytes per sample) to the numpy dtype of one sample.
_PCM_DTYPES = {1: np.uint8, 2: np.int16, 4: np.int32}
try:
    dtype = _PCM_DTYPES[sampwidth]
except KeyError:
    raise ValueError(f"unsupported PCM sample width: {sampwidth} bytes") from None
audio_data = np.frombuffer(frames, dtype=dtype)
# NOTE(review): assumes a mono file — for multi-channel input the samples are
# interleaved and would need reshaping to (n_frames, n_channels); confirm the
# upstream data is mono.



# Extract log-mel features. Truncation is disabled so long-form audio is kept
# whole, and an attention mask marks the padded frames for the model.
features = processor(
    audio_data,
    sampling_rate=framerate,
    return_tensors="pt",
    truncation=False,
    padding="longest",
    return_attention_mask=True,
)
# Move the feature tensors to the model's device and precision.
inputs = features.to(device, dtype=torch_dtype)

result = model.generate(**inputs, **generate_kwargs)
# result = pipe(audio=audio_data,text='普通话转写结果如下：', generate_kwargs=generate_kwargs)
# print(result["text"])
# Decode the first entry of the generate() output back into text.
print(processor.tokenizer.decode(result[0]))

