#!/usr/bin/env python3
# Author: Armit
# Create Time: 周五 2025/08/22

# 使用预训练whisper模型进行推理 (ASR)
# NOTE: 不能直接字面/token对比，误判率较高

from time import time
from pathlib import Path
from argparse import ArgumentParser

import torch
from transformers import WhisperProcessor, WhisperForConditionalGeneration
from transformers.generation import LogitsProcessor
import librosa as L

from utils import MODELS

device = 'cpu'


class MyLogitsProcessorList(LogitsProcessor):
  """Pass-through logits processor kept as a debugging/experimentation hook.

  NOTE(review): despite the 'List' suffix it subclasses ``LogitsProcessor``,
  not ``LogitsProcessorList`` — name kept to avoid breaking references.
  """

  def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
    # Identity transform: a convenient place to inject decode-time score
    # manipulation (currently wired up only via commented-out code in run()).
    return scores


def run(args):
  """Transcribe one audio file or every file in a folder with a pretrained Whisper model.

  Args:
    args: parsed CLI namespace with
      - model: Whisper size name (e.g. 'base'), expanded to 'openai/whisper-{model}'
      - input (Path): a single audio file, or a directory whose entries are all audio files

  Prints the decoded transcription (special tokens included) for each file.
  """
  model_path = f'openai/whisper-{args.model}'
  processor: WhisperProcessor = WhisperProcessor.from_pretrained(model_path)
  model: WhisperForConditionalGeneration = WhisperForConditionalGeneration.from_pretrained(model_path).to(device)
  #model.config.forced_decoder_ids = None
  #logits_processor = MyLogitsProcessorList()

  if args.input.is_file():
    fps = [args.input]
  else:
    # sort for a deterministic order (iterdir() order is filesystem-dependent)
    fps = sorted(args.input.iterdir())
  for fp in fps:
    # Whisper's feature extractor requires 16 kHz input; resample at load time
    # instead of passing the file's native rate (sr=None), which fails for
    # any audio not already sampled at 16 kHz.
    y, sr = L.load(fp, sr=16000)
    # Move features to the model's device explicitly; the processor's `device`
    # kwarg is not supported across all transformers versions.
    input_features = processor(y, sampling_rate=sr, return_tensors='pt').input_features.to(device)
    predicted_ids = model.generate(
      input_features,
      #logits_processor=[logits_processor],
      language='en',
    )
    # keep special tokens: downstream comparison relies on the raw token text
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=False)
    print(transcription)


if __name__ == '__main__':
  parser = ArgumentParser()
  parser.add_argument('-M', '--model', default='base', choices=MODELS, help='model name')
  # run() accepts either a single audio file or a directory of audio files,
  # so the help text must say so (it previously claimed folder-only input).
  parser.add_argument('-I', '--input', required=True, type=Path, help='input audio file or folder')
  args = parser.parse_args()

  print('>> Input:', args.input)

  # wall-clock timing of the whole inference pass (model load included)
  ts_start = time()
  run(args)
  print(f'>> timecost: {time() - ts_start:.3f}s')