#!/usr/bin/env python3
# Author: Armit
# Create Time: 2025/08/03 

# 文件夹批量推理 whisper 官方模型 / whisper-peft 微调模型 (LID)
# ℹ 速度 44it/s, 跑一次需要~8min

import json
import random
from pathlib import Path
from time import time
from argparse import ArgumentParser
from typing import List, Tuple

import torch
from peft import PeftModel
import librosa as L
from tqdm import tqdm

from utils import MODELS, LANG_TOKEN_TO_LANGUAGE, LANG_TOKEN_FIX_MAP, SUBMIT_PATH, device, get_whisper_auto


@torch.inference_mode
def run(args):
  '''Batch language-identification (LID) inference over a folder of .wav files.

  Loads either an official pretrained Whisper model (``args.model``) or a local
  PEFT-finetuned adapter (``args.ckpt_path``), detects the language of every
  audio file in ``args.input``, and writes one ``<name>\\t<language>`` line per
  file to ``args.output``.
  '''
  if args.ckpt_path:    # local finetuned adapter: recover the base model from its config
    with open(args.ckpt_path / 'adapter_config.json', 'r', encoding='utf-8') as fh:
      adapter_cfg = json.load(fh)
    model, processor = get_whisper_auto(adapter_cfg['base_model_name_or_path'])
    model = PeftModel.from_pretrained(model, args.ckpt_path, is_trainable=False)
  else:                 # official pretrained weights
    model, processor = get_whisper_auto(args.model)
  model = model.eval().to(device)

  # Collect *.wav inputs (excluding 'ydata'), ordered by the numeric id
  # embedded in the filename stem as '<prefix>_<id>...'.
  wav_files = sorted(
    (fp for fp in Path(args.input).iterdir() if fp.suffix == '.wav' and fp.stem != 'ydata'),
    key=lambda fp: int(fp.stem.split('_')[1]),
  )

  results: List[Tuple[str, str]] = []
  for fp in tqdm(wav_files):
    y, sr = L.load(fp, sr=None)
    # Peak alignment: training-set waveforms peak around 0.4;
    # empirically a 0.8 gain scored highest.
    y *= 0.8

    input_features = processor(y, sampling_rate=sr, return_tensors='pt', device=device).input_features.to(device=device)
    lang_token = model.detect_language(input_features)
    # [2:-2] strips the token delimiters — presumably '<|' and '|>'; verify against processor output
    lang = processor.decode(lang_token, device=device)[2:-2]
    if lang not in LANG_TOKEN_TO_LANGUAGE:
      if lang in LANG_TOKEN_FIX_MAP:
        # Known alias — remap to a recognized language token.
        lang = LANG_TOKEN_FIX_MAP[lang]
      else:
        # Unknown token — fall back to a random guess among rare languages.
        lang = random.choice(['is', 'am', 'mt', 'so', 'ln', 'zu', 'ha'])
    results.append((fp.stem.replace('_vocals', ''), LANG_TOKEN_TO_LANGUAGE[lang]))

  with open(args.output, 'w', encoding='utf-8') as fh:
    fh.writelines(f'{name}\t{label}\n' for name, label in results)


if __name__ == '__main__':
  parser = ArgumentParser()
  parser.add_argument('-M', '--model', default='base', choices=MODELS, help='model type')
  parser.add_argument('-C', '--ckpt_path', type=Path, help='finetuned checkpoint-* save folder')
  parser.add_argument('-I', '--input', default='data_preprocessed/testset.mdx-1s', type=Path, help='input audio folder')
  parser.add_argument('-O', '--output', type=Path, help='output result file')
  args = parser.parse_args()

  ckpt_path: Path = args.ckpt_path
  if ckpt_path:
    print('>> Use local finetuned checkpoint:', ckpt_path)
    # Validate CLI input with explicit errors rather than assert:
    # asserts are stripped under `python -O`, silently skipping these checks.
    if not ckpt_path.is_dir():
      parser.error('--ckpt_path must be a folder')
    if not (ckpt_path / 'adapter_model.safetensors').is_file():
      parser.error('missing peft weight file')
    if not (ckpt_path / 'adapter_config.json').is_file():
      parser.error('missing peft config file')
    output_suffix = ckpt_path.parent.stem
  else:
    print('>> Use official pretrained weights')
    output_suffix = args.model
  if args.output is None:
    # Default output path: <SUBMIT_PATH>/<input-folder-name>-<model-or-run-name>.txt
    args.output = SUBMIT_PATH / f'{args.input.stem}-{output_suffix}.txt'
  # Print after args.output is resolved, so the default path is shown
  # instead of None when -O was omitted.
  print('>> Input:', args.input)
  print('>> Output:', args.output)

  ts_start = time()
  run(args)
  print(f'>> timecost: {time() - ts_start:.3f}s')
