#!/usr/bin/env python3
# Author: Armit
# Create Time: 周四 2025/08/21

# 用1s的数据微调 whisper 预训练模型
# 代码参考:
# - https://huggingface.co/blog/fine-tune-whisper
# - https://github.com/yeyupiaoling/Whisper-Finetune/blob/master/finetune.py
# - https://github.com/openai/whisper/discussions/1454

import os
import random
import shutil
from time import time
from pathlib import Path
from argparse import ArgumentParser
from dataclasses import dataclass
from typing import Any, List, Dict, Union

import torch
from torch.utils.data import Dataset
import torch.nn.functional as F
from datasets import load_dataset, Split
from transformers import TrainerCallback
from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
from peft import LoraConfig, AdaLoraConfig, PeftModel, get_peft_model
import librosa
import numpy as np
from numpy import ndarray
import pyloudnorm as pyln

from utils import MODELS, LOG_PATH, LANG_TO_LANGUAGE, SAMPLE_RATE, TARGET_LOUNDNESS, LORA_TARGET_MODULES, get_whisper_auto, now_datetime_str

if 'TYPE_CHECKING':
  # NOTE: a non-empty string literal is always truthy, so unlike the real
  # `typing.TYPE_CHECKING` flag these imports DO execute at runtime; the guard
  # only groups the "annotation-only" imports visually.
  from torch import Tensor
  from transformers import TrainingArguments, TrainerState, TrainerControl
  from transformers.feature_extraction_utils import BatchFeature
  from transformers.trainer_utils import EvalPrediction
  from utils import WhisperProcessor

if 'cudnn':
  # Global cuDNN / matmul precision knobs: trade strict determinism for speed
  # (benchmark-mode autotuning + TF32 + reduced fp32 matmul precision).
  torch.backends.cudnn.enabled = True
  torch.backends.cudnn.benchmark = True
  torch.backends.cudnn.deterministic = False
  torch.backends.cudnn.allow_tf32 = True
  torch.set_float32_matmul_precision('medium')


class MixedDataset(Dataset):
  '''Mixed multilingual FLEURS dataset yielding ~1-second clips for language-ID finetuning.

  Each item is the processor's BatchFeature holding Whisper log-mel `input_features`
  plus a short `labels` token sequence that encodes only the language identity.
  '''

  def __init__(self, split:'Split', limit:int, processor:'WhisperProcessor'):
    super().__init__()

    self.processor = processor
    self.vocab = self.processor.tokenizer.get_vocab()
    self.startoftranscript = self.vocab['<|startoftranscript|>']
    self.transcribe = self.vocab['<|transcribe|>']
    self.endoftext = self.vocab['<|endoftext|>']
    if '<|nospeech|>' in self.vocab.keys():
      self.nospeech = self.vocab['<|nospeech|>']
      self.notimestamps = None
    else:   # compatibility with older models (e.g. tiny)
      self.nospeech = self.vocab['<|nocaptions|>']
      self.notimestamps = self.vocab['<|notimestamps|>']
    # tokens repurposed for languages that have no native Whisper token (see __getitem__)
    self.token_bo = self.vocab['<|bo|>']
    self.token_kk = self.vocab['<|kk|>']
    self.meter = pyln.Meter(SAMPLE_RATE)

    # Sample at most `limit` items per language, merge into one index list, then shuffle.
    # FIX: seed BEFORE sampling so the per-language subset is reproducible too;
    # previously only the shuffle order was deterministic.
    random.seed(114514)
    self.split = split
    self.datasets = {lang: load_dataset('google/fleurs', lang, split=split, trust_remote_code=True) for lang in LANG_TO_LANGUAGE}
    self.indexes = []   # [(i_lang, j_index)]
    for i, dataset in self.datasets.items():
      nlen = len(dataset)
      samples = random.sample(range(nlen), limit) if nlen > limit else range(nlen)
      for j in samples:
        self.indexes.append((i, j))
    random.shuffle(self.indexes)

    # Data augmentation configs (applied on the train split only).
    self.speed_rates = None
    self.augment_configs = [
      {
        'type': 'speed',
        'params': {
          'min_speed_rate': 0.8,
          'max_speed_rate': 1.2,
          'num_rates': 5,
        },
        'prob': 0.5,
      },
      {
        'type': 'shift',
        'params': {
          'min_shift_ms': -5,
          'max_shift_ms': +5,
        },
        'prob': 0.0,    # disabled ('shift' is also not implemented in augment())
      },
      {
        'type': 'volume',
        'params': {
          'min_gain_dBFS': -15,
          'max_gain_dBFS': +15,
        },
        'prob': 0.5,
      }
    ]

  def __len__(self):
    return len(self.indexes)

  def __getitem__(self, idx):
    '''Return a BatchFeature with `input_features` and language-ID `labels`.'''
    i, j = self.indexes[idx]
    item = self.datasets[i][j]
    language = LANG_TO_LANGUAGE[i]

    # Loudness-normalize; best-effort because pyln raises on clips shorter than its block size.
    y  = item['audio']['array']
    sr = item['audio']['sampling_rate']
    try: y = pyln.normalize.loudness(y, self.meter.integrated_loudness(y), TARGET_LOUNDNESS)
    except Exception: pass
    # Resample to the model's expected rate.
    if sr != SAMPLE_RATE:
      y = self.augment_resample(y, sr, SAMPLE_RATE)
    # Random 1-second crop.
    n_samples = len(y)
    if n_samples > SAMPLE_RATE:
      cp = random.randrange(0, n_samples - SAMPLE_RATE)
      y = y[cp : cp + SAMPLE_RATE]
    # Data augmentation (train split only).
    if self.split == Split.TRAIN:
      y = self.augment(y)
    # Build label: "<|startoftranscript|><|lang|><|transcribe|><|notimestamps|><|endoftext|>".
    # FIX: pass SAMPLE_RATE to the processor — `y` was already resampled above, so the
    # original `sr` would be wrong (and rejected by the feature extractor) whenever
    # sr != SAMPLE_RATE.
    if language in ['zulu', 'kyrgyz']:
      # No native Whisper language token; reuse the otherwise-unused <|bo|>/<|kk|> tokens
      # (presumably chosen because these languages never appear in this data mix — TODO confirm).
      batch: 'BatchFeature' = self.processor(audio=y, sampling_rate=SAMPLE_RATE)
      lang_token = self.token_bo if language == 'zulu' else self.token_kk
      batch['labels'] = [self.startoftranscript, lang_token]
    else:
      self.processor.tokenizer.set_prefix_tokens(language=language)
      batch: 'BatchFeature' = self.processor(audio=y, sampling_rate=SAMPLE_RATE, text='')
    return batch

  def augment(self, y:ndarray):
    '''Randomly apply speed and volume perturbations according to augment_configs.'''
    for config in self.augment_configs:
      if config['type'] == 'speed' and config['prob'] and random.random() < config['prob']:
        if self.speed_rates is None:    # lazily build the discrete grid of speed rates
          params = config['params']
          self.speed_rates = np.linspace(params['min_speed_rate'], params['max_speed_rate'], params['num_rates'], endpoint=True)
        rate = random.choice(self.speed_rates)
        y = self.augment_speed(y, speed_rate=rate)
      if config['type'] == 'volume' and config['prob'] and random.random() < config['prob']:
        params = config['params']
        gain = random.randint(params['min_gain_dBFS'], params['max_gain_dBFS'])
        y = self.augment_volume(y, gain=gain)
    return y

  @staticmethod
  def augment_resample(y:ndarray, orig_sr:int, target_sr:int):
    '''Resample `y` from orig_sr to target_sr.'''
    return librosa.resample(y, orig_sr=orig_sr, target_sr=target_sr)

  @staticmethod
  def augment_speed(y:ndarray, speed_rate:float):
    '''Time-stretch by linear interpolation; rate > 1 shortens the clip (no-op for rate <= 0 or == 1).'''
    if speed_rate <= 0 or speed_rate == 1.0: return y
    old_length = y.shape[0]
    new_length = int(old_length / speed_rate)
    old_indices = np.arange(old_length)
    new_indices = np.linspace(start=0, stop=old_length, num=new_length)
    return np.interp(new_indices, old_indices, y).astype(np.float32)

  @staticmethod
  def augment_volume(y:ndarray, gain:int):
    '''Apply a gain in dBFS: multiply by 10^(gain/20).'''
    return y * 10.**(gain / 20.)


@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
  '''Collates audio features and label token ids into one padded training batch.

  Audio inputs and token labels require different padding strategies, so they
  are padded separately: log-mel features via the feature extractor, labels via
  the tokenizer. Padded label positions become -100 so the loss ignores them.
  '''
  processor: Any

  def __call__(self, features: List[Dict[str, Union[List[int], Tensor]]]) -> Dict[str, Tensor]:
    # pad the log-mel inputs into a tensor batch
    audio_inputs = [{'input_features': f['input_features'][0]} for f in features]
    batch = self.processor.feature_extractor.pad(audio_inputs, return_tensors='pt')
    # pad the tokenized label sequences to the longest in the batch
    token_inputs = [{'input_ids': f['labels']} for f in features]
    padded = self.processor.tokenizer.pad(token_inputs, return_tensors='pt')
    # replace padding positions with -100 so cross-entropy skips them
    labels = padded['input_ids'].masked_fill(padded.attention_mask.ne(1), -100)
    # if every row starts with a bos token from tokenization, strip it here —
    # the model appends it again during training anyway
    if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item():
      labels = labels[:, 1:]
    batch['labels'] = labels
    return batch


class SavePeftModelCallback(TrainerCallback):
  '''Mirrors the current best checkpoint into a stable "checkpoint-best" folder.

  Because only the newest few checkpoints are kept (save_total_limit), the best
  one can be rotated away; copying it aside on every save preserves it.
  '''

  def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
    best_dir = os.path.join(SAVE_BEST_PATH, f'{PREFIX_CHECKPOINT_DIR}-best')
    best_ckpt = state.best_model_checkpoint
    # the source may already have been deleted by checkpoint rotation — check first
    if best_ckpt is not None and os.path.exists(best_ckpt):
      if os.path.exists(best_dir):
        shutil.rmtree(best_dir)
      shutil.copytree(best_ckpt, best_dir)
    print(f'效果最好的检查点为：{state.best_model_checkpoint}，评估结果为：{state.best_metric}')
    return control


class WhisperLanguageIdenityDetectTrainer(Seq2SeqTrainer):
  '''Seq2SeqTrainer variant that optimizes only the language-ID prediction.

  Mirrors the logic of WhisperGenerationMixin.detect_language: a single decoder
  step from the start token is enough, and the loss is cross-entropy between
  that step's logits and the <|lang|> label token.
  '''

  def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
    labels = inputs.pop("labels")
    loss, logits = self._compute_loss(model, inputs, labels)
    if return_outputs:
      return loss, {"logits": logits}
    return loss

  def _compute_loss(self, model, inputs, labels):
    # one decoder step suffices: feed only the decoder start token for each sample
    batch_size = labels.shape[0]
    start_id = model.generation_config.decoder_start_token_id
    decoder_input_ids = torch.full((batch_size, 1), start_id, device=inputs.input_features.device, dtype=torch.long)
    logits = model(**inputs, decoder_input_ids=decoder_input_ids, use_cache=False).logits   # [B, L=1, D]
    vocab_size = logits.shape[-1]
    # labels[:, 1] holds the <|lang|> token (position 0 is <|startoftranscript|>)
    lid_loss = F.cross_entropy(logits.reshape(-1, vocab_size), labels[:, 1].reshape(-1), ignore_index=-100)
    return lid_loss, logits


# call on_epoch()
# WARN: 这个函数会导致VRAM使用暴增，随后触发内存交换，但是目前没有更好的办法
@torch.inference_mode
def compute_metrics(pred:EvalPrediction) -> Dict[str, float]:
  truth = pred.label_ids[:, 1]                    # [B, L=4, D]
  preds = pred.predictions[:, 0, :].argmax(-1)    # [B, L=1, D] 不知为何长度只有1
  return {
    'acc': (truth == preds).sum() / truth.shape[0],
  }


def run(args):
  '''Finetune a Whisper model with (Ada)LoRA adapters for language identification.

  Loads the pretrained model/processor, builds the mixed FLEURS datasets, attaches
  LoRA modules (or resumes from a LoRA checkpoint), and trains with the custom
  trainer that optimizes only the single-step language-ID loss.
  '''
  print('加载预处理器/模型...')
  model, processor = get_whisper_auto(args.model_path)

  print('加载数据集 (~72s)...')
  ts_start = time()
  train_dataset = MixedDataset(Split.TRAIN, args.limit_trainset, processor)
  test_dataset = MixedDataset(Split.TEST, args.limit_valset, processor)
  print(f'训练数据：{len(train_dataset)}，测试数据：{len(test_dataset)}')
  data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor)
  print(f'>> [Load Datasets] timecost: {time() - ts_start:.3f}s')

  # count trainable params before/after attaching LoRA to report the trainable ratio
  param_cnt_base = sum(p.numel() for p in model.parameters() if p.requires_grad)
  print('>> param_cnt_base:', param_cnt_base)
  if args.resume:
    # resume from an existing LoRA checkpoint
    print(f'加载LoRA存档点: {args.resume}')
    model = PeftModel.from_pretrained(model, args.resume, is_trainable=True)
  else:
    # attach fresh (Ada)LoRA adapter modules
    print(f'添加LoRA模块...')
    if args.adalora:
      config = AdaLoraConfig(init_r=12, target_r=4, beta1=0.85, beta2=0.85, tinit=200, tfinal=1000, deltaT=10, lora_alpha=32, lora_dropout=0.1, orth_reg_weight=0.5, target_modules=LORA_TARGET_MODULES, total_step=args.epochs * len(train_dataset))
    else:
      config = LoraConfig(r=32, lora_alpha=64, target_modules=LORA_TARGET_MODULES, lora_dropout=0.05, bias='none')
    model = get_peft_model(model, config)
  param_cnt_lora = sum(p.numel() for p in model.parameters() if p.requires_grad)
  print('>> param_cnt_lora:', param_cnt_lora, f'({param_cnt_lora / param_cnt_base:.3%})')
  model.print_trainable_parameters()

  print('构造训练器...')
  trainer_args = Seq2SeqTrainingArguments(
    output_dir=args.output,                       # directory for checkpoints and logs
    per_device_train_batch_size=args.batch_size,  # training batch size
    per_device_eval_batch_size=args.batch_size,   # evaluation batch size
    gradient_accumulation_steps=args.grad_acc,    # gradient accumulation steps
    learning_rate=args.learning_rate,             # learning rate
    warmup_steps=args.warmup_steps,               # LR warmup steps
    num_train_epochs=args.epochs,                 # number of finetuning epochs
    save_strategy='steps',                        # save checkpoints by step count
    eval_strategy='steps',                        # evaluate by step count
    fp16=True,                                    # use half-precision training
    torch_compile=False,                          # PyTorch 2.0 compiler (not available on Windows)
    load_best_model_at_end=True,                  # reload the best model when training ends
    report_to=['tensorboard'],                    # log to tensorboard
    eval_steps=500,                               # evaluate every N steps
    generation_max_length=2,                      # max generated tokens during evaluation
    save_steps=500,                               # save a checkpoint every N steps
    save_total_limit=3,                           # keep only the newest N checkpoints
    optim='adamw_torch',                          # optimizer choice
    ddp_find_unused_parameters=None,              # distributed-training setting
    dataloader_num_workers=args.num_workers,      # dataloader worker count
    logging_steps=50,                             # print logs every N steps
    remove_unused_columns=True,                   # drop dataset columns the model doesn't use
    label_names=['labels'],                       # input-dict keys that hold the labels
    metric_for_best_model='acc',
    greater_is_better=True,
  )
  trainer = WhisperLanguageIdenityDetectTrainer(
    args=trainer_args,
    model=model,
    train_dataset=train_dataset,
    eval_dataset=test_dataset,
    data_collator=data_collator,
    processing_class=processor.feature_extractor,
    compute_metrics=compute_metrics,
    callbacks=[SavePeftModelCallback],
  )

  print('开始训练...')
  model.config.use_cache = False
  trainer._load_from_checkpoint = lambda *args, **kwargs: None    # prevent re-loading the checkpoint (PeftModel is already loaded above)
  trainer.train(resume_from_checkpoint=args.resume)
  trainer.save_state()
  #model.config.use_cache = True
  #model.save_pretrained(str(args.output / 'checkpoint-final'))


if __name__ == '__main__':
  # Benchmark notes (BareOS: Mem=4GB):
  # - original (n_frame=1500): running the eval set fills VMem quickly, then memory swapping / thrashing (in 100-batch cycles)
  #   - model=base bs=16 limit=1200/300: Mem=16.8GB VMem=5.5GB CPU=27% GPU=80% speed=1.4it/s steps=14080 timecost~=4h
  # - hijack (n_frame=128)
  #   - model=base bs=16 limit=1200/300: Mem=27.7GB VMem=1.8GB CPU=68% GPU=65% speed=7.5it/s steps=14080 timecost~=30min
  #   - model=base bs=32 limit=1200/300: Mem=27.7GB VMem=2.0GB CPU=66% GPU=70% speed=5.5it/s steps=7040  timecost~=20min
  #   - model=base bs=48 limit=1200/300: Mem=28.1GB VMem=2.6GB CPU=66% GPU=70% speed=4.3it/s steps=4700  timecost~=18min
  parser = ArgumentParser()
  parser.add_argument('-M', '--model', default='base', choices=MODELS, help='model type')
  parser.add_argument('-O', '--output', type=Path, help='ckpt save folder')
  parser.add_argument('-E', '--epochs', default=20, type=int)
  parser.add_argument('-B', '--batch_size', default=16, type=int)
  # FIX: learning rates are fractional — type=int rejected any CLI value like "3e-4" or "0.001"
  parser.add_argument('-lr', '--learning_rate', default=1e-5, type=float)
  parser.add_argument('-warm', '--warmup_steps', default=1000, type=int)
  parser.add_argument('--limit_trainset', default=1500, type=int, help='limit samples for each train dataset')
  parser.add_argument('--limit_valset', default=100, type=int, help='limit samples for each val dataset')
  parser.add_argument('--grad_acc', default=1, type=int)
  parser.add_argument('--num_workers', default=0, type=int)
  parser.add_argument('--adalora', action='store_true')
  parser.add_argument('--resume', type=Path)
  args = parser.parse_args()

  args.model_path = f'openai/whisper-{args.model}'
  if args.output is None:
    args.output = LOG_PATH / f'whisper-{args.model}.{now_datetime_str()}'
  # FIX: always set SAVE_BEST_PATH — previously it was assigned only when --output was
  # omitted, so passing -O made SavePeftModelCallback.on_save crash with NameError
  SAVE_BEST_PATH = str(args.output)
  print('>> model:', args.model_path)
  print('>> output_path:', args.output)
  print('>> n_lang:', len(LANG_TO_LANGUAGE))

  ts_start = time()
  run(args)
  print(f'>> timecost: {time() - ts_start:.3f}s')
