#!/usr/bin/env python3
# Author: Armit
# Create Time: Friday, 2025/08/22

import json
import random
from pathlib import Path
from typing import List, Tuple, Dict, Union

import torch
from torch.utils.data import Dataset, DataLoader
from transformers import WhisperProcessor, WhisperFeatureExtractor, WhisperForConditionalGeneration
from transformers.modeling_outputs import BaseModelOutput
import numpy as np
from numpy import ndarray
import librosa as L

# Run on GPU when available, else fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Project-relative data layout.
BASE_PATH = Path(__file__).parent
DATA_PATH = BASE_PATH / 'data'
DEV_PATH = DATA_PATH / '1024_kws_dev'    # dev split (labeled pairs)
TEST_PATH = DATA_PATH / '1024_kws_test'  # test split (unlabeled pairs)
SUBMIT_PATH = BASE_PATH / 'submit'
SUBMIT_PATH.mkdir(exist_ok=True)  # import-time side effect: ensure the submit dir exists

# All audio is processed at 16 kHz.
SAMPLE_RATE = 16000

# Whisper checkpoint size tags considered by this project.
MODELS = [
  'tiny',
  'base',
  'v3-turbo',
]


def load_dev_index_file(fp:Union[str, Path]=None) -> List[Tuple[str, str, int]]:
  '''Load the dev-set index csv as (anc_wav, com_wav, label) triplets.

  fp: optional csv path override; defaults to the project devset csv.
      (Added for testability; calling with no argument behaves as before.)
  Returns: list of (anc_wav, com_wav, label) with label cast to int.
  '''
  if fp is None:
    fp = DEV_PATH / 'devset_final.csv'
  with open(fp, 'r', encoding='utf-8') as fh:
    lines = fh.read().strip().split('\n')[1:]   # drop the header row
    items = [line.split(',') for line in lines]
  # csv columns: (anc_wav, anc_text, com_wav, com_text, label)
  # BUGFIX: cast label to int so the runtime value matches the declared return type
  return [(e[0], e[2], int(e[4])) for e in items]


def load_testset_index_file(fp:Union[str, Path]=None) -> List[Tuple[str, str]]:
  '''Load the test-set index csv as (anc_wav, com_wav) pairs.

  fp: optional csv path override; defaults to the project testset csv.
      (Added for testability; calling with no argument behaves as before.)
  Returns: list of (anc_wav, com_wav) filename pairs.
  '''
  if fp is None:
    fp = TEST_PATH / 'testset_final.csv'
  with open(fp, 'r', encoding='utf-8') as fh:
    lines = fh.read().strip().split('\n')[1:]   # drop the header row
    items = [line.split(',') for line in lines]
  # csv columns: (anc_wav, anc_text, com_wav)
  return [(e[0], e[2]) for e in items]


def write_submit_file(fp:Path, preds:List[Tuple[str, float]], example_fp:Path=None):
  '''Write predictions to a submission csv, validated against the example file.

  fp: output csv path.
  preds: list of (filename, probability); filename like "anc_<idx>.wav".
  example_fp: optional reference csv override; defaults to data/example.csv.
      (Added for testability; calling without it behaves as before.)
  Raises AssertionError when preds has the wrong length or contains a
  filename not present in the reference csv.
  '''
  if example_fp is None:
    example_fp = DATA_PATH / 'example.csv'
  with open(example_fp, 'r', encoding='utf-8') as fh:
    lines = fh.read().strip().split('\n')[1:]   # drop the header row
    valid_fns = [line.split(',')[0] for line in lines]
  nlen_need = len(valid_fns)
  nlen_have = len(preds)
  assert nlen_have == nlen_need, f'invalid preds length: nlen_have ({nlen_have}) != nlen_need ({nlen_need})'

  # set for O(1) membership checks instead of an O(n) list scan per prediction
  valid_fn_set = set(valid_fns)
  with open(fp, 'w', encoding='utf-8') as fh:
    fh.write('anc_wav,label\n')
    for fn, prob in preds:
      valid_fn = f'./Test/{fn}'
      # message typo fixed: "shoul" -> "should"
      assert valid_fn in valid_fn_set, f'invalid fn {fn!s}, should be like "anc_<idx>.wav"'
      fh.write(f'{valid_fn},{prob}\n')


def get_whisper_model(model_path:str) -> Tuple[WhisperForConditionalGeneration, WhisperProcessor]:
  '''Load a Whisper model + processor, both patched for short audio clips.

  model_path: HF hub id or local checkpoint path.
  Returns: (model moved to `device`, processor with a shortened feature window).

  The two `if '...'` blocks are always-true string toggles: replace the
  string with '' to disable a patch. Note the second block reads `FE` bound
  in the first, so the first must stay enabled when the second is.
  '''
  processor: WhisperProcessor = WhisperProcessor.from_pretrained(model_path)
  if 'hijack processor config':
    # limit audio length: shrink the feature extractor's window from Whisper's
    # default 30s down to 200 hop-lengths
    FE: WhisperFeatureExtractor = processor.feature_extractor
    FE.n_samples = FE.hop_length * 200
    FE.nb_max_frames = FE.n_samples // FE.hop_length  # let this be 200 = 2s for SR=16000 & HOP=160 🤔
    print('>> n_samples:', FE.n_samples)
    print('>> nb_max_frames:', FE.nb_max_frames)
  model: WhisperForConditionalGeneration = WhisperForConditionalGeneration.from_pretrained(model_path).to(device)
  if 'hijack model config':
    # disable forced/suppressed token behavior so generation is unconstrained
    model.config.forced_decoder_ids = None
    model.config.suppress_tokens = []
    model_encoder = model.get_encoder()
    # shrink encoder positions to match the shortened mel window; the conv
    # front-end downsamples by conv1.stride * conv2.stride frames per position
    model.config.max_source_positions = FE.nb_max_frames // (model_encoder.conv1.stride[0] * model_encoder.conv2.stride[0])  # nb_max_frames=max_source_positions*2
    # truncate the learned positional embedding table accordingly
    model_encoder.embed_positions.weight.data = model_encoder.embed_positions.weight.data[:model.config.max_source_positions]
    print('>> max_source_positions:', model.config.max_source_positions)
    print(model.config)
  return model, processor


def get_whisper_latent(y:Union[str, ndarray], model:WhisperForConditionalGeneration, processor:WhisperProcessor) -> BaseModelOutput:
  '''Extract Whisper encoder latents for a waveform or an audio file path.

  y: filepath (loaded at its native sample rate) or a waveform ndarray
     assumed to be 16 kHz.
  Returns: encoder outputs (last hidden state, hidden states, attentions).

  NOTE(review): when a filepath is given, the native sr is forwarded to the
  processor — Whisper features expect 16 kHz; confirm the files are 16 kHz.
  '''
  #print('|y|.max:', max(y.max(), -y.min()))
  if isinstance(y, str):
    y, sr = L.load(y, sr=None)
  else:
    sr = 16000
  input_features = processor(y, sampling_rate=sr, return_tensors='pt', device=device).input_features.to(device)
  if not 'plot':  # always-false debug toggle: spectrogram visualization
    import matplotlib.pyplot as plt
    plt.imshow(input_features[0].cpu())
    plt.show()
  encoder_outputs: BaseModelOutput = model.get_encoder()(input_features, output_attentions=True, output_hidden_states=True)
  if not 'use decoder':  # always-false debug toggle: decoder latents instead
    # "<|startoftranscript|><|?lang?|><|transcribe|><|notimestamps|>"
    # BUGFIX: torch.LongTensor() takes no `device` kwarg; build with torch.tensor
    decoder_input_ids = torch.tensor([[50258, 50259, 50359, 50363]], dtype=torch.long, device=input_features.device)
    decoder_outputs: BaseModelOutput = model.get_decoder()(input_ids=decoder_input_ids, encoder_hidden_states=encoder_outputs[0])
    return decoder_outputs  # [B=1, L=4, D=512]
  return encoder_outputs


class PairedAudioTrainset(Dataset):
  '''Randomly-paired keyword audio for training.

  Each item is a pair of Whisper input features (anchor, compare) where
  ~50% of pairs are two recordings of the same word and ~50% are two
  different words. The compare waveform gets speed/volume augmentation.

  NOTE(review): the sampled match/mismatch label is not returned by
  __getitem__; presumably the consumer re-derives or ignores it — confirm
  against the training loop.
  '''

  def __init__(self, processor:WhisperProcessor, nlen:int=5000):
    super().__init__()

    self.processor = processor
    self.wav_folder = DATA_PATH / '1024_Train' / 'Train'
    self.nlen = nlen  # virtual epoch length; pairs are re-sampled on every access

    # word -> list of wav filepaths (relative to wav_folder)
    with open(DATA_PATH / '1024_Train' / 'Train' / 'train-datainfo.json', 'r', encoding='utf-8') as fh:
      self.database: Dict[str, List[str]] = json.load(fh)
    self.all_words = list(self.database.keys())
    # only words with >=2 recordings can produce a matched pair
    self.multi_words = list(k for k, v in self.database.items() if len(v) > 1)
    print('>> n_words:', len(self.all_words))
    print('>> n_words_multi:', len(self.multi_words))

    # Data augmentation configs
    self.speed_rates = None   # lazily built from the 'speed' config on first use
    self.augment_configs = [
      {
        'type': 'speed',
        'params': {
          'min_speed_rate': 0.8,
          'max_speed_rate': 1.2,
          'num_rates': 5,
        },
        'prob': 0.5,
      },
      {
        'type': 'shift',
        'params': {
          'min_shift_ms': -5,
          'max_shift_ms': +5,
        },
        'prob': 0.0,
      },
      {
        'type': 'volume',
        'params': {
          'min_gain_dBFS': -15,
          'max_gain_dBFS': +15,
        },
        'prob': 0.5,
      }
    ]

  def sample_pair(self) -> Tuple[str, str]:
    '''Sample a pair of wav paths: 50% same-word (match), 50% cross-word.

    BUGFIX: the match branch previously drew two *different* words from
    multi_words (yielding a mismatched pair despite its comment); it now
    draws two distinct recordings of one multi-recording word.
    '''
    label = int(random.random() < 0.5)
    if label == 1:  # case for match: same word, two different recordings
      w = random.choice(self.multi_words)
      fp1, fp2 = random.sample(self.database[w], 2)
    else:           # case for mismatch: two different words
      w1, w2 = random.sample(self.all_words, 2)
      fp1 = random.choice(self.database[w1])
      fp2 = random.choice(self.database[w2])
    return fp1, fp2

  def __len__(self):
    return self.nlen

  def __getitem__(self, idx:int):
    # idx is ignored: every access draws a fresh random pair
    anc_fp, com_fp = self.sample_pair()
    anc_wav, _ = L.load(self.wav_folder / anc_fp, sr=SAMPLE_RATE)
    com_wav, _ = L.load(self.wav_folder / com_fp, sr=SAMPLE_RATE)
    com_wav = self.augment(com_wav)   # augment only the compare side
    batch = self.processor(audio=[anc_wav, com_wav], sampling_rate=SAMPLE_RATE).input_features
    return batch[0], batch[1]

  def augment(self, y:ndarray) -> ndarray:
    '''Apply each enabled augmentation to waveform y with its configured probability.'''
    for config in self.augment_configs:
      if config['type'] == 'speed' and config['prob'] and random.random() < config['prob']:
        if self.speed_rates is None:
          min_speed_rate, max_speed_rate, num_rates = config['params']['min_speed_rate'], config['params']['max_speed_rate'], config['params']['num_rates']
          self.speed_rates = np.linspace(min_speed_rate, max_speed_rate, num_rates, endpoint=True)
        rate = random.choice(self.speed_rates)
        y = self.augment_speed(y, speed_rate=rate)
      if config['type'] == 'volume' and config['prob'] and random.random() < config['prob']:
        min_gain_dBFS, max_gain_dBFS = config['params']['min_gain_dBFS'], config['params']['max_gain_dBFS']
        gain = random.randint(min_gain_dBFS, max_gain_dBFS)
        y = self.augment_volume(y, gain=gain)
    return y

  @staticmethod
  def augment_speed(y:ndarray, speed_rate:float) -> ndarray:
    '''Time-stretch y by linear resampling: rate > 1 shortens, rate < 1 lengthens.'''
    if speed_rate <= 0 or speed_rate == 1.0: return y
    old_length = y.shape[0]
    new_length = int(old_length / speed_rate)
    old_indices = np.arange(old_length)
    new_indices = np.linspace(start=0, stop=old_length, num=new_length)
    return np.interp(new_indices, old_indices, y).astype(np.float32)

  @staticmethod
  def augment_volume(y:ndarray, gain:int) -> ndarray:
    '''Scale y by `gain` dB (amplitude factor 10^(gain/20)).'''
    return y * 10.**(gain / 20.)


class PairedAudioTestset(Dataset):
  '''Fixed (anchor, compare) audio pairs for the dev ('val') or test split.

  Each item is the pair of Whisper input features for anc_<i+1>.wav and
  com_<i+1>.wav under the split's wav folder.
  '''

  def __init__(self, processor:WhisperProcessor, split:str='test', use_mdx:bool=False):
    super().__init__()

    self.processor = processor
    if split == 'val':
      # dev split: 4000 pairs
      subdir = 'Dev.mdx' if use_mdx else 'Dev'
      self.wav_folder = DATA_PATH / '1024_kws_dev' / subdir
      self.nlen = 4000
    elif split == 'test':
      # test split: 15410 pairs
      subdir = 'Test.mdx' if use_mdx else 'Test'
      self.wav_folder = DATA_PATH / '1024_kws_test' / subdir
      self.nlen = 15410
    else:
      raise ValueError(split)

  def __len__(self):
    return self.nlen

  def __getitem__(self, idx:int):
    # files are 1-indexed on disk
    waves = []
    for role in ('anc', 'com'):
      wav, _ = L.load(self.wav_folder / f'{role}_{idx+1}.wav', sr=SAMPLE_RATE)
      waves.append(wav)
    feats = self.processor(audio=waves, sampling_rate=SAMPLE_RATE).input_features    # [B=2, D=80, L=200]
    return feats[0], feats[1]


if __name__ == '__main__':
  # smoke test: load the smallest checkpoint and pull one sample from each dataset
  whisper_model, whisper_processor = get_whisper_model('openai/whisper-tiny')
  for dataset in (PairedAudioTestset(whisper_processor), PairedAudioTrainset(whisper_processor)):
    print(dataset[0])
