#!/usr/bin/env python3
# Author: Armit
# Create Time: 周二 2025/07/29

# 测试集预处理-第2步: 音量统一/长度裁剪
# 共计21222个1s样本 = 5.895h

from pathlib import Path
from argparse import ArgumentParser
from typing import List, Optional

import numpy as np
from numpy import ndarray
import librosa as L
import soundfile as sf
from scipy import signal
from tqdm import tqdm

from utils import DATA_PATH, PREPROCESSED_PATH

DURATION    = 1.0                           # target clip length in seconds
SAMPLE_RATE = 16000                         # expected sample rate of every input wav
SAMPLE_LEN  = int(SAMPLE_RATE * DURATION)   # samples per clip (16000)
INT16_MAX   = np.iinfo(np.int16).max        # int16 full scale (32767)

# lazily-built FIR band-pass filter, cached at module level by wav_band_pass()
firwin = None

def wav_load(fp:Path, duration:Optional[float]=None) -> ndarray:
  """Load a mono float32 waveform, verifying the file's native sample rate.

  Args:
    fp: path of the audio file to load.
    duration: optional cap in seconds on how much audio to read; None loads all.
  Returns:
    1-D float32 waveform sampled at SAMPLE_RATE.
  Raises:
    AssertionError: if the file's native sample rate is not SAMPLE_RATE.
  """
  # sr=None keeps the native rate so the assert below can detect mismatches
  y, sr = L.load(fp, sr=None, mono=True, duration=duration, dtype=np.float32)
  assert sr == SAMPLE_RATE, f'raw wav sr {sr} != {SAMPLE_RATE} as expected'
  return y

def wav_norm_peak(y:ndarray) -> ndarray:
  """Peak-normalize the waveform so its maximum absolute amplitude is 1."""
  return L.util.normalize(y, norm=np.inf)

def wav_norm_nonlinear(y:ndarray) -> ndarray:
  """Apply sub-linear dynamic-range compression: Y ~ sign(X) * |X|^k, k < 1.

  Near-silent input (peak below 1e-3) is returned unchanged.
  """
  peak = np.abs(y).max()
  if peak < 1e-3:
    return y
  # bring the peak to just under full scale before compression
  y = y / peak * 0.999
  # factor 0.5 leaves headroom against int16 overflow
  gain = 0.5 * INT16_MAX / max(0.01, np.max(np.abs(y)))
  # sublinear scaling: exponent 0.667 < 1 boosts quiet parts relative to loud ones
  compressed = np.sign(y) * np.power(np.abs(y), 0.667)
  return gain * compressed / INT16_MAX

def wav_trim(y:ndarray, top_db:int=45, frame_length:int=512, hop_length:int=128) -> ndarray:
  """Strip leading/trailing spans quieter than `top_db` dB below the peak."""
  trimmed, _ = L.effects.trim(y, top_db=top_db, frame_length=frame_length, hop_length=hop_length)
  return trimmed

def wav_band_pass(y:ndarray) -> ndarray:
  """Band-pass the waveform (40–2100 Hz) with a cached FIR filter.

  NOTE(review): full convolution lengthens the output by N_FREQ-1 samples;
  no caller is visible in this file — confirm the intended output length.
  """
  global firwin
  if firwin is None:
    # design the filter once on first use and cache it at module level
    N_FREQ = 1024
    FMIN   = 40
    FMAX   = 2100
    firwin = signal.firwin(N_FREQ, [FMIN, FMAX], pass_zero=False, fs=SAMPLE_RATE)
  return signal.convolve(y, firwin)


def run(args):
  """Preprocess every '*.wav' under args.input and write results to args.output.

  Each clip is peak-normalized, optionally silence-trimmed, cut to the leading
  1 second, written out individually, then all clips are zero-padded to
  SAMPLE_LEN and concatenated into a single 'ydata.mp3'.
  """
  y_list: List[ndarray] = []
  # ignore any previously generated 'ydata' artifact living in the input folder
  fps = [fp for fp in Path(args.input).iterdir() if fp.suffix == '.wav' and fp.stem != 'ydata']
  # filenames look like 'iflytek_1.wav' or 'iflytek_1_vocals.wav'
  fps.sort(key=lambda e: int(e.stem.split('_')[1]))   # sort by id num
  for fp in tqdm(fps):
    if args.trim:         # more reasonable!! :)
      y = wav_load(fp)
      y = wav_norm_peak(y)
      y = wav_trim(y)
    else:                 # follow the context-rule :(
      # use the DURATION constant (was a hard-coded 1.0) to stay in sync with SAMPLE_LEN
      y = wav_load(fp, duration=DURATION)
      y = wav_norm_peak(y)
    y = y[:SAMPLE_LEN]    # assure only use the leading 1s
    y_list.append(y)
    sf.write(args.output / fp.name, y, SAMPLE_RATE)

  # right-pad short clips with zeros so every segment is exactly SAMPLE_LEN long
  y_list = [np.pad(y, (0, SAMPLEN if False else SAMPLE_LEN - len(y))) if len(y) < SAMPLE_LEN else y for y in y_list]
  yconcat = np.concatenate(y_list, axis=0, dtype=np.float32)
  sf.write(args.output / 'ydata.mp3', yconcat, SAMPLE_RATE)


if __name__ == '__main__':
  parser = ArgumentParser()
  parser.add_argument('-I', '--input', default=DATA_PATH, type=Path, help='input audio folder')
  parser.add_argument('-O', '--output', type=Path, help='output preprocessed data folder')
  parser.add_argument('--trim', action='store_true', help='trim the leading silence')
  args = parser.parse_args()

  # default output folder: <PREPROCESSED_PATH>/<input-stem>[_trim]
  if args.output is None:
    name = args.input.stem
    if args.trim:
      name += '_trim'
    args.output = PREPROCESSED_PATH / name
  # parents=True so a missing PREPROCESSED_PATH does not abort the run
  Path(args.output).mkdir(parents=True, exist_ok=True)

  run(args)
