#!/usr/bin/env python3
# Author: Armit
# Create Time: 周五 2025/08/01

# Test-set preprocessing, step 1: batch inference over a folder with the pretrained UVR_MDXNET_Main model (⚠ takes ~27h)
# NOTE: base code copied from https://github.com/seanghay/uvr-mdx-infer
# also refers to https://github.com/Anjok07/ultimatevocalremovergui:separate.SeperateMDX

import os
from time import time
from pathlib import Path
from argparse import ArgumentParser
from typing import Tuple

import torch
import librosa
import soundfile as sf
import onnxruntime as ort
import numpy as np
from tqdm import tqdm

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# MDX-Net model requirements
BASE_PATH = Path(__file__).parent
MODEL_PATH = BASE_PATH / 'weights' / 'UVR_MDXNET_Main.onnx'
RESAMPLE_RATE = 44100   # sample rate the MDX-Net model expects at its input
RESAMPE_RATE = RESAMPLE_RATE    # backward-compatible alias for the historical typo; prefer RESAMPLE_RATE
# output requirements
SAMPLE_RATE = 16000     # sample rate of the saved vocal tracks

class STFT:

  ''' STFT helper mirroring ultimatevocalremovergui::lib_v5/tfc_tdf_v3.py '''

  def __init__(self, target_name:str, dim_f:int, dim_t:int, n_fft:int, hop:int=1024):   # hop is fixed!!
    super().__init__()

    self.target_name = target_name
    self.dim_f = dim_f
    self.dim_t = 2 ** dim_t
    self.dim_c = 4
    self.n_fft = n_fft
    self.n_bins = n_fft // 2 + 1                # FFT yields n_fft//2+1 = 3073 frequency bins
    self.hop = hop
    self.chunk_size = hop * (self.dim_t - 1)    # one chunk: 255 hops = 262144 samples ≈ 5.921s
    # zeros re-filling the high-frequency bins that stft() drops
    n_ch = self.dim_c * 4 if target_name == '*' else self.dim_c
    self.freq_pad = torch.zeros([1, n_ch, self.n_bins - self.dim_f, self.dim_t], device=device)
    self.window = torch.hann_window(window_length=n_fft, periodic=True, device=device)

  def stft(self, x:torch.Tensor) -> torch.Tensor:
    ''' Waveform -> real/imag spectrogram, keeping only the lowest dim_f bins. '''
    spec = torch.stft(
      x.reshape([-1, self.chunk_size]),
      n_fft=self.n_fft, hop_length=self.hop, window=self.window,
      center=True, return_complex=True,
    )
    spec = torch.view_as_real(spec).permute([0, 3, 1, 2])
    spec = spec.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape([-1, self.dim_c, self.n_bins, self.dim_t])
    return spec[:, :, :self.dim_f]   # low freqs only: [B=1, C=4, D=3073 => 3072, L=256]

  def istft(self, x:torch.Tensor) -> torch.Tensor:
    ''' Inverse of stft(): re-pad the dropped high bins and resynthesize the waveform. '''
    spec = torch.cat([x, self.freq_pad.repeat([x.shape[0], 1, 1, 1])], -2)
    c = 4 * 2 if self.target_name == '*' else 2
    spec = spec.reshape([-1, c, 2, self.n_bins, self.dim_t]).reshape([-1, 2, self.n_bins, self.dim_t])
    spec = torch.view_as_complex(spec.permute([0, 2, 3, 1]).contiguous())
    wave = torch.istft(spec, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True)
    return wave.reshape([-1, c, self.chunk_size])


class Predictor:

  ''' Vocal separator: runs the MDX-Net ONNX model on STFT chunks of the input mix. '''

  def __init__(self, args:dict):
    # args: vars() of the CLI namespace; must provide dim_f/dim_t/n_fft/model_path/chunks/margin/denoise
    self.args = args
    self.stft = STFT(
      target_name='vocals',
      dim_f=args['dim_f'],
      dim_t=args['dim_t'],
      n_fft=args['n_fft']
    )
    # onnxruntime-gpu is not installed, so stay on CPU :(
    provider = 'CPUExecutionProvider'   # 'CUDAExecutionProvider'
    self.model = ort.InferenceSession(args['model_path'], providers=[provider])

  def demix(self, mix:np.ndarray) -> np.ndarray:
    ''' Split the stereo mix [C=2, L] into chunks overlapping by `margin`, separate each, return [C=2, L]. '''
    n_samples = mix.shape[-1]
    margin = self.args['margin']
    chunk_size = self.args['chunks'] * RESAMPE_RATE   # presumably 'chunks' is in seconds — TODO confirm
    if margin > chunk_size:
      margin = chunk_size
    if self.args['chunks'] == 0 or n_samples < chunk_size:
      chunk_size = n_samples    # chunks=0 means process the whole track in one piece

    counter = -1
    segmented_mix = {}  # start_pos => wav_chunk (with `margin` overlap on both sides, except at the edges)
    for skip in range(0, n_samples, chunk_size):
      counter += 1
      s_margin = 0 if counter == 0 else margin   # the first chunk has no left margin
      end = min(skip + chunk_size + margin, n_samples)
      start = skip - s_margin
      segmented_mix[skip] = mix[:, start:end].copy()
      if end == n_samples:
        break

    return self.demix_base(segmented_mix, margin)

  @torch.inference_mode   # NOTE(review): bare (uncalled) decorator — relies on torch accepting a function here (torch>=2.x); older torch needs @torch.inference_mode(). Confirm.
  def demix_base(self, mixes:dict, margin_size:int) -> np.ndarray:
    ''' Run model inference on each chunk and stitch the results back together with margins trimmed. '''
    chunked_sources = []
    for mix, cmix in mixes.items():   # mix: chunk start position; cmix: [C=2, L] waveform
      n_samples = cmix.shape[1]
      trim = self.stft.n_fft // 2                 # 3072
      gen_size = self.stft.chunk_size - 2 * trim  # 254976
      pad = gen_size - n_samples % gen_size
      # pad the tail up to a multiple of gen_size, then add trim on both ends (STFT sliding-window headroom)
      mix_p = np.concatenate((np.zeros((2, trim)), cmix, np.zeros((2, pad)), np.zeros((2, trim))), 1)

      # slice the padded track into STFT-sized windows; in our use case there should be only one (B=1)
      mix_waves = []
      i = 0
      while i < n_samples + pad:
        waves = np.asarray(mix_p[:, i : i + self.stft.chunk_size])
        mix_waves.append(waves)
        i += gen_size     # bug? windows advance by gen_size but span chunk_size — TODO confirm
      # batch up and run inference
      mix_waves = torch.tensor(np.asarray(mix_waves), device=device, dtype=torch.float32)   # [1, 2, 261120]
      spek = self.stft.stft(mix_waves)    # [1, C=4, D=3072, L=256], C=4 is stereo x real/imag
      spek[:, :, :3, :] *= 0                   # zero out the inaudible lowest-frequency bins
      spek = spek.cpu().numpy()
      if self.args['denoise']:
        # average over the spectrum and its negation to cancel model noise
        spec_pred = 0.5 * (
            self.model.run(None, {'input':  spek})[0]
          - self.model.run(None, {'input': -spek})[0]
        )
      else:
        spec_pred = self.model.run(None, {'input': spek})[0]
      tar_waves = self.stft.istft(torch.tensor(spec_pred, device=device))
      # strip trim and pad; but why is the batch dimension not un-batched here??!
      tar_signal = tar_waves[:, :, trim:-trim].transpose(0, 1).reshape(2, -1).cpu().numpy()[:, :-pad]

      # drop the margin overlap (keep it only at the track's outer edges)
      start = 0 if mix == 0 else margin_size
      end = None if mix == list(mixes.keys())[::-1][0] else -margin_size
      if margin_size == 0: end = None
      chunked_sources.append(tar_signal[:, start:end])

    return np.concatenate(chunked_sources, axis=-1)

  def predict(self, fp:Path) -> np.ndarray:
    ''' Load an audio file, separate the vocals, and return them as a mono waveform [L]. '''
    mix, _ = librosa.load(fp, mono=False, sr=RESAMPE_RATE)
    if mix.ndim == 1:
      mix = np.asfortranarray([mix, mix])   # duplicate mono input to stereo [C=2, L]
    opt = self.demix(mix).T                 # [L, C=2]
    opt = opt[:, 0] / 2 + opt[:, 1] / 2     # stereo to mono
    return opt


if __name__ == '__main__':
  parser = ArgumentParser()
  parser.add_argument('-M', '--model_path', type=Path, default=MODEL_PATH,    help='MDX Net ONNX Model path')
  parser.add_argument('-I', '--input',      type=Path, required=True,         help='Input file or folder')
  parser.add_argument('-O', '--output',     type=Path,                        help='Output folder')
  parser.add_argument('-F', '--n_fft',      type=int,  default=7680)          # fixed wrt. model version
  parser.add_argument('-f', '--dim_f',      type=int,  default=3072)          # fixed wrt. model version
  parser.add_argument('-t', '--dim_t',      type=int,  default=8)             # fixed wrt. model version, 2**8=256
  parser.add_argument('-c', '--chunks',     type=int,  default=0,             help='Chunk size; 0 means do not chunk when demix')
  parser.add_argument('-m', '--margin',     type=int,  default=RESAMPE_RATE,  help='Margin')
  parser.add_argument('-D', '--denoise', action='store_false', default=True,  help='Disable denoising')
  parser.add_argument('--overwrite', action='store_true',  help='Force overwrite if file exists')
  args = parser.parse_args()

  assert args.model_path.is_file(), '--model_path not exists, please manually download UVR_MDXNET_Main.onnx from https://github.com/TRvlvr/model_repo/releases/tag/all_public_uvr_models'
  assert args.margin != 0, '--margin can not be 0'

  # resolve the list of input files and the output folder
  if args.input.is_file():
    fps = [args.input]
    if args.output is None:
      args.output = args.input.parent
  else:
    assert args.input.is_dir(), '--input must be an audio folder'
    fps = [fp for fp in args.input.iterdir() if fp.suffix == '.wav']
    # sort by the numeric id in names like 'xxx_<id>.wav'; fall back to filesystem order otherwise
    # (was a bare `except: pass`, which also swallowed KeyboardInterrupt/SystemExit)
    try:
      fps.sort(key=lambda e: int(e.stem.split('_')[1]))
    except (IndexError, ValueError):
      pass
    if args.output is None:
      args.output = args.input.with_suffix('.mdx')
  os.makedirs(args.output, exist_ok=True)
  print(f'>> Read from folder: {args.input}')
  print(f'>> Save to folder: {args.output}')

  # batch-separate vocals, resample to SAMPLE_RATE and save
  predictor = Predictor(vars(args))
  ts_start = time()
  for fp in tqdm(fps):
    if not fp.is_file(): continue
    if fp.suffix.lower() not in ['.wav', '.mp3', '.flac']: continue
    fp_save: Path = args.output / (fp.stem + '_vocals.wav')
    if not args.overwrite and fp_save.exists(): continue   # skip already-processed files
    vocals = predictor.predict(fp)
    vocals = librosa.resample(vocals, orig_sr=RESAMPE_RATE, target_sr=SAMPLE_RATE)
    sf.write(fp_save, vocals, SAMPLE_RATE)
  ts_end = time()
  print(f'>> Done {ts_end - ts_start:.3f}s')
