import torch
import numpy as np
import librosa
from scipy import signal
from os.path import basename

def load_wav(path, sr):
  """Read the audio file at *path* as a mono float waveform resampled to *sr* Hz."""
  waveform, _ = librosa.core.load(path, sr=sr)
  return waveform

def preemphasis(wav, k):
  """High-pass pre-emphasis filter: y[n] = x[n] - k * x[n-1] (y[0] = x[0])."""
  numerator, denominator = [1, -k], [1]
  return signal.lfilter(numerator, denominator, wav)

def melspectrogram(wav):
  """Compute the normalized log-mel spectrogram of *wav*; shape (80, frames)."""
  emphasized = preemphasis(wav, 0.97)
  magnitudes = np.abs(_stft(emphasized))
  # -20 dB reference level before normalization
  mel_db = _amp_to_db(_linear_to_mel(magnitudes)) - 20
  return _normalize(mel_db)


def _stft(y):
  """Short-time Fourier transform (50 ms window, 12.5 ms hop at 16 kHz)."""
  n_fft, hop_length, win_length = 800, 200, 800
  return librosa.stft(y=y, n_fft=n_fft, hop_length=hop_length, win_length=win_length)


_mel_basis = None  # mel filterbank cache, populated on first use


def _linear_to_mel(spectogram):
  """Project a linear-frequency magnitude spectrogram onto 80 mel bands.

  Fix: the original declared ``global _mel_basis`` (clearly intending a
  cache) but rebuilt the filterbank on every call; the basis is constant,
  so it is now built once and reused. Output is unchanged.
  """
  global _mel_basis
  if _mel_basis is None:
    _mel_basis = _build_mel_basis()
  return np.dot(_mel_basis, spectogram)


def _build_mel_basis():
  """Create the 80-band mel filterbank matching the ``_stft`` parameters."""
  sample_rate, fft_size = 16000, 800
  return librosa.filters.mel(sr=sample_rate, n_fft=fft_size,
                             n_mels=80, fmin=55, fmax=7600)


def _amp_to_db(x):
  min_level = np.exp(-5 * np.log(10))
  return 20 * np.log10(np.maximum(min_level, x))


def _normalize(S):
  return np.clip((2 * 4.) * ((S - -100) / (--100)) - 4., -4., 4.)

class AudDataset(object):
  """Slices a wav file into 16-frame mel windows aligned to 25 fps video frames."""

  def __init__(self, wavpath, is_normalize=False):
    """Load *wavpath* at 16 kHz and precompute its mel spectrogram.

    Args:
      wavpath: path to the audio file.
      is_normalize: if True, peak-normalize the waveform before analysis.
    """
    self.wav = load_wav(wavpath, 16000)
    if is_normalize:
      self.wav = librosa.util.normalize(self.wav)
    # (num_mel_frames, 80); hop=200 at 16 kHz -> 80 mel frames per second
    self.orig_mel = melspectrogram(self.wav).T

  def get_frame_id(self, frame):
    """Extract the integer frame index from a path like '.../123.jpg'."""
    return int(basename(frame).split('.')[0])

  def crop_audio_window_mid(self, spec, mid_frame):
    """Return the 16 mel frames centred on video frame *mid_frame*.

    *mid_frame* is either an int frame index or a frame filename.
    Windows that run past either end of *spec* are padded by repeating
    the first/last available mel frame.
    """
    # Fix: isinstance instead of type() == int; explicit None sentinels
    # instead of the fragile "'start_frames' in locals()" introspection.
    if isinstance(mid_frame, int):
      start_frame_num = mid_frame
    else:
      start_frame_num = self.get_frame_id(mid_frame)
    # 80 mel frames per second / 25 video frames per second
    mid_idx = int(80. * (start_frame_num / float(25)))
    start_idx = mid_idx - 8
    end_idx = mid_idx + 8

    start_frames = None
    end_frames = None
    if start_idx < 0:
      start_pad = -start_idx
      start_idx = 0
      start_frames = np.repeat(spec[0].reshape(1, 80), start_pad, axis=0)
    if end_idx > spec.shape[0]:
      end_pad = end_idx - spec.shape[0] + 1
      end_idx = spec.shape[0] - 1
      end_frames = np.repeat(spec[end_idx].reshape(1, 80), end_pad, axis=0)

    spec = spec[start_idx: end_idx, :]
    if start_frames is not None:
      spec = np.concatenate((start_frames, spec), axis=0)
    if end_frames is not None:
      spec = np.concatenate((spec, end_frames), axis=0)
    return spec

  def __len__(self):
    """Number of 25 fps video frames covered by the audio."""
    return int(self.orig_mel.shape[0] / 80. * float(25))

  def __getitem__(self, idx):
    """Return the (1, 80, 16) float mel window for video frame *idx*."""
    if idx >= self.__len__():
      raise IndexError('index out of range')
    mel = self.crop_audio_window_mid(self.orig_mel.copy(), idx)
    if mel.shape[0] != 16:
      raise Exception('mel.shape[0] != 16')
    return torch.FloatTensor(mel.T).unsqueeze(0)

class BFMDataset(object):
  """Per-frame face-model coefficients (expr + jaw_pose) loaded from an .npz.

  NOTE(review): this class is immediately shadowed by the second
  ``BFMDataset`` defined later in this module, so as written it is dead
  code — confirm which definition callers actually need and rename one.
  """

  def __init__(self, npz_path):
    data = np.load(npz_path, allow_pickle=True)
    self.files = data.files
    self.jaw_pose = data.get('jaw_pose')
    self.expr = data.get('expr')
    self.shape = data.get('shape')
    self._len = len(self.expr)

  def __len__(self):
    return self._len

  def __getitem__(self, idx):
    """Return (current, previous, shape) tensors for frame *idx*.

    current/previous are 103-dim: expr (100) concatenated with jaw_pose
    (3); previous falls back to frame 0 when idx == 0.
    """
    prev_idx = max(idx - 1, 0)
    current = np.concatenate((self.expr[idx], self.jaw_pose[idx]), axis=0)
    previous = np.concatenate((self.expr[prev_idx], self.jaw_pose[prev_idx]), axis=0)
    return (torch.from_numpy(current),
            torch.from_numpy(previous),
            torch.from_numpy(self.shape))
  
  
class BFMDataset(object):
  """Expression/shape coefficients from an .npz, resampled to 25 fps.

  The capture may run at a different frame rate; indices are stretched by
  ``orig_fps / 25`` so ``__getitem__(i)`` returns the coefficients of the
  source frame corresponding to 25 fps frame *i*.
  """

  _target_fps = 25

  def __init__(self, npz_path):
    data = np.load(npz_path, allow_pickle=True)
    self.files = data.files
    self.shapes = data['shapes']
    self.exprs = data['exprs']
    self.rotvecs = data['rotvecs']
    self.translation = data['translation']

    self._orig_fps = data["fps"]
    self._trans_coef = self._orig_fps / self._target_fps
    self._len = int(data["frame_count"] / self._trans_coef)

  def __len__(self):
    return self._len

  def __getitem__(self, idx):
    """Return (expr, expr_prev, shape) tensors for 25 fps frame *idx*.

    expr has 79 coefficients, shape has 100; expr_prev falls back to
    source frame 0 at the start of the sequence.
    """
    cur_idx = int(idx * self._trans_coef)
    prev_idx = max(int((idx - 1) * self._trans_coef), 0)
    return (torch.from_numpy(self.exprs[cur_idx]),
            torch.from_numpy(self.exprs[prev_idx]),
            torch.from_numpy(self.shapes[0]))
    
    