from concurrent.futures import ProcessPoolExecutor
from functools import partial
import numpy as np
import os
import glob
from hparams import hparams as hp
from util import audio
import jieba
from pypinyin import pinyin, lazy_pinyin, Style
from pypinyin.contrib.neutral_tone import NeutralToneWith5Mixin
from pypinyin.converter import DefaultConverter
from pypinyin.core import Pinyin
import pdb

class MyConverter(NeutralToneWith5Mixin, DefaultConverter):
    """pypinyin converter that writes the neutral tone as an explicit "5".

    With NeutralToneWith5Mixin every syllable in the generated
    transcriptions carries a tone digit (e.g. "de5" instead of "de"),
    which keeps the pinyin output uniform for downstream parsing.
    """
    pass

# Build a Pinyin instance backed by the tone-5 converter above.
my_pinyin = Pinyin(MyConverter())
# NOTE: these assignments deliberately shadow the `pinyin` / `lazy_pinyin`
# functions imported from pypinyin, so the rest of this module transparently
# uses the custom converter.
pinyin = my_pinyin.pinyin
lazy_pinyin = my_pinyin.lazy_pinyin

# Maps the 2-digit sentence id embedded in each recording's file name
# (extracted in generate_trn_file as stem[-3:-1]) to the Chinese sentence
# spoken in that recording. The dataset uses these 20 fixed sentences.
sentence_map = {
  '01' : '啊你可真伟大啊',
  '02' : '快点干吧',
  '03' : '这下完了',
  '04' : '啊下雨了',
  '05' : '太棒了',
  '06' : '我真的以为你是这个意思',
  '07' : '我在论文上看见你的名字了',
  '08' : 'AC米兰赢球了',
  '09' : '我这次考试刚刚通过',
  '10' : '今天是星期天',
  '11' : '哎你这人',
  '12' : '电话铃响了',
  '13' : '他就快来了',
  '14' : '路上人真多啊',
  '15' : '明天我要搬家了',
  '16' : '这件事是他干的',
  '17' : '你这段时间变瘦了',
  '18' : '过两天学校就要开学了',
  '19' : '昨天晚上我做了一个梦',
  '20' : '有一辆车向我们开过来了'
}

def generate_trn_file(in_dir, out_dir):
    """Write a .trn transcription file for every recording in <in_dir>/data.

    For each file the 2-digit sentence id is taken from its name
    (stem[-3:-1]), looked up in `sentence_map`, and a two-line .trn file is
    written into <in_dir>/data_trn:
      line 1: the sentence segmented into words by jieba
      line 2: the pinyin with numeric tones (TONE3 style; neutral tone = 5)

    Files whose name does not encode a known sentence id are skipped.

    Args:
      in_dir: Dataset root directory containing the `data` subdirectory.
      out_dir: Unused; kept for interface compatibility with existing callers.
    """
    data_dir = os.path.join(in_dir, 'data')
    trn_dir = os.path.join(in_dir, 'data_trn')
    os.makedirs(trn_dir, exist_ok=True)
    for afile in sorted(os.listdir(data_dir)):
        stem = os.path.splitext(afile)[0]
        # Sentence id is the 2 digits just before the stem's last character.
        sentence_id = stem[-3:-1]
        # The original code wrapped the whole body in a bare `except: continue`,
        # which also hid real bugs; only the expected failure (an unknown
        # sentence id) is treated as skippable now.
        sentence_content = sentence_map.get(sentence_id)
        if sentence_content is None:
            continue
        word_str = ' '.join(jieba.lcut(sentence_content))
        pinyin_str = ' '.join(
            lazy_pinyin(sentence_content, style=Style.TONE3, heteronym=False))
        trn_file = os.path.join(trn_dir, stem + '.trn')
        with open(trn_file, 'w', encoding='utf-8') as f:
            print(word_str, file=f)
            print(pinyin_str, file=f)
   


def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
  '''Preprocesses the THCHS30 dataset from a given input path into a given output directory.

    Args:
      in_dir: The directory where you have downloaded the THCHS30 dataset
      out_dir: The directory to write the output into
      num_workers: Optional number of worker processes to parallelize across
      tqdm: You can optionally pass tqdm to get a nice progress bar

    Returns:
      A list of tuples describing the training examples. This should be written to train.txt
  '''

  # We use ProcessPoolExecutor to parallelize across processes. This is just
  # an optimization and you can omit it and just call _process_utterance on
  # each input if you want. The `with` block guarantees the pool is shut down
  # (the original code leaked it).
  trn_files = glob.glob(os.path.join(in_dir, 'data_trn', '*.trn'))
  futures = []
  with ProcessPoolExecutor(max_workers=num_workers) as executor:
    for index, trn in enumerate(trn_files, start=1):
      with open(trn, encoding='utf-8') as f:
        f.readline()  # line 1: word segmentation — not needed here
        pinyin = f.readline().strip('\n')  # line 2: pinyin transcription
      # The matching wav lives in <in_dir>/data with the same base name.
      wav_file = os.path.join(in_dir, 'data',
                              os.path.basename(trn)[:-4] + '.wav')
      task = partial(_process_utterance, out_dir, index, wav_file, pinyin)
      futures.append(executor.submit(task))
    # Collect each result exactly once (the original called future.result()
    # twice per future), dropping utterances _process_utterance rejected.
    results = [future.result() for future in tqdm(futures)]
  return [r for r in results if r is not None]

def _process_utterance(out_dir, index, wav_path, pinyin):
    '''Preprocesses a single utterance audio/text pair.

    Extracts and normalizes acoustic features (lf0 / mgc / bap), saves them
    as .npy files in out_dir, and returns a tuple to write to train.txt.

    Args:
      out_dir: The directory to write the feature files into
      index: The numeric index to use in the feature filenames.
      wav_path: Path to the audio file containing the speech input
      pinyin: The pinyin of Chinese spoken in the input audio file

    Returns:
      A (lf0_filename, mgc_filename, bap_filename, n_frames, pinyin) tuple
      for train.txt, or None when the utterance exceeds hp.max_frame_num.
    '''
    # Load channel 0 of the recording as a C-contiguous numpy array, then
    # strip leading/trailing silence.
    samples = np.array(audio.load_wav(wav_path)[:, 0], order='C')
    samples = audio.trim_silence(samples)

    # Acoustic analysis: fundamental frequency, spectral envelope,
    # aperiodicity (per audio.feature_extract).
    f0, sp, ap = audio.feature_extract(samples)
    n_frames = len(f0)

    # Overly long utterances are rejected outright.
    if n_frames > hp.max_frame_num:
        return None

    # Normalize each feature stream and pair it with its output filename.
    features = {
        'lf0-%05d.npy' % index: audio.f0_normalize(f0),
        'mgc-%05d.npy' % index: audio.sp_normalize(sp),
        'bap-%05d.npy' % index: audio.ap_normalize(ap),
    }
    for fname, array in features.items():
        np.save(os.path.join(out_dir, fname), array, allow_pickle=False)

    # Return a tuple describing this training example:
    lf0_file, mgc_file, bap_file = features.keys()
    return (lf0_file, mgc_file, bap_file, n_frames, pinyin)