#!/usr/bin/python
# -*- encoding:utf-8 -*-
'''
@Author     : Alexxrhuang
@Date       : 2019.06.27
@Updates    :
              2019.07.10 -- split the dataset using per-class ratios
'''


from __future__ import print_function
import os
import re
import sox
import math
import json
import shutil
import pickle
import random
import subprocess
import collections
import numpy as np
from glob import glob
from sklearn import preprocessing
import utils.vad2 as Vad
import utils.common as common
from utils.common import logger, create_folder
from utils.feature import load_audio, extract_fbank
transformer = sox.Transformer()             # module-level pysox transformer (appears unused in this file; kept for compatibility)
CURRENT_VERBOSITY = 3                       # verbosity level attached to every logger call via extra={"verbosity": ...}


# Global Var
LABELS = {}                                 # SED class name -> class id, filled in place by init_misc()
SCLAER = preprocessing.StandardScaler()     # used for CMVN (mean/variance normalization); NOTE(review): name is a typo of "SCALER"


def analysis_time(wav_list, LABELS, label_percentage_threshold):
    '''Analyze per-class audio duration and select classes to augment.

    Args:
        wav_list: list of wav paths; the parent folder name of each path is
            taken as its class label.
        LABELS: dict mapping class name -> class id (keys define the classes).
        label_percentage_threshold: classes whose share of the total duration
            is below this ratio are marked for data augmentation.

    Returns:
        data_enhance_list: class names whose duration ratio is below the
            threshold and therefore need augmentation.
    '''
    time_dict = {k: 0 for k in LABELS}
    for wav_path in wav_list:
        # sox queries the wav header for its duration in seconds.
        duration = sox.file_info.duration(wav_path)
        wav_label = wav_path.strip().split('/')[-2]
        time_dict[wav_label] += duration

    # Hoist the total out of the comprehension (the original recomputed the
    # sum for every class) and guard against an empty wav_list, which would
    # otherwise raise ZeroDivisionError.
    total_time = sum(time_dict.values())
    if total_time <= 0:
        total_time = 1e-8
    time_pertentage_dict = {k: float(v) / total_time for k, v in time_dict.items()}

    # A class whose share of the total duration is below the (roughly
    # average) threshold is under-represented and needs augmentation.
    data_enhance_list = [k for k, v in time_pertentage_dict.items()
                         if v < label_percentage_threshold]
    time_dict = collections.OrderedDict(time_dict)
    time_pertentage_dict = collections.OrderedDict(time_pertentage_dict)
    logger.info('data time is: ' + json.dumps(time_dict, indent=4), extra={"verbosity": CURRENT_VERBOSITY})
    logger.info('data percentage is: ' + json.dumps(time_pertentage_dict, indent=4), extra={"verbosity": CURRENT_VERBOSITY})
    logger.info('class that needs to enhance is: ' + ', '.join(data_enhance_list), extra={"verbosity": CURRENT_VERBOSITY})
    return data_enhance_list


def init_misc(LABELS, audio_dir, feat_dir):
    '''Initialize LABELS from the sub-folders of audio_dir and persist them.

    Each sub-directory of audio_dir is one class; classes get consecutive
    integer ids.  The name -> id mapping is written to <feat_dir>/label_info,
    one "name id" pair per line.

    Args:
        LABELS: dict filled in place with class name -> class id.
        audio_dir: raw audio directory, one sub-folder per class.
        feat_dir: feature directory used here to store the label mapping.
    '''
    # Sort the sub-directories so class ids are deterministic across runs
    # (glob order is filesystem-dependent).  The original also called
    # sorted(LABELS.items(), ...) and discarded the result -- a no-op.
    subdirs = sorted(glob(audio_dir + '/*'))
    count = 0
    for subdir in subdirs:      # renamed from `dir`, which shadows the builtin
        if os.path.isdir(subdir):
            LABELS[subdir.strip().split('/')[-1]] = count
            count += 1

    label_info = feat_dir + '/label_info'
    with open(label_info, 'w') as lf:
        for key, value in LABELS.items():
            lf.write(key + ' ' + str(value) + '\n')


def make_feat_label(wav_dict, num_frame_len_fft, num_frame_shift_fft,
                    num_mel_bands, sample_rate, power):
    '''Extract fbank features and frame-level one-hot labels per utterance.

    Args:
        wav_dict: dict mapping wav path -> class id.
        num_frame_len_fft: FFT points of the frame length.
        num_frame_shift_fft: FFT points of the frame shift.
        num_mel_bands: number of mel filter banks.
        sample_rate: audio sample rate in Hz.
        power: spectrum exponent (2 -> power spectrum).

    Returns:
        total_fbank: all fbank features concatenated along axis 0,
            or None if no wav could be loaded.
        total_label: matching one-hot SED labels, or None.
    '''
    fbank_list = []
    label_list = []
    for wav, class_id in wav_dict.items():
        try:
            # Load the waveform, shape=(signal_length, channel).
            pcm_data = load_audio(wav, mono=True, fs=sample_rate)
        except Exception as e:
            # Best effort: log and skip unreadable files.
            logger.error(str(e) + ' ' + wav, extra={"verbosity": 0})
            continue
        # fbank features; after .T presumably (num_frames, num_mel_bands) -- TODO confirm.
        fbank = extract_fbank(pcm_data, sample_rate, num_frame_len_fft, num_frame_shift_fft, num_mel_bands, power).T
        # Every frame of the utterance carries the utterance's class label.
        label = np.zeros((fbank.shape[0], len(LABELS)))
        label[:, class_id] = 1
        fbank_list.append(fbank)
        label_list.append(label)
    if not fbank_list:
        return None, None
    # Concatenate once at the end; the original re-concatenated inside the
    # loop, which is quadratic in the number of utterances.
    return np.concatenate(fbank_list, 0), np.concatenate(label_list, 0)


def split_wav(scp_dir, feat_dir, LABELS, data_ratio):
    '''Split the dataset into train/dev/test, stratified per class.

    Args:
        scp_dir: path of the wav.scp index ("utt_id wav_path" per line).
        feat_dir: unused; kept so existing callers keep working.
        LABELS: dict mapping class name -> class id.
        data_ratio: sequence whose first two entries are the train and dev
            ratios; the test set receives the remainder.

    Returns:
        train_wav_list, dev_wav_list, test_wav_list: lists of wav paths.
    '''
    total_wav_list = []
    with open(scp_dir, 'r') as scp_file:
        for scp_line in scp_file:
            # Skip blank lines; the original would IndexError on them.
            if scp_line.strip():
                total_wav_list.append(scp_line.strip().split()[1])

    # Bucket wavs per class so every class is split with the same ratio.
    lists = [[] for _ in range(len(LABELS))]
    for wav_path in total_wav_list:
        lists[LABELS[wav_path.split('/')[-2]]].append(wav_path)

    train_ratio = data_ratio[0]
    dev_ratio = data_ratio[1]
    train_wav_list_split, dev_wav_list_split, test_wav_list_split = [], [], []
    for wav_list in lists:
        # Shuffle within each class so the split is unbiased.
        random.shuffle(wav_list)
        wav_num = len(wav_list)
        # Compute the boundary indices once instead of five times.
        train_end = int(math.floor(wav_num * train_ratio))
        dev_end = int(math.floor(wav_num * (train_ratio + dev_ratio)))
        train_wav_list_split.append(wav_list[:train_end])
        dev_wav_list_split.append(wav_list[train_end:dev_end])
        test_wav_list_split.append(wav_list[dev_end:])
        print(wav_num, train_end, dev_end - train_end, wav_num - dev_end)
        if wav_list:                    # guard: a class may have no wavs
            print(wav_list[0])
    # Flatten the per-class lists into one list per set.
    train_wav_list = [x for sublist in train_wav_list_split for x in sublist]
    dev_wav_list = [x for sublist in dev_wav_list_split for x in sublist]
    test_wav_list = [x for sublist in test_wav_list_split for x in sublist]

    return train_wav_list, dev_wav_list, test_wav_list


def preprocess_dataset(scp_dir, feat_dir, audio_dir, vad_audio_dir, LABELS, info, is_vad, speed_list):
    '''Split the dataset and optionally apply VAD and data augmentation.

    Args:
        scp_dir: path of the wav.scp index listing all audio.
        feat_dir: feature dir; an existing <set>.list file means the split
            (and any VAD/augmentation) was already done and is reloaded.
        audio_dir: raw audio dir with one sub-folder per class.
        vad_audio_dir: output dir for the VAD-trimmed audio.
        LABELS: dict mapping class name -> class id.
        info: set names, e.g. ['train', 'dev', 'test'].
        is_vad: whether to run VAD on every set.
        speed_list: speed-perturbation rates (see the pysox docs);
            None disables augmentation entirely.

    Returns:
        wav_lists: one list of wav paths per entry of info.
    '''
    num_labels = len(LABELS.keys())
    # A class below ~1/(1.3*N) of the total duration is under-represented.
    label_percentage_threshold = 1.0/(1.3 * num_labels)

    wav_lists = []
    if not os.path.isfile(feat_dir + '/' + info[0] + '.list'):
        logger.info('Start spliting dataset to train|dev|test', extra={"verbosity": CURRENT_VERBOSITY})
        wav_lists = list(split_wav(scp_dir, feat_dir, LABELS, [0.8, 0.1, 0.1]))     # turn the tuple to list
        # Preprocess the three sets one by one.
        for i in range(len(info)):
            logger.info('[*] Start processing dataset: %s' % (info[i]), extra={"verbosity": CURRENT_VERBOSITY})
            # Per-class duration analysis.
            data_enhance_list = analysis_time(wav_lists[i], LABELS, label_percentage_threshold)
            # Vad
            if is_vad:
                logger.info('Start doing Vad', extra={"verbosity": CURRENT_VERBOSITY})
                output_audio_dir = os.path.join(vad_audio_dir, info[i])
                # Remove stale VAD results before regenerating them.
                if os.path.isdir(output_audio_dir):
                    shutil.rmtree(output_audio_dir, ignore_errors=True)
                create_folder(output_audio_dir)
                sub_dirs = glob(audio_dir + '/*')
                for sub_dir in sub_dirs:
                    if os.path.isdir(sub_dir):
                        sub_dir = sub_dir.strip().split('/')[-1]
                        create_folder(os.path.join(output_audio_dir, sub_dir))

                wav_lists[i] = Vad.vad(audio_dir, output_audio_dir, wav_lists[i])
                # Re-analyze: VAD changes the per-class durations.
                data_enhance_list = analysis_time(wav_lists[i], LABELS, label_percentage_threshold)
            else:
                logger.info('Avoid doing Vad', extra={"verbosity": CURRENT_VERBOSITY})

            # Speed & volume perturbation.
            # Runs after VAD so augmented audio is not VAD-ed again;
            # only the training set is augmented.
            if speed_list is not None and info[i] == 'train':
                logger.info('Start doing speed & volume perturbation for train', extra={"verbosity": CURRENT_VERBOSITY})
                is_pitch_correction = False
                scale_low = 0.125
                scale_high = 2
                # Two random volume gains (the original comment said "pitch",
                # but sox --vol scales amplitude).
                vol_scale_list = [random.uniform(scale_low, scale_high) for _ in range(2)]
                # Iterate over a copy because wav_lists[i] grows in the loop.
                tmp_list = wav_lists[i][:]
                for wav_path in tmp_list:
                    wav_label = wav_path.split('/')[-2]
                    if wav_label in data_enhance_list:
                        wav_name = wav_path.split('/')[-1]
                        wav_path_affix = '/'.join(wav_path.split('/')[:-1])
                        for speed_rate in speed_list:
                            out_wav_path = wav_path_affix + '/sp' + str(speed_rate) + '-' + wav_name
                            try:
                                # `tempo` preserves pitch, `speed` shifts it.
                                if is_pitch_correction:
                                    shell_command = 'sox -t wav %s -t wav %s tempo %f' % (wav_path, out_wav_path, speed_rate)
                                else:
                                    shell_command = 'sox -t wav %s -t wav %s speed %f' % (wav_path, out_wav_path, speed_rate)
                                with open(os.devnull, 'w') as devnull:
                                    subprocess.check_output(shell_command.split(), stderr=devnull)
                            except Exception as e:
                                logger.info('wav %s, error: %s' % (wav_path, str(e)), extra={"verbosity": CURRENT_VERBOSITY})
                            else:
                                # Register the new wav only when sox succeeded;
                                # the original appended it even on failure.
                                wav_lists[i].append(out_wav_path)

                        for vol_scale in vol_scale_list:
                            out_wav_path = wav_path_affix + '/vol' + str(vol_scale) + '-' + wav_name
                            try:
                                shell_command = 'sox --vol %f -t wav %s -t wav %s' % (vol_scale, wav_path, out_wav_path)
                                with open(os.devnull, 'w') as devnull:
                                    subprocess.check_output(shell_command.split(), stderr=devnull)
                            except Exception as e:
                                logger.info('wav %s, error: %s' % (wav_path, str(e)), extra={"verbosity": CURRENT_VERBOSITY})
                            else:
                                wav_lists[i].append(out_wav_path)
                data_enhance_list = analysis_time(wav_lists[i], LABELS, label_percentage_threshold)
            else:
                logger.info('Avoid doing speed perturbation', extra={"verbosity": CURRENT_VERBOSITY})

            # Discard audio smaller than 100 KiB.  Rebuild the list rather
            # than calling remove() while iterating it, which skips elements.
            kept = []
            for wav_path in wav_lists[i]:
                if os.path.getsize(wav_path) < 100 * 1024:
                    logger.info('Discard too short speech', extra={"verbosity": CURRENT_VERBOSITY})
                else:
                    kept.append(wav_path)
            wav_lists[i] = kept
    else:
        # Reload the previously saved splits from the .list files.
        logger.info('Avoid spliting dataset and doing vad again', extra={"verbosity": CURRENT_VERBOSITY})
        for i in range(len(info)):
            tmp_lists = []
            with open(feat_dir + '/' + info[i] + '.list', 'r') as f:
                for line in f:
                    if line.strip():
                        tmp_lists.append(line.strip())
            wav_lists.append(tmp_lists)
    return wav_lists


def prepare_data(audio_dir, feat_dir, vad_audio_dir, num_frame_len_fft=2048, num_frame_shift_fft=1024,
                 num_mel_bands=128, sample_rate=44100, power=2, is_vad=True, speed_list=(0.85, 1.15)):
    '''Prepare features and labels for the train/dev/test sets.

    Args:
        audio_dir: dataset root: one sub-folder per class plus wav.scp.
        feat_dir: output dir for features, labels and wav lists.
        vad_audio_dir: output dir for VAD-trimmed audio.
        num_frame_len_fft: FFT points of the frame length.
        num_frame_shift_fft: FFT points of the frame shift.
        num_mel_bands: number of mel filter banks.
        sample_rate: audio sample rate in Hz.
        power: spectrum exponent (2 -> power spectrum).
        is_vad: whether to run VAD.
        speed_list: speed-perturbation rates.  Now a tuple so the default
            is immutable (mutable default arguments are shared between
            calls); None disables augmentation.
    '''
    fbank_dict = {}
    label_dict = {}
    info = ['train', 'dev', 'test']
    scp_dir = audio_dir + '/wav.scp'
    common.create_folder(feat_dir)

    # 0. Dataset preprocessing: label init, split, VAD, augmentation.
    init_misc(LABELS, audio_dir, feat_dir)
    wav_lists = preprocess_dataset(scp_dir, feat_dir, audio_dir, vad_audio_dir,
                                   LABELS, info, is_vad, speed_list)

    # 1. Train and dev sets: all utterance features are concatenated.
    for i in range(len(info) - 1):
        logger.info('Start making feat and label for %s' % (info[i]), extra={"verbosity": CURRENT_VERBOSITY})
        # Annotation dict: key = wav path, value = class id.  Built directly
        # as an OrderedDict so the per-class grouping of wav_lists[i] is kept
        # and most concatenated chunks belong to one class (wrapping a plain
        # dict, as before, loses the order under Python 2).
        wav_dict = collections.OrderedDict()
        for wav in wav_lists[i]:
            wav_dict[wav] = LABELS[wav.split('/')[-2]]

        fbank, label = make_feat_label(wav_dict, num_frame_len_fft, num_frame_shift_fft,
                                       num_mel_bands, sample_rate, power)
        fbank_dict[info[i]] = fbank
        label_dict[info[i]] = label
    # CMVN: fit stores the train mean/variance, transform normalizes with them.
    X_train = SCLAER.fit_transform(fbank_dict['train'])
    X_dev = SCLAER.transform(fbank_dict['dev'])

    with open(os.path.join(feat_dir, 'train.pickle'), 'wb') as f:
        pickle.dump(X_train, f)
        pickle.dump(X_dev, f)
        pickle.dump(label_dict['train'], f)
        pickle.dump(label_dict['dev'], f)

    # 2. Test set: each utterance is kept as a separate entry.
    test_fbank_list = []
    test_label_list = []
    logger.info('Start making feat and label for test', extra={"verbosity": CURRENT_VERBOSITY})
    for wav in wav_lists[2]:
        wav_dict = {wav: LABELS[wav.split('/')[-2]]}
        fbank, label = make_feat_label(wav_dict, num_frame_len_fft, num_frame_shift_fft,
                                       num_mel_bands, sample_rate, power)
        fbank = SCLAER.transform(fbank)
        test_fbank_list.append(fbank)
        test_label_list.append(label)
    X_test = np.array(test_fbank_list)
    label_dict['test'] = np.array(test_label_list)
    with open(os.path.join(feat_dir, 'test.pickle'), 'wb') as f:
        pickle.dump(X_test, f)
        pickle.dump(label_dict['test'], f)

    # 3. Write the final wav lists to <feat_dir>/<set>.list.
    for i in range(len(info)):
        with open(feat_dir + '/' + info[i] + '.list', 'w') as f:
            for wav_path in wav_lists[i]:
                f.write(wav_path + '\n')


if __name__ == '__main__':
    audio_dir = '/home/xiaorong/Data/SED_SMALL'
    feat_dir = '/home/xiaorong/workstation/sed-crnn-master/exp/feat'
    # NOTE(review): the original call passed the parameters positionally into
    # the wrong slots (vad_audio_dir received 2048, sample_rate received 2,
    # power received None) and never defined vad_audio_dir at all.  Pass
    # everything by keyword instead.  TODO confirm the vad_audio_dir location.
    vad_audio_dir = '/home/xiaorong/workstation/sed-crnn-master/exp/vad_audio'
    sample_rate = 44100
    num_frame_len_fft = 2048            # FFT points of the frame length
    num_frame_shift_fft = 1024          # FFT points of the frame shift
    power = 2                           # use the power spectrum
    num_mel_bands = 128                 # mel dimension

    # The trailing None in the original call looks intended to disable the
    # speed perturbation -- keep that behavior explicitly via speed_list=None.
    prepare_data(audio_dir, feat_dir, vad_audio_dir,
                 num_frame_len_fft=num_frame_len_fft,
                 num_frame_shift_fft=num_frame_shift_fft,
                 num_mel_bands=num_mel_bands,
                 sample_rate=sample_rate,
                 power=power,
                 speed_list=None)