#!/usr/bin/env python3
'''
Copyright © 2021 DUE TUL
@ date  : Monday, January 15, 2020
@ desc  : This module is used to load raw data
@ author:
'''
import os
import random
import librosa
import pandas as pd
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from matplotlib import pyplot as plt
from sklearn import preprocessing
import scipy


def _float_feature(value):
    """Wrap a float (or list of floats) in a ``tf.train.Feature``."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))


def _int64_feature(value):
    """Wrap an int (or list of ints) in a ``tf.train.Feature``."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))


# Build one TFRecord example from a feature vector and its label.
def data_example(data, label):
    """Return a ``tf.train.Example`` with 'data' (floats) and 'label' (int64)."""
    return tf.train.Example(features=tf.train.Features(feature={
        'data': _float_feature(data),
        'label': _int64_feature(label),
    }))


def create_data_tfrecord(audio_data_list_path, acc_train_list_path, train_save_path, test_save_path):
    """Write paired accelerometer/audio spectrogram features to TFRecord files.

    Parameters
    ----------
    audio_data_list_path : str
        List file with one ``<wav path>\\t<label>`` entry per line.
    acc_train_list_path : str
        List file with one ``<csv path>\\t<label>`` entry per line; each CSV
        holds ``index,time,z,y,x`` accelerometer columns.
    train_save_path, test_save_path : str
        Output TFRecord paths; roughly 1 chunk in 7 goes to the test file.
    """
    # Open the list files read-only ('r', not 'r+') — nothing is written back.
    with open(acc_train_list_path, 'r') as f, open(audio_data_list_path, 'r') as audio_f:
        data = f.readlines()              # all accelerometer files and labels
        audio_data = audio_f.readlines()  # all audio files and labels
    audio_paths = []
    audio_labels = []
    for line in audio_data:
        audio_path, audio_label = line.replace('\n', '').split('\t')
        audio_paths.append(audio_path)
        audio_labels.append(audio_label)

    with tf.io.TFRecordWriter(train_save_path) as train:
        with tf.io.TFRecordWriter(test_save_path) as test:
            for _i, d in enumerate(data):  # one accelerometer CSV per entry
                path, label = d.replace('\n', '').split('\t')
                index, time, z, y, x = np.loadtxt(path, delimiter=',', unpack=True, dtype=np.float64)
                # Normalize the x-axis signal to [-1, 1].
                x = x * 1.0 / (max(abs(x)))
                chunks = int(len(x) / 1000)  # hoisted: used for loop bound and count
                for i in tqdm(range(chunks)):  # process 1000 samples at a time
                    start = 1000 * i
                    rx = x[start:start + 1000]
                    _freqs, _times, ps = scipy.signal.stft(rx, fs=1000, nperseg=256, noverlap=128,
                                                           boundary=None, padded=None)
                    acc_tf_ps = np.abs(ps[1:, :])  # drop the DC row, keep magnitudes
                    # Flatten the lowest remaining frequency row to the spectrum minimum.
                    # (Row assignment works for any STFT frame count, unlike the old
                    # hard-coded 6-element list.)
                    acc_tf_ps[0, :] = np.min(acc_tf_ps)
                    acc_feature = acc_tf_ps

                    # Index of the matching audio clip.  NOTE(review): this assumes every
                    # accelerometer file yields exactly `chunks` clips in the audio list,
                    # in the same order — confirm against the list-generation step.
                    count = _i * chunks + i
                    wav, sr = librosa.load(audio_paths[count], sr=16000)
                    # Keep only the non-silent intervals (20 dB threshold).
                    intervals = librosa.effects.split(wav, top_db=20)
                    wav_output = []
                    wav_len = int(16000 * 1)  # exactly one second at 16 kHz
                    for sliced in intervals:
                        wav_output.extend(wav[sliced[0]:sliced[1]])
                    if len(wav_output) > wav_len:
                        # Too long: take a random one-second window.
                        offset = random.randint(0, len(wav_output) - wav_len)
                        wav_output = wav_output[offset:wav_len + offset]
                    else:
                        # Too short: pad the tail with silence.
                        wav_output.extend(np.zeros(shape=[wav_len - len(wav_output)], dtype=np.float32))
                    wav_output = np.array(wav_output)

                    # NOTE(review): fs=1000 although the audio was loaded at 16 kHz; fs only
                    # scales the (unused) frequency axis, so the magnitudes are unaffected,
                    # but confirm the mismatch is intentional.
                    _freqs, _times, ps = scipy.signal.stft(wav_output, fs=1000, nperseg=256,
                                                           noverlap=128, boundary=None, padded=None)
                    audio_tf_ps = np.abs(ps[1:, :])  # drop DC row, keep magnitudes

                    # Stack the audio spectrogram with 15 copies of the acc spectrogram.
                    merge_tf_ps = np.column_stack((audio_tf_ps,) + (acc_feature,) * 15)
                    if (count + 1) % 100 == 0:
                        # Periodic visual sanity check of the merged feature map.
                        plt.imshow(merge_tf_ps, cmap='Blues', aspect='auto', origin='lower',
                                   extent=[0, 3, 0, 500], vmin=0)
                        plt.show()
                    tf_example = data_example(merge_tf_ps.reshape(-1).tolist(), int(label))
                    # Roughly one chunk in seven goes to the test set.
                    if i % 7 == 0:
                        test.write(tf_example.SerializeToString())
                    else:
                        train.write(tf_example.SerializeToString())


def get_acc_data_list(mpudata_path, list_path):
    """Write ``acc_data_list.txt`` under *list_path*.

    Each entry of *mpudata_path* produces one ``<path>\\t<index>`` line,
    where the index is the entry's position in the directory listing.
    """
    mpuclass = os.listdir(mpudata_path)
    # Context manager guarantees the list file is closed even on error
    # (the old explicit close() leaked the handle on exceptions).
    with open(os.path.join(list_path, 'acc_data_list.txt'), 'w') as f_acc:
        for i, name in enumerate(mpuclass):
            single_path = os.path.join(mpudata_path, name)
            f_acc.write('%s\t%d\n' % (single_path, i))
            print("mpu：%d/%d" % (i + 1, len(mpuclass)))


# Generate data list
def get_audio_data_list(audio_path, list_path):
    """Write ``audio_data_list.txt`` under *list_path*.

    Scans one sub-directory per class under *audio_path*; each audio file
    of at least one second produces a ``<path>\\t<class index>`` line.
    """
    audios = os.listdir(audio_path)
    # Context manager guarantees the list file is closed even on error
    # (the old explicit close() leaked the handle on exceptions).
    with open(os.path.join(list_path, 'audio_data_list.txt'), 'w') as f_audio:
        for i, class_dir in enumerate(audios):
            for sound in os.listdir(os.path.join(audio_path, class_dir)):
                sound_path = os.path.join(audio_path, class_dir, sound)
                # Filter out audio shorter than 1 second.
                if librosa.get_duration(filename=sound_path) >= 1:
                    f_audio.write('%s\t%d\n' % (sound_path, i))


# Script entry point: build both file lists, then convert everything to TFRecords.
if __name__ == '__main__':
    list_dir = 'dataset/lists'
    get_audio_data_list('dataset/audio', list_dir)
    get_acc_data_list('dataset/mpu', list_dir)
    create_data_tfrecord(
        'dataset/lists/audio_data_list.txt',
        'dataset/lists/acc_data_list.txt',
        'dataset/tfrecords/non-sacle-merge_train.tfrecord',
        'dataset/tfrecords/non-sacle-merge_test.tfrecord',
    )
