import os
import re
import gc
import tensorflow.keras as keras
import numpy as np
import pandas as pd
from glob import glob
from scipy import signal
from scipy.io import wavfile
from scipy.io import wavfile
from datetime import datetime
from scipy.fftpack import fft
from sklearn.model_selection import train_test_split
from tensorflow.keras import optimizers, losses, activations, models
from tensorflow.keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization
import sys
from sklearn.metrics import accuracy_score, confusion_matrix
import tensorflow as tf
import pickle


# Fourier transform
def custom_fft(y, fs):
    """Return (frequencies, magnitudes) of the one-sided FFT of `y`.

    Parameters:
      y  -- 1-D real signal
      fs -- sampling rate in Hz

    Returns a pair (xf, vals), each of length len(y) // 2: the frequency
    axis from 0 to fs/2 and the amplitude-scaled magnitude spectrum.
    """
    T = 1.0 / fs
    n = y.shape[0]
    half = n // 2
    # the spectrum of a real signal is symmetric, so only the first half
    # carries information; frequencies run from 0 up to the Nyquist rate
    xf = np.linspace(0.0, 1.0 / (2.0 * T), half)
    # magnitudes of the complex coefficients, scaled by 2/N so a unit
    # sine shows up with amplitude ~1
    vals = 2.0 / n * np.abs(fft(y)[:half])
    return xf, vals


# log spectrogram
def log_specgram(audio, sample_rate, window_size=20, step_size=10, eps=1e-10):
    """Compute a log-magnitude spectrogram of `audio`.

    Parameters:
      audio       -- 1-D signal
      sample_rate -- sampling rate in Hz
      window_size -- analysis window length in milliseconds
      step_size   -- hop length in milliseconds
      eps         -- small constant to avoid log(0)

    Returns (freqs, times, log_spec) where log_spec has shape
    (n_times, n_freqs) as float32.
    """
    # convert the millisecond parameters into sample counts
    nperseg = int(round(window_size * sample_rate / 1e3))
    noverlap = int(round(step_size * sample_rate / 1e3))
    # NOTE(review): `noverlap` is set to the *step* length rather than
    # nperseg - step; the model's input shape (99, 81) depends on this,
    # so it is deliberately preserved as-is.
    freqs, times, spec = signal.spectrogram(
        audio,
        fs=sample_rate,
        window='hann',
        nperseg=nperseg,
        noverlap=noverlap,
        detrend=False,
    )
    # transpose to (time, freq) and take the log
    return freqs, times, np.log(spec.T.astype(np.float32) + eps)


# list wav files under <dirpath>/<label>/<name>.<ext>
def list_wavs_fname(dirpath, ext='wav'):
    """Collect (label, filename) pairs for every audio file in `dirpath`.

    Expects a layout of <dirpath>/<label_dir>/<file>.<ext>; the parent
    directory name is used as the class label.

    Parameters:
      dirpath -- root directory of the labelled audio tree
      ext     -- file extension without the dot (default 'wav')

    Returns (labels, fnames), two parallel lists of strings.
    """
    print(dirpath)
    # FIX: anchor the extension with a dot — '*/*wav' would also glob files
    # whose names merely end in 'wav'. The regex below already filtered
    # those out, so results are unchanged, just less wasted work.
    fpaths = glob(dirpath + '/' + r'*/*.' + ext)
    fpaths = [normalize_path(path) for path in fpaths]
    pat = r'.+/([^/]+)/([^/]+\.' + ext + ')$'
    labels = []
    fnames = []
    for fpath in fpaths:
        r = re.match(pat, fpath)
        if r:
            labels.append(r.group(1))  # parent directory == class label
            fnames.append(r.group(2))  # bare file name
    return labels, fnames


# pad audio
def pad_audio(samples, length=None):
    """Left-pad `samples` with zeros up to `length` samples.

    Parameters:
      samples -- 1-D array of audio samples
      length  -- target length; defaults to the module-level global `L`
                 (backward compatible with the original global-only version)

    Returns `samples` unchanged if already long enough, otherwise a new
    array of exactly `length` samples with zeros prepended.
    """
    if length is None:
        length = L  # module-level clip length (16000 in this script)
    if len(samples) >= length:
        return samples
    # zeros go in FRONT of the signal, keeping the audio at the tail
    return np.pad(samples, pad_width=(length - len(samples), 0), mode='constant', constant_values=(0, 0))


# chop audio
def chop_audio(samples, L=16000, num=20):
    """Yield `num` random crops of exactly `L` samples from `samples`.

    Parameters:
      samples -- 1-D array with len(samples) >= L
      num     -- number of random crops to generate

    Yields arrays of length L.
    """
    # FIX: np.random.randint's upper bound is exclusive, so the original
    # `len(samples) - L` could never pick the final window and raised
    # ValueError when len(samples) == L. `+ 1` fixes both.
    hi = len(samples) - L + 1
    for _ in range(num):
        beg = np.random.randint(0, hi)
        yield samples[beg: beg + L]


# label transform
def label_transform(labels, legal=None):
    """One-hot encode raw directory labels.

    '_background_noise_' maps to 'silence'; anything not in `legal`
    maps to 'unknown'; everything else is kept as-is.

    Parameters:
      labels -- iterable of raw label strings
      legal  -- collection of accepted labels; defaults to the
                module-level global `legal_labels` (backward compatible)

    Returns a pandas DataFrame of indicator columns, one per distinct
    mapped label (columns sorted alphabetically by pd.get_dummies).
    """
    if legal is None:
        legal = legal_labels
    nlabels = []
    for label in labels:
        if label == '_background_noise_':
            nlabels.append('silence')
        elif label not in legal:
            nlabels.append('unknown')
        else:
            nlabels.append(label)
    return pd.get_dummies(pd.Series(nlabels))


# model: load an existing CNN or build a new one, then (optionally) train it
def model_cnn(x_train, y_train):
    """Return a CNN classifier for log-spectrogram images, training as needed.

    Behavior depends on module-level globals:
      model_path -- directory holding 'cnn.model' and its sibling 'epoch.txt'
      IS_GO_ON   -- truthy: resume training a saved model; falsy: return it untrained
      N_EPOCHS   -- number of additional epochs to run
      LOG_DIR    -- TensorBoard log directory

    Parameters:
      x_train -- array of shape (n, 99, 81, 1) spectrogram images
      y_train -- one-hot label matrix of shape (n, nclass)

    Returns the (possibly freshly fitted) keras Model; the trained model
    and its cumulative epoch count are saved back under model_path.
    """
    the_path = os.path.join(model_path, 'cnn.model')
    # epoch.txt sits next to the saved model; it records how many epochs
    # have already run so resumed TensorBoard curves stay continuous
    epoch_path = os.path.join(the_path, '..', 'epoch.txt')
    base_epoch = 0
    if os.path.exists(the_path):
        if not IS_GO_ON:
            # cached model exists and resuming is disabled: return it as-is
            model = keras.models.load_model(the_path)
            model.summary()
            return model
        with open(epoch_path, 'r') as f:
            base_epoch = int(f.read())
        model = keras.models.load_model(the_path)
    else:
        input_shape = (99, 81, 1)  # (time, freq, channel) as produced by log_specgram
        nclass = y_train.shape[1]  # number of classes inferred from the one-hot labels
        inp = Input(shape=input_shape)
        norm_inp = BatchNormalization()(inp)
        # three conv stages (8 -> 16 -> 32 filters), each ending in pool + dropout
        img_1 = Convolution2D(8, kernel_size=2, activation=activations.relu)(norm_inp)
        img_1 = Convolution2D(8, kernel_size=2, activation=activations.relu)(img_1)
        img_1 = MaxPooling2D(pool_size=(2, 2))(img_1)
        img_1 = Dropout(rate=0.2)(img_1)
        img_1 = Convolution2D(16, kernel_size=3, activation=activations.relu)(img_1)
        img_1 = Convolution2D(16, kernel_size=3, activation=activations.relu)(img_1)
        img_1 = MaxPooling2D(pool_size=(2, 2))(img_1)
        img_1 = Dropout(rate=0.2)(img_1)
        img_1 = Convolution2D(32, kernel_size=3, activation=activations.relu)(img_1)
        img_1 = MaxPooling2D(pool_size=(2, 2))(img_1)
        img_1 = Dropout(rate=0.2)(img_1)
        img_1 = Flatten()(img_1)

        dense_1 = BatchNormalization()(Dense(128, activation=activations.relu)(img_1))
        dense_1 = BatchNormalization()(Dense(128, activation=activations.relu)(dense_1))
        dense_1 = Dense(nclass, activation=activations.softmax)(dense_1)

        model = models.Model(inputs=inp, outputs=dense_1)
        opt = optimizers.Adam()

        # FIX: `metrics` is documented as a *list* of metrics, not a bare callable
        model.compile(optimizer=opt, loss=losses.categorical_crossentropy,
                      metrics=[keras.metrics.categorical_accuracy])
    model.summary()

    # hold out 10% for validation; fixed seed keeps the split reproducible
    x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=2017)
    # FIX: `callbacks` is documented as a *list* of Callback instances
    model.fit(x_train, y_train, batch_size=16, validation_data=(x_valid, y_valid),
              epochs=base_epoch + N_EPOCHS, shuffle=True, verbose=1, initial_epoch=base_epoch,
              callbacks=[keras.callbacks.TensorBoard(LOG_DIR, update_freq='epoch', profile_batch=0)])

    model.save(the_path)
    # persist the cumulative epoch count for the next resumed run
    with open(epoch_path, 'w') as f:
        f.write(f'{base_epoch + N_EPOCHS}')

    return model


# test data generator
def test_data_generator(batch=16):
    """Yield batches of test data as (filenames, image array, true labels).

    Walks <test_data_path>/<label>/<name>wav files, builds a log
    spectrogram per file, and yields batches of at most `batch` items.
    The image array has shape (b, 99, 81, 1); the true label is the
    parent directory name.

    Relies on module globals: test_data_path, L, new_sample_rate, and the
    sibling functions normalize_path, pad_audio, log_specgram.
    """
    fpaths = glob(test_data_path + '/' + '*/*wav')
    imgs = []
    fnames = []
    trues = []
    for path in fpaths:
        path = normalize_path(path)
        rate, samples = wavfile.read(path)
        # assumes 2-channel wav input; keeps the first channel — TODO confirm
        samples = samples[:, 0]
        samples = pad_audio(samples)
        xlen = samples.shape[0]
        # take the centered window of exactly L samples
        samples = samples[xlen // 2 - L // 2:xlen // 2 + L // 2]
        resampled = signal.resample(samples, int(new_sample_rate / rate * samples.shape[0]))
        _, _, specgram = log_specgram(resampled, sample_rate=new_sample_rate)
        imgs.append(specgram)
        path_arr = path.split('/')
        fnames.append(path_arr[-1])   # bare file name
        trues.append(path_arr[-2])    # parent dir == ground-truth label
        if len(imgs) == batch:
            arr = np.array(imgs)
            # add the trailing channel axis expected by the CNN
            yield fnames, arr.reshape(arr.shape + (1,)), trues
            imgs, fnames, trues = [], [], []
    # FIX: flush only a non-empty remainder. The original `if i < batch:`
    # re-yielded the final full batch when the file count was an exact
    # multiple of `batch`, and raised NameError on an empty directory.
    if imgs:
        arr = np.array(imgs)
        yield fnames, arr.reshape(arr.shape + (1,)), trues


def normalize_path(path):
    """Return `path` with Windows backslash separators replaced by '/'."""
    return '/'.join(path.split('\\'))


if __name__ == "__main__":
    # ---- constants -----------------------------------------------------
    L = 16000  # clip length in samples used for padding/cropping
    # L = 31744  # ATTENTION Cannot use this value
    new_sample_rate = 8000  # every clip is resampled to this rate before the spectrogram
    # legal_labels = 'yes no up down left right on off stop go silence unknown'.split()
    legal_labels = 'left right stop'.split()  # labels outside this set map to 'unknown'

    # fixed seeds for reproducible chopping and weight initialisation
    np.random.seed(1)
    tf.random.set_seed(1)

    # src folders
    VER = 'v7.0'
    N_EPOCHS = 5
    IS_GO_ON = 0  # truthy -> resume training a previously saved model
    BASE_DIR, FILE_NAME = os.path.split(__file__)
    SAVE_DIR = os.path.join(BASE_DIR, '_save', FILE_NAME, VER)
    LOG_DIR = os.path.join(BASE_DIR, '_log', FILE_NAME, VER)
    # root_path = r'..'
    root_path = r'.'
    # out_path = r'.'
    out_path = r'out'
    # model_path = r'.'
    model_path = r'model'
    root_path = os.path.join(SAVE_DIR, root_path)
    out_path = os.path.join(SAVE_DIR, out_path)
    model_path = os.path.join(SAVE_DIR, model_path)
    # NOTE(review): dataset lives far outside the repo tree — verify this path
    data_dir = '../../../../../large_data/audio/_many_files/direction_data_3rd_train_test_split'
    # train_data_path = os.path.join(out_path, 'input', 'train', 'audio')
    train_data_path = normalize_path(os.path.join(BASE_DIR, data_dir, 'train'))
    # test_data_path = os.path.join(out_path, 'input', 'test', 'audio_100')
    test_data_path = normalize_path(os.path.join(BASE_DIR, data_dir, 'test'))

    task_begin = datetime.now()
    print(str(task_begin) + " task begin")

    # ---- load cached features, or build them from the raw wav files ----
    path = os.path.join(SAVE_DIR, 'data_cache', 'data_cache.pkl')
    if os.path.exists(path):
        print('Loading dataset ...')
        # NOTE(review): pickle.load on a cache this script wrote itself;
        # never point it at untrusted data.
        with open(path, 'rb') as f:
            dataset = pickle.load(f)
        x_train = dataset['x_train']
        y_train = dataset['y_train']
        label_index = dataset['label_index']
        print('Loaded')
    else:

        labels, fnames = list_wavs_fname(train_data_path)
        print('labels', np.shape(labels))
        print('labels[:5]', labels[:5])
        print('fnames', np.shape(fnames))
        print('fnames[:5]', fnames[:5])

        y_train = []
        x_train = []

        xy_begin = datetime.now()
        print(str(xy_begin) + " xy begin")

        cnt = 0
        for label, fname in zip(labels, fnames):
            sample_rate, samples = wavfile.read(os.path.join(train_data_path, label, fname))
            # assumes 2-channel wav input; keeps the first channel — TODO confirm
            samples = samples[:, 0]
            samples = pad_audio(samples)
            if len(samples) > L:
                # clips longer than L yield several random L-sample crops
                n_samples = chop_audio(samples)
            else:
                n_samples = [samples]
            for samples in n_samples:
                resampled = signal.resample(samples, int(new_sample_rate / sample_rate * samples.shape[0]))
                _, _, specgram = log_specgram(resampled, sample_rate=new_sample_rate)
                y_train.append(label)
                x_train.append(specgram)
            cnt += 1
            if cnt % 25 == 0:
                print(f'Process {cnt} files.')

        reshape_begin = datetime.now()
        print(str(reshape_begin) + " reshape begin")

        # stack to (n, 99, 81), then add the trailing channel axis for the CNN
        x_train = np.array(x_train)
        x_train = x_train.reshape(tuple(list(x_train.shape) + [1]))
        y_train = label_transform(y_train)
        # column order of the one-hot frame maps argmax index back to a label
        label_index = y_train.columns.values
        y_train = y_train.values
        y_train = np.array(y_train)
        del labels, fnames
        gc.collect()
        print('Saving ...')
        os.makedirs(os.path.split(path)[0], exist_ok=True)
        dataset = dict(x_train=x_train, y_train=y_train, label_index=label_index)
        with open(path, 'wb') as f:
            pickle.dump(dataset, f)
        print('Saved dataset.')

    print('x_train', x_train.shape)
    print('y_train', y_train.shape)

    model_begin = datetime.now()
    print(str(model_begin) + " model begin")
    model = model_cnn(x_train, y_train)

    predict_begin = datetime.now()
    print(str(predict_begin) + " predict begin")

    # free the training arrays before batching the test set
    del x_train, y_train
    gc.collect()

    # ---- batched prediction over the test set --------------------------
    index = []    # test file names
    results = []  # predicted label strings
    labels = []   # ground-truth labels (name reuses the variable deleted above)
    for fnames, imgs, trues in test_data_generator(batch=32):
        print('imgs', imgs.shape)
        print('trues', np.shape(trues))

        predicts = model.predict(imgs)
        predicts = np.argmax(predicts, axis=1)
        # class index -> label string via the one-hot column order
        predicts = [label_index[p] for p in predicts]
        index.extend(fnames)
        results.extend(predicts)
        labels.extend(trues)

    acc = accuracy_score(labels, results)
    print(f'Accuracy: {acc}')
    mat = confusion_matrix(labels, results)
    print('Confusion matrix')
    print(mat)

    # ---- write <fname, predicted, true> rows to CSV --------------------
    print('Writing result ...')
    df = pd.DataFrame(columns=['fname', 'label', 'true'])
    df['fname'] = index
    df['label'] = results
    df['true'] = labels
    csv_path = os.path.join(out_path, 'sub.csv')
    os.makedirs(os.path.split(csv_path)[0], exist_ok=True)
    df.to_csv(csv_path, index=False)
    print(f'Written to {csv_path}')
    print('Over')