#!/usr/bin/env python
# encoding: utf-8

import torchaudio
from scipy.io import wavfile
import soundfile as sf
import matplotlib.pyplot as plt
import torch
import numpy as np
from sklearn.manifold import TSNE
from multiprocessing import Pool


def compute_deltas(specgram):
    """Append first- and second-order deltas to *specgram*.

    The two delta tensors are concatenated onto the original along the
    last axis, tripling its length; all other dimensions are unchanged.
    """
    first_order = torchaudio.functional.compute_deltas(specgram)
    second_order = torchaudio.functional.compute_deltas(first_order)
    # NOTE(review): for an (n_mfcc, time) spectrogram this concatenates
    # along the time axis; stacking deltas on the feature axis (dim=0)
    # is the more common convention -- confirm this is intended.
    return torch.cat([specgram, first_order, second_order], dim=-1)


def get_mel_mean(filename):
    """Load an audio file and return its time-averaged MFCC(+deltas) vector.

    Reads the waveform, trims silence with VAD, computes 40 MFCCs plus
    first/second deltas, and averages over the time axis to obtain a
    fixed-length embedding for the whole utterance.

    :param filename: path to an audio file readable by soundfile
    :return: 1-D numpy array (mean feature vector)
    """
    audio, sample_rate = sf.read(filename)
    # Trim leading silence before feature extraction.
    audio = torchaudio.functional.vad(waveform=torch.Tensor(audio), sample_rate=sample_rate)
    # FIX: use the file's actual sample rate instead of a hard-coded 16000;
    # a mismatch silently misplaces the mel filterbank frequencies.
    mel = torchaudio.transforms.MFCC(sample_rate=sample_rate, n_mfcc=40, log_mels=True)(audio)
    mel = compute_deltas(mel).numpy()
    # Collapse the time axis to a single mean vector per utterance.
    return np.mean(mel, axis=1)


def draw_tsne(data_list_path, fig_path="tsne.png"):
    """Compute t-SNE of per-utterance embeddings and save a scatter plot.

    :param data_list_path: path to a torch-saved pair ``(sequences, speakers)``
        where ``sequences`` is a list of ``(label, wav_path)`` rows.
        Only ``sequences`` is used here.
    :param fig_path: output image path (default ``"tsne.png"``).
    """
    audio_sequences, _speakers = torch.load(data_list_path)

    # Transpose the list of (label, path) rows into parallel columns.
    columns = np.array(audio_sequences).T
    labels = np.array(columns[0]).astype(int)
    wav_paths = columns[1]

    # Extract the per-file mean MFCC embeddings in parallel.
    # (Pool.map already returns a list; no extra list() needed.)
    with Pool(10) as pool:
        data = np.array(pool.map(get_mel_mean, wav_paths))

    # FIX: removed the leftover debug `input(len(label))`, which blocked
    # execution waiting for stdin, and the unused `mel_arr` accumulator.
    print(data.shape)  # quick sanity check of the embedding matrix

    tsne = TSNE(n_components=2, init='pca', learning_rate=10,
                perplexity=12, n_iter=1000)
    transformed = tsne.fit_transform(data)

    plt.figure()
    plt.scatter(transformed[:, 0], transformed[:, 1], 10,
                c=labels, cmap=plt.cm.Spectral, alpha=0.5)
    plt.title('audio embedding visualization')
    plt.savefig(fig_path)
    plt.close()  # release the figure so repeated calls don't accumulate

if __name__ == "__main__":
    # Entry point: visualize the dev-set utterance embeddings.
    draw_tsne('data/dev_list')
