"""
MNMF & ILRMA Separation offline room simulation (work in progress)
======================================================
Preliminary simulations already run, but the script is still incomplete: it only produces
SIR and SDR data (saved under ``data``); the plotting functions have not been added yet.
Edit the simulation parameters in main_sim_config.json.

This script requires the `mir_eval` to run, and `tkinter` and `sounddevice` packages for the GUI option.
"""
import matplotlib
import numpy as np
import time, sys, os
from scipy.io import wavfile
import chainer
from chainer import cuda, serializers
from chainer import functions as chf
from progressbar import progressbar
import librosa
import soundfile as sf
import pickle as pic
import argparse, json, os, sys
import pyroomacoustics as pra
import rrtools

# find the absolute path to this file
base_dir = os.path.abspath(os.path.split(__file__)[0])

matplotlib.use("TkAgg")
sys.path.append("./CupyLibrary")
sys.path.append("./FullRank_Model")
sys.path.append("./Rank1_Model")
sys.path.append("./Jointly_Diagonalizable_FullRank_Model")

from configure import *
from MNMF import MNMF
from MNMF_online import MNMF_online, pre_batch_init
from MNMF_DP import MNMF_DP
from MNMF_DP_online import MNMF_DP_online, pre_batch_init_DP
from FastMNMF import FastMNMF
from FastMNMF_DP import FastMNMF_DP
from ILRMA import ILRMA
from FastMNMF_online import FastMNMF_online
from FastMNMF2_online import FastMNMF2_online


from mir_eval.separation import bss_eval_sources

from routines import (
    PlaySoundGUI,
    grid_layout,
    semi_circle_layout,
    random_layout,
    gm_layout,
)

# Get the data if needed
from get_data import get_data, samples_dir
get_data()

# Once we are sure the data is there, import some methods
# to select and read samples
sys.path.append(samples_dir)
from generate_samples import sampling, wav_read_center

def init(parameters):
    """Attach the absolute base directory of this file to the shared parameter dict."""
    parameters.update({"base_dir": base_dir})


def one_loop(args):
    """Simulate one room configuration and run every configured BSS algorithm on it.

    Parameters
    ----------
    args : sequence
        Unpacked as ``(n_targets, n_mics, rt60, sinr, wav_files, seed)``.

    Returns
    -------
    list of dict
        One record per entry of ``parameters["algorithm_kwargs"]`` holding the
        SDR/SIR trajectories, the runtime, and the simulation metadata.  An
        empty list is returned for underdetermined cases (``n_mics < n_targets``).
    """
    global parameters

    # Imports are repeated inside the function because it may execute in a
    # separate worker process (rrtools), where module-level state is not shared.
    import time
    import numpy

    np = numpy

    import pyroomacoustics

    pra = pyroomacoustics

    import sys

    sys.path.append(parameters["base_dir"])

    from routines import semi_circle_layout, random_layout, gm_layout, grid_layout
    from MNMF import MNMF
    from MNMF_online import MNMF_online, pre_batch_init
    from MNMF_DP import MNMF_DP
    from MNMF_DP_online import MNMF_DP_online, pre_batch_init_DP
    from FastMNMF import FastMNMF
    from FastMNMF_online import FastMNMF_online, pre_batch_init_fast
    from FastMNMF2_online import FastMNMF2_online, pre_batch_init_fast2
    from FastMNMF_DP import FastMNMF_DP
    from ILRMA import ILRMA

    # import samples helper routine
    from get_data import samples_dir
    sys.path.append(samples_dir)
    from generate_samples import wav_read_center

    n_targets, n_mics, rt60, sinr, wav_files, seed = args

    # this is the underdetermined case. We don't do that.
    if n_mics < n_targets:
        return []

    # set MKL to only use one thread if present
    try:
        import mkl

        mkl.set_num_threads(1)
    except ImportError:
        pass

    # set the RNG seed (the previous state is restored before returning)
    rng_state = np.random.get_state()
    np.random.seed(seed)

    # STFT parameters
    framesize = parameters["stft_params"]["framesize"]
    win_a = pra.hann(framesize)
    win_s = pra.transform.compute_synthesis_window(win_a, framesize // 4)

    # Generate the audio signals

    # get the simulation parameters from the json file
    # Simulation parameters
    n_repeat = parameters["n_repeat"]
    fs = parameters["fs"]
    snr = parameters["snr"]
    gpu = parameters["gpu"]

    n_interferers = parameters["n_interferers"]
    n_latent = parameters["n_latent"]
    ref_mic = parameters["ref_mic"]
    room_dim = np.array(parameters["room_dim"])

    first_batch_size = parameters["first_batch_size"]
    mini_batch_size = parameters["mini_batch_size"]

    # per-target variances; the first target is deliberately made weaker
    sources_var = np.ones(n_targets)
    sources_var[0] = parameters["weak_source_var"]

    # total number of sources
    n_sources = n_interferers + n_targets

    # Geometry of the room and location of sources and microphones
    # interferer_locs = random_layout(
    #     [3.0, 5.5, 1.5], n_interferers, offset=[6.5, 1.0, 0.5], seed=1
    # )
    interferer_locs = random_layout(
        [3.0, 5.5, 0], n_interferers, offset=[6.0, 1.0, 1.2], seed=123
    )

    target_locs = semi_circle_layout(
        [4.1, 3.755, 1.2],
        np.pi / 1.5,
        2.0,  # 120 degrees arc, 2 meters away
        n_targets,
        rot=0.743 * np.pi,
    )

    source_locs = np.concatenate((target_locs, interferer_locs), axis=1)

    mic_locs = semi_circle_layout([4.1, 3.76, 1.2], np.pi, 0.04, n_mics, rot=np.pi / 2. * 0.99)

    signals = wav_read_center(wav_files, seed=123)

    # Create the room itself
    room = pra.ShoeBox(
        room_dim,
        fs=fs,
        absorption=parameters["rt60_list"][rt60]["absorption"],
        max_order=parameters["rt60_list"][rt60]["max_order"],
    )

    # Place all the sound sources
    for sig, loc in zip(signals[-n_sources:, :], source_locs.T):
        room.add_source(loc, signal=sig)

    assert len(room.sources) == n_sources, (
        "Number of signals ({}) doesn't"
        " match number of sources ({})".format(signals.shape[0], n_sources)
    )

    # Place the microphone array
    room.add_microphone_array(pra.MicrophoneArray(mic_locs, fs=room.fs))

    # compute RIRs
    room.compute_rir()

    # Run the simulation
    premix = room.simulate(return_premix=True)  # shape (n_src, n_mics, n_samples)
    n_samples = premix.shape[2]

    # Normalize the signals so that they all have unit
    # variance at the reference microphone
    p_mic_ref = np.std(premix[:, ref_mic, :], axis=1)
    premix /= p_mic_ref[:, None, None]

    # scale to pre-defined variance
    premix[:n_targets, :, :] *= np.sqrt(sources_var[:, None, None])

    # compute noise variance
    sigma_n = np.sqrt(10 ** (-snr / 10) * np.sum(sources_var))

    # now compute the power of interference signal needed to achieve desired SINR
    sigma_i = np.sqrt(
        np.maximum(0, 10 ** (-sinr / 10) * np.sum(sources_var) - sigma_n ** 2)
        / n_interferers
    )
    premix[n_targets:, :, :] *= sigma_i

    # sum up the background
    # shape (n_mics, n_samples)
    background = (
            np.sum(premix[n_targets:, :, :], axis=0)
            + sigma_n * np.random.randn(*premix.shape[1:])
            )

    # Mix down the recorded signals
    mix = np.sum(premix[:n_targets], axis=0) + background

    # shape (n_targets+1, n_samples, n_mics)
    ref = np.zeros((n_targets+1, premix.shape[2], premix.shape[1]), dtype=premix.dtype)
    ref[:n_targets, :, :] = premix[:n_targets, :, :].swapaxes(1, 2)
    ref[n_targets, :, :] = background.T

    synth = np.zeros_like(ref)
    synth[n_targets, :, 0] = np.random.randn(synth.shape[1])  # fill this to compare to background

    # START BSS
    ###########
    sys.path.append("./DeepSpeechPrior")
    import network_VAE
    model_fileName = "./DeepSpeechPrior/model-VAE-best-scale=gamma-D={}.npz".format(n_latent)
    speech_VAE = network_VAE.VAE(n_latent=n_latent)
    serializers.load_npz(model_fileName, speech_VAE)
    name_DNN = "VAE"

    if gpu < 0:
        import numpy as xp
    else:
        import cupy as xp
        print("Use GPU " + str(gpu))
        cuda.get_device_from_id(gpu).use()
        speech_VAE.to_gpu()

    # shape: (n_frames, n_freq, n_mics)
    X_all = pra.transform.analysis(mix.T, framesize, framesize // 4, win=win_a)
    X_mics = X_all[:, :, :n_mics]
    X_sim = X_mics.swapaxes(0,1)

    # convergence monitoring callback
    def convergence_callback(Y, n_targets, SDR, SIR, ref, framesize, win_s, algo_name):
        """Resynthesize ``Y``, align it with the reference, and append SDR/SIR."""
        from mir_eval.separation import bss_eval_sources

        if Y.shape[2] == 1:
            y = pra.transform.synthesis(
                Y[:, :, 0], framesize, framesize // 4, win=win_s
            )[:, None]
        else:
            y = pra.transform.synthesis(Y, framesize, framesize // 4, win=win_s)

        if algo_name not in parameters["overdet_algos"]:
            # for non-overdetermined algorithms, order outputs by decreasing power
            new_ord = np.argsort(np.std(y, axis=0))[::-1]
            y = y[:, new_ord]

        m = np.minimum(y.shape[0] - framesize // 4, ref.shape[1])

        synth[:n_targets, :m, 0] = y[framesize // 4 : m + framesize // 4, :n_targets].T

        # compute SIR & SDR for the targets and the background
        sdr, sir, sar, perm = bss_eval_sources(
                ref[:n_targets+1, :m, 0], synth[:, :m, 0]
        )

        # keep only the targets' SIR & SDR as the final output
        SDR.append(sdr[:n_targets].tolist())
        SIR.append(sir[:n_targets].tolist())

    # store results in a list, one entry per algorithm
    results = []

    # compute the initial values of SDR/SIR
    init_sdr = []
    init_sir = []
    if not parameters["monitor_convergence"]:
        convergence_callback(
            X_mics, n_targets, init_sdr, init_sir, ref, framesize, win_s, "init"
        )

    for full_name, params in parameters["algorithm_kwargs"].items():

        name = params['algo']
        kwargs1 = params['kwargs1']
        kwargs2 = params['kwargs2']

        results.append(
            {
                "algorithm": full_name,
                "n_targets": n_targets,
                "n_mics": n_mics,
                "rt60": rt60,
                "sinr": sinr,
                "seed": seed,
                "sdr": [],
                "sir": [],  # to store the result
                "runtime" : np.nan,
                "n_samples" : n_samples,
            }
        )

        if parameters["monitor_convergence"]:

            # NOTE(review): `cb` is defined here but never handed to any of the
            # separators below — confirm whether convergence monitoring is
            # actually wired into the solvers.
            def cb(Y):
                convergence_callback(
                    Y,
                    n_targets,
                    results[-1]["sdr"],
                    results[-1]["sir"],
                    ref,
                    framesize,
                    win_s,
                    name,
                )

        else:
            cb = None
            # avoid one computation by using the initial values of sdr/sir
            results[-1]["sdr"].append(init_sdr[0])
            results[-1]["sir"].append(init_sir[0])

        try:
            t_start = time.perf_counter()

            if name == "MNMF":
                # Run MNMF
                separater = MNMF(xp=xp, **kwargs1)
                separater.load_spectrogram(X_sim)
                separater.file_id = 0
                separater.fs = fs
                Y = separater.solve(save_likelihood=False, save_parameter=False, **kwargs2)

            elif name == "MNMF_online":
                # Run MNMF_online
                T = X_sim.shape[1]
                Y = 0
                for t in range(first_batch_size, T + 1):
                    # carve out the current online mini-batch of frames
                    if t == first_batch_size:
                        tmp = X_sim[:, :t, :]
                    else:
                        tmp = X_sim[:, t - mini_batch_size:t, :]
                    spec = tmp

                    separater = MNMF_online(xp=xp, **kwargs1, total_frame=T, all_sep_spec=Y, first_batch_size=first_batch_size)
                    separater.load_spectrogram(spec)
                    separater.file_id = t+1-first_batch_size
                    separater.fs = fs

                    if separater.file_id == 1:
                        pre_batch = pre_batch_init()
                    else:
                        # hand over the W parameters from the previous mini-batch
                        separater.pre_batch_W_a_1 = pre_batch.W_a_1
                        separater.pre_batch_W_b_1 = pre_batch.W_b_1
                        separater.pre_batch_W_NFK = pre_batch.W_NFK
                        # hand over the G parameters from the previous mini-batch
                        separater.pre_batch_phi = pre_batch.phi
                        separater.pre_batch_psi = pre_batch.psi
                        separater.pre_batch_covarianceMatrix_NFMM = pre_batch.covarianceMatrix_NFMM

                    Y = separater.solve(save_likelihood=False, save_parameter=False, **kwargs2)

                    # save the online parameters for the next mini-batch
                    # W parameters
                    pre_batch.W_a_1 = separater.W_a_1
                    pre_batch.W_b_1 = separater.W_b_1
                    pre_batch.W_NFK = separater.W_NFK
                    # G parameters
                    pre_batch.phi = separater.phi
                    pre_batch.psi = separater.psi
                    pre_batch.covarianceMatrix_NFMM = separater.covarianceMatrix_NFMM

            elif name == "MNMF_DP":
                # Run MNMF_DP
                separater = MNMF_DP(speech_VAE=speech_VAE, xp=xp, n_latent=n_latent, **kwargs1)
                separater.load_spectrogram(X_sim)
                separater.name_DNN = name_DNN
                separater.file_id = 0
                separater.fs = fs
                Y = separater.solve(save_likelihood=False, save_parameter=False, **kwargs2)

            elif name == "MNMF_DP_online":
                # Run MNMF_DP_online
                T = X_sim.shape[1]
                Y = 0
                for t in range(first_batch_size, T + 1):
                    # carve out the current online mini-batch of frames
                    if t == first_batch_size:
                        tmp = X_sim[:, :t, :]
                    else:
                        tmp = X_sim[:, t - mini_batch_size:t, :]
                    spec = tmp

                    separater = MNMF_DP_online(speech_VAE=speech_VAE, xp=xp, n_latent=n_latent, **kwargs1, total_frame=T, all_sep_spec=Y, first_batch_size=first_batch_size)
                    separater.load_spectrogram(spec)
                    separater.name_DNN = name_DNN
                    separater.file_id = t+1-first_batch_size
                    separater.fs = fs

                    if separater.file_id == 1:
                        pre_batch = pre_batch_init_DP()
                    else:
                        # hand over the u parameters from the previous mini-batch
                        separater.pre_batch_u_a_1 = pre_batch.u_a_1
                        separater.pre_batch_u_b_1 = pre_batch.u_b_1
                        separater.pre_batch_u_F = pre_batch.u_F
                        # hand over the W parameters from the previous mini-batch
                        separater.pre_batch_W_a_1 = pre_batch.W_a_1
                        separater.pre_batch_W_b_1 = pre_batch.W_b_1
                        separater.pre_batch_W_noise_NnFK = pre_batch.W_noise_NnFK
                        # hand over the G parameters from the previous mini-batch
                        separater.pre_batch_phi = pre_batch.phi
                        separater.pre_batch_psi = pre_batch.psi
                        separater.pre_batch_covarianceMatrix_NFMM = pre_batch.covarianceMatrix_NFMM

                    Y = separater.solve(save_likelihood=False, save_parameter=False, **kwargs2)

                    # save the online parameters for the next mini-batch
                    # u parameters
                    pre_batch.u_a_1 = separater.u_a_1
                    pre_batch.u_b_1 = separater.u_b_1
                    pre_batch.u_F = separater.u_F
                    # W parameters
                    pre_batch.W_a_1 = separater.W_a_1
                    pre_batch.W_b_1 = separater.W_b_1
                    pre_batch.W_noise_NnFK = separater.W_noise_NnFK
                    # G parameters
                    pre_batch.phi = separater.phi
                    pre_batch.psi = separater.psi
                    pre_batch.covarianceMatrix_NFMM = separater.covarianceMatrix_NFMM

            elif name == "FastMNMF":
                # Run FastMNMF
                separater = FastMNMF(xp=xp, **kwargs1)
                separater.load_spectrogram(X_sim)
                separater.file_id = 0
                separater.fs = fs
                Y = separater.solve(save_likelihood=False, save_parameter=False, **kwargs2)

            elif name == "FastMNMF_online":
                # Run FastMNMF_online
                T = X_sim.shape[1]
                Y = 0
                for t in range(first_batch_size, T + 1):
                    # carve out the current online mini-batch of frames
                    if t == first_batch_size:
                        tmp = X_sim[:, :t, :]
                    else:
                        tmp = X_sim[:, t - mini_batch_size:t, :]
                    spec = tmp

                    separater = FastMNMF_online(xp=xp, **kwargs1, total_frame=T, all_sep_spec=Y, first_batch_size=first_batch_size)
                    separater.load_spectrogram(spec)
                    separater.file_id = t + 1 - first_batch_size
                    separater.fs = fs

                    if separater.file_id == 1:
                        pre_batch = pre_batch_init_fast()
                    else:
                        # hand over the W parameters from the previous mini-batch
                        separater.pre_batch_a_W = pre_batch.a_W
                        separater.pre_batch_b_W = pre_batch.b_W
                        separater.pre_batch_W_NFK = pre_batch.W_NFK
                        # hand over the G parameters from the previous mini-batch
                        separater.pre_batch_G_a_1 = pre_batch.G_a_1
                        separater.pre_batch_G_b_1 = pre_batch.G_b_1
                        separater.pre_batch_G_NFM = pre_batch.G_NFM

                    Y = separater.solve(save_likelihood=False, save_parameter=False, **kwargs2)

                    # save the online parameters for the next mini-batch
                    # W parameters
                    pre_batch.a_W = separater.a_W
                    pre_batch.b_W = separater.b_W
                    pre_batch.W_NFK = separater.W_NFK
                    # G parameters
                    pre_batch.G_a_1 = separater.G_a_1
                    pre_batch.G_b_1 = separater.G_b_1
                    pre_batch.G_NFM = separater.G_NFM

            elif name == "FastMNMF2_online":
                # Run FastMNMF2_online
                T = X_sim.shape[1]
                Y = 0
                for t in range(first_batch_size, T + 1):
                    # carve out the current online mini-batch of frames
                    if t == first_batch_size:
                        tmp = X_sim[:, :t, :]
                    else:
                        tmp = X_sim[:, t - mini_batch_size:t, :]
                    spec = tmp

                    separater = FastMNMF2_online(xp=xp, **kwargs1, total_frame=T, all_sep_spec=Y, first_batch_size=first_batch_size)
                    separater.load_spectrogram(spec)
                    separater.file_id = t + 1 - first_batch_size
                    separater.fs = fs

                    if separater.file_id == 1:
                        pre_batch = pre_batch_init_fast2()
                    else:
                        # hand over the W parameters from the previous mini-batch
                        separater.pre_batch_a_W = pre_batch.a_W
                        separater.pre_batch_b_W = pre_batch.b_W
                        separater.pre_batch_W_NFK = pre_batch.W_NFK
                        # hand over the G parameters from the previous mini-batch
                        separater.pre_batch_G_a_1 = pre_batch.G_a_1
                        separater.pre_batch_G_b_1 = pre_batch.G_b_1
                        separater.pre_batch_G_NM = pre_batch.G_NM

                    Y = separater.solve(save_likelihood=False, save_parameter=False, **kwargs2)

                    # save the online parameters for the next mini-batch
                    # W parameters
                    pre_batch.a_W = separater.a_W
                    pre_batch.b_W = separater.b_W
                    pre_batch.W_NFK = separater.W_NFK
                    # G parameters
                    pre_batch.G_a_1 = separater.G_a_1
                    pre_batch.G_b_1 = separater.G_b_1
                    pre_batch.G_NM = separater.G_NM

            elif name == "FastMNMF_DP":
                # Run FastMNMF_DP
                separater = FastMNMF_DP(speech_VAE=speech_VAE, xp=xp, n_latent=n_latent, **kwargs1)
                separater.load_spectrogram(X_sim)
                separater.name_DNN = name_DNN
                separater.file_id = 0
                separater.fs = fs
                Y = separater.solve(save_likelihood=False, save_parameter=False, **kwargs2)

            elif name == "ILRMA":
                # Run ILRMA
                separater = ILRMA(xp=xp, **kwargs1)
                separater.load_spectrogram(X_sim)
                separater.file_id = 0
                separater.fs = fs
                Y = separater.solve(save_likelihood=False, save_parameter=False, **kwargs2)

            else:
                continue

            t_finish = time.perf_counter()

            # The last evaluation
            convergence_callback(
                Y,
                n_targets,
                results[-1]["sdr"],
                results[-1]["sir"],
                ref,
                framesize,
                win_s,
                name,
            )

            results[-1]["runtime"] = t_finish - t_start

        # Catch any failure of a single algorithm so the remaining ones still
        # run.  `Exception` (not a bare `except:`) is used so that
        # KeyboardInterrupt/SystemExit still propagate and can stop the batch.
        except Exception:
            import os, json

            pid = os.getpid()
            # report last sdr/sir as np.nan
            results[-1]["sdr"].append(np.nan)
            results[-1]["sir"].append(np.nan)
            # now write the problem to file
            fn_err = os.path.join(
                parameters["_results_dir"], "error_{}.json".format(pid)
            )
            with open(fn_err, "a") as f:
                f.write(json.dumps(results[-1], indent=4))
            # skip to next iteration
            continue

    # restore RNG former state
    np.random.set_state(rng_state)

    return results


def generate_arguments(parameters):
    """Build the list of per-simulation argument tuples.

    Temporarily reseeds NumPy's global RNG with ``parameters["seed"]`` so
    both the sampled wav files and the per-run seeds are reproducible, then
    restores the previous RNG state before returning.
    """
    saved_state = np.random.get_state()
    np.random.seed(parameters["seed"])

    # one dedicated seed drives the selection of the sample files
    file_seed = int(np.random.randint(2 ** 32, dtype=np.uint32))
    all_wav_files = sampling(
        parameters["n_repeat"],
        parameters["n_interferers"] + np.max(parameters["n_targets_list"]),
        parameters["samples_list"],
        gender_balanced=True,
        seed=file_seed,
    )

    arg_list = []
    for n_targets in parameters["n_targets_list"]:
        for n_mics in parameters["n_mics_list"]:

            # skip underdetermined cases
            if n_targets > n_mics:
                continue

            for rt60 in parameters["rt60_list"]:
                for sinr in parameters["sinr_list"]:
                    for wav_files in all_wav_files:

                        # draw a fresh seed for this particular simulation
                        run_seed = int(np.random.randint(2 ** 32, dtype=np.uint32))

                        arg_list.append(
                            [n_targets, n_mics, rt60, sinr, wav_files, run_seed]
                        )

    np.random.set_state(saved_state)

    return arg_list


if __name__ == "__main__":

    # Hand the per-run worker (`one_loop`) and the argument generator to the
    # rrtools experiment runner; results are collected under ``data/``.
    rrtools.run(
        one_loop,
        generate_arguments,
        func_init=init,
        base_dir=base_dir,
        results_dir="data/",
        description="Simulation for Independent Vector Analysis with more Microphones than Sources (submitted WASPAA 2019)",
    )
