#! /usr/bin/env python3
# coding: utf-8
# Ref. [1] Semi-Supervised Multichannel Speech Enhancement With a Deep Speech Prior
# Ref. [2] Fast Multichannel Nonnegative Matrix Factorization with Directivity-Aware Jointly-Diagonalizable
#          Spatial Covariance Matrices for Blind Source Separation

import sys, os
import numpy as np
import chainer
from chainer import functions as chf
import pickle as pic
from progressbar import progressbar

from configure_FastModel import *
from FastFCA_part import FastFCA_part

# Probe for GPU support; the flag must be defined on BOTH paths so later
# code can test it without a NameError when chainer/cupy is missing.
try:
    from chainer import cuda
    FLAG_GPU_Available = True
except ImportError:
    FLAG_GPU_Available = False
    print("---Warning--- You cannot use GPU acceleration because chainer or cupy is not installed")

class FastMNMF_DP_part_new(FastFCA_part):
    """ Blind Speech Enhancement Using Fast Multichannel NMF with a Deep Speech Prior (FastMNMF_DP)

    X_FTM: the observed complex spectrogram
    Q_FMM: diagonalizer that converts a spatial covariance matrix (SCM) to a diagonal matrix
    G_NFM: diagonal elements of the diagonalized SCMs (N means the number of all sources)
    W_noise_NnFK: basis vectors for noise sources (Nn means the number of noise sources)
    H_noise_NnKT: activations for noise sources
    Z_speech_DT: latent variables for speech
    power_speech_FT: power spectra of speech that is the output of DNN(Z_speech_DT)
    lambda_NFT: power spectral densities of each source
        lambda_NFT[0] = U_F * V_T * power_speech_FT
        lambda_NFT[1:] = W_noise_NnFK @ H_noise_NnKT
    Qx_power_FTM: power spectra of Qx
    Y_FTM: \sum_n lambda_NFT G_NFM
    """

    def __init__(self, speech_VAE=None, n_noise=1, n_Z_iteration=30, n_latent=16, n_basis_noise=2, xp=np, init_SCM="unit", mode_update_Z="sampling", normalize_encoder_input=True, total_frame=0, all_sep_spec=0, first_batch_size=0, voice_frame=10, dl_ratio=0):
        """ initialize FastMNMF_DP

        Parameters:
        -----------
            n_noise: int
                the number of noise sources
            speech_VAE: VAE
                trained speech VAE network (necessary if you use VAE as speech model)
            n_latent: int
                the dimension of latent variable Z
            n_basis_noise: int
                the number of bases of each noise source
            init_SCM: str
                how to initialize covariance matrix {unit, obs, ILRMA}
            mode_update_Z: str
                how to update latent variable Z {sampling, backprop}
            total_frame: int
                total number of STFT frames of the whole recording (used to detect the final batch in solve())
            all_sep_spec: ndarray
                accumulator of the separated spectrogram frames produced so far (appended to by cat_separated_spec)
            first_batch_size: int
                number of frames in the first (initialization) batch
            voice_frame: int
                number of recent voice frames kept as prior context
            dl_ratio: float
                diagonal loading ratio (presumably consumed by the parent class FastFCA_part -- confirm)
        """
        super(FastMNMF_DP_part_new, self).__init__(n_source=n_noise+1, xp=xp, init_SCM=init_SCM)
        self.n_source, self.n_speech, self.n_noise = n_noise+1, 1, n_noise
        self.speech_VAE = speech_VAE
        self.n_Z_iteration = n_Z_iteration
        self.n_basis_noise = n_basis_noise
        self.n_latent = n_latent
        self.mode_update_Z = mode_update_Z
        self.normalize_encoder_input = normalize_encoder_input
        self.method_name = "FastMNMF_DP_part"
        self.total_frame = total_frame
        self.all_sep_spec = all_sep_spec
        self.first_batch_size = first_batch_size
        self.voice_frame = voice_frame
        self.dl_ratio = dl_ratio


    def set_parameter(self, n_noise=None, n_iteration=None, n_Z_iteration=None, n_basis_noise=None, init_SCM=None, mode_update_Z=None):
        """ set parameters

        Parameters:
        -----------
            n_noise: int
                the number of sources
            n_iteration: int
                the number of iteration
            n_Z_iteration: int
                the number of iteration of updating Z in each iteration
            n_basis_noise: int
                the number of basis of noise sources
            init_SCM: str
                how to initialize covariance matrix {unit, obs, ILRMA}
            mode_update_Z: str
                how to update latent variable Z {sampling, backprop}
        """
        if n_noise is not None:
            self.n_noise = n_noise
            self.n_source = n_noise + 1
        if n_iteration is not None:
            self.n_iteration = n_iteration
        if n_Z_iteration is not None:
            self.n_Z_iteration = n_Z_iteration
        if n_basis_noise is not None:
            self.n_basis_noise = n_basis_noise
        if init_SCM is not None:
            self.init_SCM = init_SCM
        if mode_update_Z is not None:
            self.mode_update_Z = mode_update_Z

    # initialize power spectral densities (overrides the parent-class implementation)
    def initialize_PSD(self):
        """
        initialize parameters related to power spectral density (PSD)
        W, H, U, V, Z
        """
        power_observation_FT = (self.xp.abs(self.X_FTM) ** 2).mean(axis=2)
        shape = 2
        # noise basis W: Dirichlet random initialization (columns sum to 1 over frequency)
        self.W_noise_NnFK = self.xp.random.dirichlet(np.ones(self.n_freq)*shape, size=[self.n_noise, self.n_basis_noise]).transpose(0, 2, 1)
        # noise activation H: Gamma random initialization, floored at EPS for numerical stability
        self.H_noise_NnKT = self.xp.random.gamma(shape, (power_observation_FT.mean() * self.n_freq * self.n_mic / (self.n_noise * self.n_basis_noise)) / shape, size=[self.n_noise, self.n_basis_noise, self.n_time])
        self.H_noise_NnKT[self.H_noise_NnKT < EPS] = EPS

        # U and V (speech frequency scaling factor and time activation): all-ones,
        # with U normalized over frequency
        self.U_F = self.xp.ones(self.n_freq) / self.n_freq
        self.V_T = self.xp.ones(self.n_time)

        # encode the observation with the speech VAE to initialize the latent variable Z
        if self.normalize_encoder_input:
            power_observation_FT = power_observation_FT / power_observation_FT.sum(axis=0).mean()

        self.Z_speech_DT = self.speech_VAE.encode_cupy(power_observation_FT.astype(self.xp.float32))
        self.z_link_speech = Z_link(self.Z_speech_DT.T)
        self.z_optimizer_speech = chainer.optimizers.Adam().setup(self.z_link_speech)
        self.power_speech_FT = self.speech_VAE.decode_cupy(self.Z_speech_DT)

        self.lambda_NFT = self.xp.zeros([self.n_source, self.n_freq, self.n_time])
        self.lambda_NFT[0] = self.U_F[:, None] * self.V_T[None] * self.power_speech_FT  # (3) of [1]
        self.lambda_NFT[1:] = self.W_noise_NnFK @ self.H_noise_NnKT  # (5) of [1]


    def make_fileName_suffix(self):
        # NOTE(review): the "latent=" field is filled with mode_update_Z, not n_latent
        # (n_latent goes into "D=") -- the label looks misleading, but it is kept
        # unchanged because downstream tooling may parse these filenames.
        self.fileName_suffix = "S={}-it={}-itZ={}-Ln={}-D={}-init={}-latent={}-vf={}-fb={}".format(self.n_source, self.n_iteration, self.n_Z_iteration, self.n_basis_noise, self.n_latent, self.init_SCM, self.mode_update_Z, self.voice_frame, self.first_batch_size)

        if hasattr(self, "name_DNN"):
            self.fileName_suffix += "-DNN={}".format(self.name_DNN)

        if hasattr(self, "file_id"):
            self.fileName_suffix += "-ID={}".format(self.file_id)
        else:
            print("====================\n\nWarning: Please set self.file_id\n\n====================")

        print("parameter:", self.fileName_suffix)


    def update(self):
        """Run one full iteration of the alternating parameter updates."""
        self.update_UV()
        self.update_Z_speech()
        self.update_WH_noise()
        self.update_CovarianceDiagElement()
        # NOTE(review): "udpate" spelling matches the method name expected on
        # FastFCA_part (not visible here) -- confirm before renaming.
        self.udpate_Diagonalizer()
        self.normalize()


    def normalize(self):
        """Normalize Q, G, U, V, W, H to remove scale ambiguities and refresh lambda."""
        phi_F = self.xp.sum(self.Q_FMM * self.Q_FMM.conj(), axis=(1, 2)).real / self.n_mic  # (37) of [2]
        self.Q_FMM = self.Q_FMM / self.xp.sqrt(phi_F)[:, None, None]  # (26) of [2]
        self.G_NFM = self.G_NFM / phi_F[None, :, None]  # (26) of [2]

        mu_NF = (self.G_NFM).sum(axis=2).real  # (27) of [2]
        self.G_NFM = self.G_NFM / mu_NF[:, :, None]  # (27) of [2]
        self.U_F = self.U_F * mu_NF[0]  # (62) of [1]
        self.W_noise_NnFK = self.W_noise_NnFK * mu_NF[1:][:, :, None]  # (62) of [1]

        nu = self.U_F.sum()  # (63) of [1]
        self.U_F = self.U_F / nu  # (63) of [1]
        self.V_T = nu * self.V_T  # (63) of [1]
        self.lambda_NFT[0] = self.U_F[:, None] * self.V_T[None] * self.power_speech_FT  # (3) of [1]

        nu_NnK = self.W_noise_NnFK.sum(axis=1)  # (64) of [1]
        self.W_noise_NnFK = self.W_noise_NnFK / nu_NnK[:, None]  # (64) of [1]
        self.H_noise_NnKT = self.H_noise_NnKT * nu_NnK[:, :, None]  # (64) of [1]
        self.lambda_NFT[1:] = self.W_noise_NnFK @ self.H_noise_NnKT + EPS  # (5) of [1]

        self.reset_variable()


    def update_WH_noise(self):  # the "Fast" part follows (20-22) of [2]
        """Multiplicative updates of the noise NMF parameters W and H."""
        # FIX: index G_NFM with [1:] so ALL noise sources are used, not only the
        # first one; for n_noise == 1 this is numerically identical to the old code,
        # and it matches the [1:] indexing used in update_Z_speech.
        tmp1_NnFT = (self.G_NFM[1:, :, None] * (self.Qx_power_FTM / (self.Y_FTM ** 2))[None]).sum(axis=3)  # (50)(53) of [1]
        tmp2_NnFT = (self.G_NFM[1:, :, None] / self.Y_FTM[None]).sum(axis=3)  # (51)(54) of [1]
        a_W = (self.H_noise_NnKT[:, None] * tmp1_NnFT[:, :, None]).sum(axis=3)  # N F K T M
        b_W = (self.H_noise_NnKT[:, None] * tmp2_NnFT[:, :, None]).sum(axis=3)
        a_H = (self.W_noise_NnFK[..., None] * tmp1_NnFT[:, :, None]).sum(axis=1)  # N F K T M
        b_H = (self.W_noise_NnFK[..., None] * tmp2_NnFT[:, :, None]).sum(axis=1)  # N F K T M
        self.W_noise_NnFK = self.W_noise_NnFK * self.xp.sqrt(a_W / b_W)  # (52) of [1]
        self.H_noise_NnKT = self.H_noise_NnKT * self.xp.sqrt(a_H / b_H)  # (55) of [1]

        self.lambda_NFT[1:] = self.W_noise_NnFK @ self.H_noise_NnKT + EPS
        self.Y_FTM = (self.lambda_NFT[..., None] * self.G_NFM[:, :, None]).sum(axis=0)


    def update_UV(self):
        """Multiplicative updates of the speech scaling factor U_F and activation V_T."""
        a_1 = ((self.V_T[None] * self.power_speech_FT)[:, :, None] * self.Qx_power_FTM * self.G_NFM[0, :, None] / (self.Y_FTM ** 2)).sum(axis=2).sum(axis=1).real  # (44) of [1] & (20) of [2]
        b_1 = ((self.V_T[None] * self.power_speech_FT)[:, :, None] * self.G_NFM[0, :, None] / self.Y_FTM).sum(axis=2).sum(axis=1).real  # (45) of [1] & (20) of [2]
        self.U_F = self.U_F * self.xp.sqrt(a_1 / b_1)  # (46) of [1]
        self.lambda_NFT[0] = self.U_F[:, None] * self.V_T[None] * self.power_speech_FT  # (3) of [1]
        self.Y_FTM = (self.lambda_NFT[..., None] * self.G_NFM[:, :, None]).sum(axis=0)  # below (15) of [2]

        a_1 = ((self.U_F[:, None] * self.power_speech_FT)[:, :, None] * self.Qx_power_FTM * self.G_NFM[0, :, None] / (self.Y_FTM ** 2)).sum(axis=2).sum(axis=0).real  # (47) of [1] & (20) of [2]
        b_1 = ((self.U_F[:, None] * self.power_speech_FT)[:, :, None] * self.G_NFM[0, :, None] / self.Y_FTM).sum(axis=2).sum(axis=0).real  # (48) of [1] & (20) of [2]
        self.V_T = self.V_T * self.xp.sqrt(a_1 / b_1)  # (49) of [1]
        self.lambda_NFT[0] = self.U_F[:, None] * self.V_T[None] * self.power_speech_FT  # (3) of [1]
        self.Y_FTM = (self.lambda_NFT[..., None] * self.G_NFM[:, :, None]).sum(axis=0)  # below (15) of [2]


    def loss_func_Z(self, z, vae, n):  # negative log-likelihood used to update Z by backprop
        power_tmp_FT = chf.exp(vae.decode(z).T) + EPS
        Y_tmp_FTM = power_tmp_FT[:, :, None] * self.UVG_FTM + self.WHG_noise_FTM
        return chf.sum(chf.log(Y_tmp_FTM) + self.Qx_power_FTM / Y_tmp_FTM) / (self.n_freq * self.n_mic)


    # update the speech latent variable Z
    def update_Z_speech(self, var_propose_distribution=1e-4):
        """
        Parameters:
            var_propose_distribution: float
                the variance of the propose distribution

        Results:
            self.Z_speech_DT: self.xp.array [ n_latent x T ]
                the latent variable of each speech
        """
        self.WHG_noise_FTM = (self.lambda_NFT[1:][..., None] * self.G_NFM[1:, :, None]).sum(axis=0)
        self.UVG_FTM = (self.U_F[:, None] * self.V_T[None])[:, :, None] * self.G_NFM[0, :, None]

        if "backprop" in self.mode_update_Z:  # gradient-based update of Z through the VAE decoder
            for it in range(self.n_Z_iteration):
                with chainer.using_config('train', False):
                    self.z_optimizer_speech.update(self.loss_func_Z, self.z_link_speech.z, self.speech_VAE, 0)

            self.Z_speech_DT = self.z_link_speech.z.data.T
            self.power_speech_FT = self.speech_VAE.decode_cupy(self.Z_speech_DT)

        if "sampling" in self.mode_update_Z:  # Metropolis-Hastings; acceptance rate is computed from the likelihood
            log_var = self.xp.log(self.xp.ones_like(self.Z_speech_DT).astype(self.xp.float32) * var_propose_distribution)
            Z_speech_old_DT = self.Z_speech_DT
            power_old_FTM = self.speech_VAE.decode_cupy(Z_speech_old_DT)[:, :, None]

            for it in range(self.n_Z_iteration):
                Z_speech_new_DT = chf.gaussian(Z_speech_old_DT, log_var).data  # propose around the current Z ((4) of [1]: Z has a Gaussian prior)
                lambda_old_FTM = power_old_FTM * self.UVG_FTM + self.WHG_noise_FTM
                power_new_FTM = self.speech_VAE.decode_cupy(Z_speech_new_DT)[:, :, None]
                lambda_new_FTM = power_new_FTM * self.UVG_FTM + self.WHG_noise_FTM
                acceptance_rate = self.xp.exp((self.Qx_power_FTM * (1 / lambda_old_FTM - 1 / lambda_new_FTM)).sum(axis=2).sum(axis=0) + self.xp.log( ( lambda_old_FTM / lambda_new_FTM ).prod(axis=2).prod(axis=0) ) )
                # draw T uniform(0,1) values; frames whose draw is below the acceptance rate are accepted
                accept_flag = self.xp.random.random([self.n_time]) < acceptance_rate
                # accepted frames take the new sample, the others keep the old one
                Z_speech_old_DT[:, accept_flag] = Z_speech_new_DT[:, accept_flag]
                power_old_FTM[:, accept_flag] = power_new_FTM[:, accept_flag]

            self.Z_speech_DT = Z_speech_old_DT
            self.z_link_speech.z = chainer.Parameter(self.Z_speech_DT.T)
            self.power_speech_FT = self.speech_VAE.decode_cupy(self.Z_speech_DT)

        self.lambda_NFT[0] = self.U_F[:, None] * self.V_T[None] * self.power_speech_FT  # (3) of [1]
        self.Y_FTM = (self.lambda_NFT[..., None] * self.G_NFM[:, :, None]).sum(axis=0)  # below (15) of [2]


    def save_parameter(self, fileName):
        """Pickle the model parameters (moved to CPU first when running on GPU)."""
        param_list = [self.lambda_NFT, self.G_NFM, self.Q_FMM, self.U_F, self.V_T, self.Z_speech_DT, self.W_noise_NnFK, self.H_noise_NnKT]
        if self.xp != np:
            param_list = [self.convert_to_NumpyArray(param) for param in param_list]
        pic.dump(param_list, open(fileName, "wb"))


    def load_parameter(self, fileName):
        """Load pickled parameters (moved to GPU when running on GPU)."""
        param_list = pic.load(open(fileName, "rb"))
        if self.xp != np:
            param_list = [cuda.to_gpu(param) for param in param_list]
        self.lambda_NFT, self.G_NFM, self.Q_FMM, self.U_F, self.V_T, self.Z_speech_DT, self.W_noise_NnFK, self.H_noise_NnKT = param_list


    def solve(self, n_iteration=100, save_likelihood=False, save_parameter=False, save_wav=False, save_inter_wav=False, save_path="./", interval_save_parameter=30, mic_index=MIC_INDEX):
        """
        Parameters:
            save_likelihood: boolean
                save likelihood and lower bound or not
            save_parameter: boolean
                save parameter or not
            save_wav: boolean
                save intermediate separated signal or not
            save_path: str
                directory for saving data
            interval_save_parameter: int
                interval of saving parameter

        Returns:
            self.all_sep_spec: ndarray
                accumulated separated spectrogram including the frame processed here
        """
        self.n_iteration = n_iteration

        self.initialize_PSD()
        self.initialize_covarianceMatrix()
        self.make_fileName_suffix()

        log_likelihood_array = []
        for it in progressbar(range(self.n_iteration)):
            self.update()

            if save_parameter and ((it + 1) % interval_save_parameter == 0) and ((it + 1) != self.n_iteration):
                self.save_parameter(save_path + "{}-parameter-{}-{}.pic".format(self.method_name, self.fileName_suffix, it + 1))

            if save_inter_wav and ((it + 1) % interval_save_parameter == 0) and ((it + 1) != self.n_iteration):
                self.separate_FastWienerFilter(mic_index=MIC_INDEX)
                self.save_separated_signal(save_path + "{}-sep-Wiener-{}-{}.wav".format(self.method_name, self.fileName_suffix, it + 1))

            if save_likelihood and ((it + 1) % interval_save_parameter == 0) and ((it + 1) != self.n_iteration):
                log_likelihood_array.append(self.calculate_log_likelihood())

        if save_parameter:
            self.save_parameter(save_path + "{}-parameter-{}.pic".format(self.method_name, self.fileName_suffix))

        if save_likelihood:
            log_likelihood_array.append(self.calculate_log_likelihood())
            pic.dump(log_likelihood_array, open(save_path + "{}-likelihood-interval={}-{}.pic".format(self.method_name, interval_save_parameter, self.fileName_suffix), "wb"))

        self.separate_FastWienerFilter(mic_index=mic_index)
        # FIX: use self.first_batch_size (was the bare global first_batch_size, which
        # only exists when this module is run as a script and raises NameError otherwise)
        if int(self.file_id) == self.total_frame - self.first_batch_size:
            # last frame of the recording: append it and write the full wav file
            self.cat_separated_spec()
            self.save_separated_signal(save_path + "{}-sep-Wiener-{}.wav".format(self.method_name, self.fileName_suffix))
        else:
            self.cat_separated_spec()

        return self.all_sep_spec


    def cat_separated_spec(self):
        # append the newest separated frame (last time index) to the accumulator
        self.all_sep_spec = np.append(self.all_sep_spec, self.separated_spec[:, :, -1, None], axis=2)


class Z_link(chainer.link.Link):
    """Chainer link that wraps the latent variable Z as a trainable Parameter."""

    def __init__(self, z):
        super().__init__()
        with self.init_scope():
            # register z inside init_scope so the optimizer can update it
            self.z = chainer.Parameter(z)


if __name__ == "__main__":
    import soundfile as sf
    import librosa
    import sys, os
    from chainer import serializers
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument( 'input_fileName', type= str, help='filename of the multichannel observed signals')
    parser.add_argument(      '--file_id', type= str, default="None", help='file id')
    parser.add_argument(          '--gpu', type= int, default=     0, help='GPU ID')
    parser.add_argument(     '--n_latent', type= int, default=    16, help='dimention of encoded vector')
    parser.add_argument(      '--n_noise', type= int, default=     1, help='number of noise')
    parser.add_argument('--n_basis_noise', type= int, default=    64, help='number of basis of noise (MODE_noise=NMF)')
    parser.add_argument(     '--init_SCM', type= str, default= "obs", help='unit, obs, ILRMA')
    parser.add_argument(  '--n_iteration', type= int, default=   100, help='number of iteration')
    parser.add_argument('--n_Z_iteration', type= int, default=    30, help='number of update Z iteration')
    parser.add_argument('--mode_update_Z', type= str, default="sampling", help='sampling, sampling2, backprop, backprop2, hybrid, hybrid2')
    parser.add_argument(    '--save_path', type= str, default="..//output//", help='save path of wave file')
    # NOTE(review): type=str with default=True means ANY command-line value
    # (including "False") is truthy -- confirm intended semantics before changing.
    parser.add_argument(     '--save_wav', type= str, default=True, help='save wave file')
    parser.add_argument(     '--dl_ratio', type=float, default= 0.000001, help='diagonal loading ratio')
    parser.add_argument(     '--dect_act', type= int, default=     50, help='energy times of activation detection')
    parser.add_argument('--frequency_ratio', type= float, default=0.25, help='low/high frequency cutoff points')
    parser.add_argument(  '--voice_frame', type= int, default=   30, help='prior voice frame')
    args = parser.parse_args()

    if args.gpu < 0:
        import numpy as xp
    else:
        import cupy as xp
        print("Use GPU " + str(args.gpu))
        chainer.cuda.get_device_from_id(args.gpu).use()

    sys.path.append("../DeepSpeechPrior")
    import network_VAE
    model_fileName = "../DeepSpeechPrior/model-VAE-best-scale=gamma-D={}.npz".format(args.n_latent)
    speech_VAE = network_VAE.VAE(n_latent=args.n_latent)
    serializers.load_npz(model_fileName, speech_VAE)
    name_DNN = "VAE"

    if xp != np:
        speech_VAE.to_gpu()

    wav, fs = sf.read(args.input_fileName)
    # promote mono audio to a single-channel 2-D array
    if wav.ndim == 1:
        wav = wav[:, None]
    wav = wav.T
    M = len(wav)
    for m in range(M):
        tmp = librosa.core.stft(wav[m], n_fft=1024, hop_length=256)
        if m == 0:
            # FIX: np.complex was removed in NumPy 1.24; the builtin complex is equivalent
            spec_all = np.zeros([tmp.shape[0], tmp.shape[1], M], dtype=complex)
        spec_all[:, :, m] = tmp

    T = spec_all.shape[1]
    F = spec_all.shape[0]
    dect_rho = args.dect_act  # low/high frequency energy ratio threshold for voice-activity detection
    freq_point = args.frequency_ratio  # cutoff points splitting low and high frequency bands
    voice_frame = args.voice_frame  # number of most recent voice frames used as prior
    LF = round(freq_point * F)
    HF = round((1-freq_point) * F)
    LF_energy = np.zeros(T)
    HF_energy = np.zeros(T)
    Frame_energy = np.zeros(T)
    voice_flag = np.zeros(T)
    for t in range(T):
        LF_energy[t] = (abs(spec_all[:LF, t, :]) ** 2).sum(axis=(0, 1))
        HF_energy[t] = (abs(spec_all[HF:, t, :]) ** 2).sum(axis=(0, 1))
        Frame_energy[t] = (abs(spec_all[:, t, :]) ** 2).sum(axis=(0, 1))
        if LF_energy[t] > (HF_energy[t] * dect_rho) and Frame_energy[t] > 500:
            voice_flag[t] = 1

    # FIX: default to 0 so first_batch_size is defined even when no frame
    # ever exceeds the energy threshold (the original loop could fall
    # through without binding it, raising NameError below)
    first_batch_size = 0
    for t in range(T):
        if Frame_energy[t] > 0.1:
            first_batch_size = t
            break
    all_sep_spec = np.zeros([args.n_noise+1, F, first_batch_size], dtype=complex)

    for t in range(first_batch_size, T):
        args.file_id = t - first_batch_size + 1
        voice_num = sum(voice_flag[:t+1])  # number of voice frames in the current batch
        if voice_num < round(voice_frame/2):  # fewer voice frames than half the target: keep everything so far
            tmp = spec_all[:, :t+1, :]
        else:
            # current frame plus the most recent `voice_frame` voice frames as prior context
            tmp = spec_all[:, t, None, :]
            n = 0
            for tf in range(t-1, -1, -1):
                if voice_flag[tf] == 1:
                    tmp = np.insert(tmp, [0], spec_all[:, tf, None, :], axis=1)
                    n += 1
                    if n == voice_frame:
                        break
        spec = tmp

        separater = FastMNMF_DP_part_new(n_noise=args.n_noise, speech_VAE=speech_VAE, n_Z_iteration=args.n_Z_iteration, n_basis_noise=args.n_basis_noise, xp=xp, init_SCM=args.init_SCM, mode_update_Z=args.mode_update_Z, total_frame=T, all_sep_spec=all_sep_spec, first_batch_size=first_batch_size, voice_frame=voice_frame, dl_ratio=args.dl_ratio)
        separater.load_spectrogram(spec)
        separater.file_id = args.file_id
        separater.fs = fs
        separater.name_DNN = name_DNN
        all_sep_spec = separater.solve(n_iteration=args.n_iteration, save_likelihood=False, save_parameter=False, save_path=args.save_path, interval_save_parameter=25, save_wav=args.save_wav)