#! /usr/bin/env python3
# coding: utf-8
# Standard online MNMF-DP (work in progress)
# Ref. [1] Semi-Supervised Multichannel Speech Enhancement With a Deep Speech Prior
# Ref. [2] Unsupervised Speech Enhancement Based on Multichannel NMF-Informed Beamforming for Noise-Robust Automatic Speech Recognition

import numpy as np
import chainer
import sys, os
from chainer import cuda, serializers
from chainer import functions as chf
from progressbar import progressbar
import librosa
import soundfile as sf
import pickle as pic

from FCA_online import FCA_online
from configure import *


class MNMF_DP_online(FCA_online):
    """ Blind Speech Enhancement Using Multichannel Nonnegative Matrix Factorization with a Deep Speech Prior (MNMF-DP)

    X_FTM: the observed complex spectrogram
    covarianceMatrix_NFMM: spatial covariance matrices (SCMs) for each source
    W_noise_NnFK: basis vectors for noise sources (Nn means the number of noise sources)
    H_noise_NnKT: activations for noise sources
    Z_speech_DT: latent variables for speech
    power_speech_FT: power spectra of speech that is the output of DNN(Z_speech_DT)
    lambda_NFT: power spectral densities of each source
        lambda_NFT[0] = U_F * V_T * power_speech_FT
        lambda_NFT[1:] = W_noise_NnFK @ H_noise_NnKT
    """

    def __init__(self, speech_VAE=None, n_noise=1, n_Z_iteration=30, n_latent=16, n_basis_noise=2, xp=np, init_SCM="unit", mode_update_parameter=["all", "Z", "one_by_one"][1], mode_update_Z=["sampling", "backprop"][0], normalize_encoder_input=True, total_frame=0, all_sep_spec=0, first_batch_size=7, weight_rho=0.9):
        """ initialize the online MNMF-DP separator

        Parameters:
        -----------
            speech_VAE: VAE
                trained speech VAE network
            n_noise: int
                the number of noise sources
            n_Z_iteration: int
                the number of iterations for updating Z per global iteration
            n_latent: int
                the dimension of the latent variable Z
            n_basis_noise: int
                the number of bases of each noise source
            xp : numpy or cupy
            init_SCM: str
                how to initialize the covariance matrix {unit, obs, ILRMA}
            mode_update_parameter: str
                'all' : update all the variables simultaneously
                'Z' : update variables other than Z and then update Z
                'one_by_one' : update one by one
            mode_update_Z: str
                how to update the latent variable Z {sampling, backprop}
            normalize_encoder_input: boolean
                normalize the observation before feeding it into the encoder
                to initialize the latent variable
            total_frame: int
                total number of STFT frames of the whole signal
            all_sep_spec:
                separated spectrogram accumulated over the previous mini-batches
            first_batch_size: int
                number of frames in the first (longer) mini-batch
            weight_rho: float
                weight applied to the previous mini-batch's statistics in the
                online updates
        """
        super(MNMF_DP_online, self).__init__(n_source=n_noise+1, xp=xp, init_SCM=init_SCM, mode_update_parameter=mode_update_parameter)
        self.method_name = "MNMF_DP_online"
        # source bookkeeping: one speech source plus n_noise noise sources
        self.n_speech = 1
        self.n_noise = n_noise
        self.n_source = n_noise + 1
        # noise-NMF and latent-variable configuration
        self.n_basis_noise = n_basis_noise
        self.n_Z_iteration = n_Z_iteration
        self.n_latent = n_latent
        self.speech_VAE = speech_VAE
        self.mode_update_Z = mode_update_Z
        self.normalize_encoder_input = normalize_encoder_input
        # online-processing configuration
        self.total_frame = total_frame
        self.all_sep_spec = all_sep_spec
        self.first_batch_size = first_batch_size
        self.weight_rho = weight_rho


    def set_parameter(self, n_noise=None, n_iteration=None, n_Z_iteration=None, n_basis_noise=None, init_SCM=None, mode_update_parameter=None, mode_update_Z=None):
        """ set parameters (only the arguments that are not None are applied)

        Parameters:
        -----------
            n_noise: int
                the number of noise sources (n_source is kept at n_noise + 1)
            n_iteration: int
                the number of global iterations
            n_Z_iteration: int
                the number of iterations for updating Z per global iteration
            n_basis_noise: int
                the number of bases of each noise source
            init_SCM: str
                how to initialize covariance matrix {unit, obs, ILRMA}
            mode_update_parameter: str
                'all' : update all the variables simultaneously
                'Z' : update variables other than Z and then update Z
                'one_by_one' : update one by one
            mode_update_Z: str
                how to update latent variable Z {sampling, backprop}
        """
        # identity comparison with None (PEP 8) instead of `!= None`, so
        # falsy-but-valid values (e.g. 0) would still be applied correctly
        if n_noise is not None:
            self.n_noise = n_noise
            self.n_source = n_noise + 1
        if n_iteration is not None:
            self.n_iteration = n_iteration
        if n_Z_iteration is not None:
            self.n_Z_iteration = n_Z_iteration
        if n_basis_noise is not None:
            self.n_basis_noise = n_basis_noise
        if init_SCM is not None:
            self.init_SCM = init_SCM
        if mode_update_parameter is not None:
            self.mode_update_parameter = mode_update_parameter
        if mode_update_Z is not None:
            self.mode_update_Z = mode_update_Z

    ## Initialize the pre-batch statistics for the first batch: all zeros, which makes the first batch equivalent to offline processing
    def initialize_pre_batch(self):
        """Zero-initialize the statistics carried over from the previous
        mini-batch (u, G and W), so the first batch behaves like offline
        processing."""
        zeros = self.xp.zeros
        F, M = self.n_freq, self.n_mic
        N, Nn, K = self.n_source, self.n_noise, self.n_basis_noise
        # statistics of the speech scaling factor u
        self.pre_batch_u_a_1 = zeros(F)
        self.pre_batch_u_b_1 = zeros(F)
        self.pre_batch_u_F = zeros(F)
        # statistics of the spatial covariance matrices G
        self.pre_batch_phi = zeros([N, F, M, M])
        self.pre_batch_psi = zeros([N, F, M, M])
        self.pre_batch_covarianceMatrix_NFMM = zeros([N, F, M, M])
        # statistics of the noise bases W
        self.pre_batch_W_noise_NnFK = zeros([Nn, F, K])  # Nn F K
        self.pre_batch_W_a_1 = zeros([Nn, F, K])  # Nn F K
        self.pre_batch_W_b_1 = zeros([Nn, F, K])  # Nn F K


    ## Initialize the power spectral densities (overridden from the parent class)
    def initialize_PSD(self):
        """Initialize lambda_NFT, the noise NMF factors (W, H), the speech
        scaling factors (u_F, v_T), and the latent variable Z via the VAE
        encoder.

        Fix: `self.xp.float` replaced with `self.xp.float64` — the `float`
        alias was deprecated in NumPy 1.20 and removed in NumPy 1.24 (and
        likewise in CuPy), so the original code raises AttributeError on
        current versions.
        """
        # lambda: zero-initialized, filled from the speech/noise models below
        self.lambda_NFT = self.xp.zeros([self.n_source, self.n_freq, self.n_time]).astype(self.xp.float64)
        # speech power spectrogram: random floats in [0, 1)
        self.power_speech_FT = self.xp.random.random([self.n_freq, self.n_time]).astype(self.xp.float64)
        power_observation_FT = (self.xp.abs(self.X_FTM) ** 2).mean(axis=2)
        shape = 2
        # noise W: Dirichlet samples (each basis sums to 1 over frequency after the transpose)
        self.W_noise_NnFK = self.xp.random.dirichlet(np.ones(self.n_freq)*shape, size=[self.n_noise, self.n_basis_noise]).transpose(0, 2, 1)
        # noise H: Gamma samples scaled to the observed power, floored at EPS for numerical stability
        self.H_noise_NnKT = self.xp.random.gamma(shape, (power_observation_FT.mean() * self.n_freq * self.n_mic / (self.n_noise * self.n_basis_noise)) / shape, size=[self.n_noise, self.n_basis_noise, self.n_time])
        self.H_noise_NnKT[self.H_noise_NnKT < EPS] = EPS

        # numerator / denominator statistics of W (carried across mini-batches)
        self.W_a_1 = self.xp.zeros([self.n_noise, self.n_freq, self.n_basis_noise])
        self.W_b_1 = self.xp.zeros([self.n_noise, self.n_freq, self.n_basis_noise])

        # u (speech frequency scaling, normalized over F) and v (time activation), all ones
        self.u_F = self.xp.ones(self.n_freq) / self.n_freq
        self.v_T = self.xp.ones(self.n_time)

        # initialize Z by encoding the (optionally normalized) observation power
        if self.normalize_encoder_input:
            power_observation_FT = power_observation_FT / power_observation_FT.sum(axis=0).mean()
        self.Z_speech_DT = self.speech_VAE.encode_cupy(power_observation_FT.astype(self.xp.float32))
        self.z_link_speech = Z_link(self.Z_speech_DT.T)
        self.z_optimizer_speech = chainer.optimizers.Adam().setup(self.z_link_speech)

        self.power_speech_FT = self.speech_VAE.decode_cupy(self.Z_speech_DT)
        # (3) of [1]
        self.lambda_NFT[0] = self.u_F[:, None] * self.v_T[None] * self.power_speech_FT
        # (5) of [1]
        self.lambda_NFT[1:] = self.W_noise_NnFK @ self.H_noise_NnKT


    def make_filename_suffix(self):
        """Build self.filename_suffix encoding the hyper-parameters of this
        run, used to name output files."""
        self.filename_suffix = (
            f"N={self.n_noise}-it={self.n_iteration}-itZ={self.n_Z_iteration}"
            f"-Kn={self.n_basis_noise}-D={self.n_latent}-init={self.init_SCM}"
            f"-latent={self.mode_update_Z}-update={self.mode_update_parameter}"
        )

        # append the DNN name (or a placeholder) and, if set, the file id
        if hasattr(self, "name_DNN"):
            self.filename_suffix += f"-DNN={self.name_DNN}"
        else:
            self.filename_suffix += "-DNN=NoName"

        if hasattr(self, "file_id"):
            self.filename_suffix += f"-ID={self.file_id}"
        else:
            print("====================\n\nWarning: Please set self.file_id\n\n====================")

        print("filename_suffix:", self.filename_suffix)


    ## update() overridden in this subclass: runs one global iteration updating all parameters (core of the algorithm)
    def update(self):
        """Run one global iteration, dispatching on mode_update_parameter.

        'one_by_one' refreshes the auxiliary statistics before every single
        parameter update; 'all' and 'Z' refresh them once and then update
        every parameter, differing only in whether the Z update recomputes
        its constants.
        """
        if self.mode_update_parameter == "one_by_one":
            for step in (self.update_W_noise, self.update_H_noise,
                         self.update_covarianceMatrix, self.update_U, self.update_V):
                self.update_axiliary_variable()
                step()
            self.update_axiliary_variable()
            self.update_Z_speech(calc_constant=True)
            self.normalize()
        elif self.mode_update_parameter in ("all", "Z"):
            self.update_axiliary_variable_and_Z()
            self.update_WH_noise()
            self.update_covarianceMatrix()
            self.update_UV()
            self.update_Z_speech(calc_constant=(self.mode_update_parameter == "Z"))
            self.normalize()


    def update_axiliary_variable_and_Z(self):
        """Compute the auxiliary statistics shared by all parameter updates.

        For the current lambda_NFT and covarianceMatrix_NFMM this sets:
            Yinv_FTMM: (Y_ft)^-1, inverse of the summed source covariances
            Yinv_X_Yinv_FTMM: (Y_ft)^-1 X_ft (Y_ft)^-1
            tr_Cov_Yinv_X_Yinv_NFT, tr_Cov_Yinv_NFT:
                trace terms of (44)-(54) of [1]
            tr_Cov_Phi_X_Phi_FT, tr_Omega_Cov_FT:
                trace terms of (42) of [1], used by the Z update
            UV_FT: the speech scaling u_F * v_T
        """
        # (14) of [1]: per-source covariance Y_nft = lambda_nft * G_nf
        Y_NFTMM = self.lambda_NFT[..., None, None] * self.covarianceMatrix_NFMM[:, :, None]
        if self.xp == np:
            self.Yinv_FTMM = np.linalg.inv(Y_NFTMM.sum(axis=0)) # (Y_ft)^-1 in [1]
            Yx_FTM1 = self.Yinv_FTMM @ self.X_FTM[..., None] # (Y_ft)^-1 * x_ft
            # (Y_ft)^-1 * X_ft * (Y_ft)^-1, formed from the rank-1 vector product
            self.Yinv_X_Yinv_FTMM = Yx_FTM1 @ Yx_FTM1.conj().transpose(0, 1, 3, 2) # for reducing computational cost in case of CPU
            # (G_nf)^-1 for the speech source (n = 0)
            cov_inv_FMM = np.linalg.inv(self.covarianceMatrix_NFMM[0])
        else:
            # GPU path: use the batched inverse helper instead of np.linalg.inv
            self.Yinv_FTMM = self.calculateInverseMatrix(Y_NFTMM.sum(axis=0))
            Yx_FTM1 = self.Yinv_FTMM @ self.X_FTM[..., None]
            self.Yinv_X_Yinv_FTMM = Yx_FTM1 @ Yx_FTM1.conj().transpose(0, 1, 3, 2) # for reducing computational cost in case of CPU
            cov_inv_FMM = self.calculateInverseMatrix(self.covarianceMatrix_NFMM[0])

        # trace term in (44) (47) (50) (53) of [1]
        self.tr_Cov_Yinv_X_Yinv_NFT = self.xp.trace(self.covarianceMatrix_NFMM[:, :, None] @ self.Yinv_X_Yinv_FTMM[None], axis1=3, axis2=4).real
        # trace term in (45) (48) (51) (54) of [1]
        self.tr_Cov_Yinv_NFT = self.xp.trace(self.covarianceMatrix_NFMM[:, :, None] @ self.Yinv_FTMM[None], axis1=3, axis2=4).real

        # (38) of [1]: filter Phi_ft for the speech source
        Phi_FTMM = Y_NFTMM[0] @ self.Yinv_FTMM
        # last trace term in (42) of [1]
        self.tr_Omega_Cov_FT = self.tr_Cov_Yinv_NFT[0]
        # first trace term in (42) of [1]
        self.tr_Cov_Phi_X_Phi_FT = self.xp.trace(cov_inv_FMM[:, None] @ Phi_FTMM @ self.XX_FTMM @ Phi_FTMM.transpose(0, 1, 3, 2).conj(), axis1=2, axis2=3).real
        # u*v
        self.UV_FT = self.u_F[:, None] * self.v_T[None]


    def update_UV(self):  # u's a_1/b_1 statistics are carried over to the next mini-batch
        """Jointly update the speech scaling u_F and activation v_T.

        v_T uses the plain batch MM update ((47)-(49) of [1]); u_F uses the
        online MM update that blends the current numerator/denominator
        statistics with the previous mini-batch's, weighted by weight_rho.
        """
        # (47) & (48) in [1]
        a_2 = (self.u_F[:, None] * self.power_speech_FT * self.tr_Cov_Yinv_X_Yinv_NFT[0]).sum(axis=0)
        b_2 = (self.u_F[:, None] * self.power_speech_FT * self.tr_Cov_Yinv_NFT[0]).sum(axis=0)

        # (44) & (45) in [1]
        self.u_a_1 = (self.v_T[None] * self.power_speech_FT * self.tr_Cov_Yinv_X_Yinv_NFT[0]).sum(axis=1)
        self.u_b_1 = (self.v_T[None] * self.power_speech_FT * self.tr_Cov_Yinv_NFT[0]).sum(axis=1)
        # online version: u^2 * numerator (resp. denominator), blended with the previous batch
        Fj_uau_F = self.u_F * self.u_a_1 * self.u_F + self.weight_rho * self.pre_batch_u_F * self.pre_batch_u_a_1 * self.pre_batch_u_F
        Fj_b_F = self.u_b_1 + self.weight_rho * self.pre_batch_u_b_1

        # (49) in [1]
        self.v_T = self.v_T * self.xp.sqrt(a_2 / b_2)
        # (46) in [1]  online version
        self.u_F = self.xp.sqrt(Fj_uau_F / Fj_b_F)


    def update_U(self):  # u's a_1/b_1 statistics are carried over to the next mini-batch
        """Online MM update of the speech frequency scaling u_F ((44)-(46) of [1])."""
        # (44) & (45) in [1]: numerator / denominator statistics of u
        self.u_a_1 = (self.v_T[None] * self.power_speech_FT * self.tr_Cov_Yinv_X_Yinv_NFT[0]).sum(axis=1)
        self.u_b_1 = (self.v_T[None] * self.power_speech_FT * self.tr_Cov_Yinv_NFT[0]).sum(axis=1)
        # online version: u^2 * statistics, blended with the previous mini-batch (weight_rho)
        Fj_uau_F = self.u_F * self.u_a_1 * self.u_F + self.weight_rho * self.pre_batch_u_F * self.pre_batch_u_a_1 * self.pre_batch_u_F
        Fj_b_F = self.u_b_1 + self.weight_rho * self.pre_batch_u_b_1
        # (46) in [1], online version
        self.u_F = self.xp.sqrt(Fj_uau_F / Fj_b_F)
        # (3) in [1]: refresh the speech PSD
        self.lambda_NFT[0] = self.u_F[:, None] * self.v_T[None] * self.power_speech_FT


    def update_V(self):
        """Multiplicative update of the speech activation v_T
        ((47)-(49) of [1]), then refresh UV_FT and the speech PSD."""
        speech_gain_FT = self.u_F[:, None] * self.power_speech_FT
        numer_T = (speech_gain_FT * self.tr_Cov_Yinv_X_Yinv_NFT[0]).sum(axis=0)
        denom_T = (speech_gain_FT * self.tr_Cov_Yinv_NFT[0]).sum(axis=0)
        self.v_T = self.v_T * self.xp.sqrt(numer_T / denom_T)
        # u*v
        self.UV_FT = self.u_F[:, None] * self.v_T[None]
        # (3) in [1]
        self.lambda_NFT[0] = self.UV_FT * self.power_speech_FT


    def update_WH_noise(self):  # W's a_1/b_1 statistics are carried over to the next mini-batch
        """Jointly update the noise bases W and activations H ((50)-(54) of [1]).

        H uses the plain batch multiplicative update; W uses the online MM
        update that blends the current numerator/denominator statistics
        (W_a_1, W_b_1) with the previous mini-batch's, weighted by weight_rho.
        NOTE: lambda_NFT is not refreshed here; normalize() rebuilds it at
        the end of update().
        """
        if self.xp == np: # CPU: loop over frequency to limit memory usage
            a_2 = self.xp.zeros([self.n_noise, self.n_basis_noise, self.n_time])
            b_2 = self.xp.zeros([self.n_noise, self.n_basis_noise, self.n_time])

            for f in range(self.n_freq):
                # numerator / denominator statistics of W at frequency f
                self.W_a_1[:, f] = (self.H_noise_NnKT.transpose(0, 2, 1) * self.tr_Cov_Yinv_X_Yinv_NFT[1:, f, :, None]).sum(axis=1)  # Nn K
                self.W_b_1[:, f] = (self.H_noise_NnKT.transpose(0, 2, 1) * self.tr_Cov_Yinv_NFT[1:, f, :, None]).sum(axis=1)  # Nn K

                # accumulate the statistics of H over frequency
                a_2 += (self.W_noise_NnFK[:, f, :, None] * self.tr_Cov_Yinv_X_Yinv_NFT[1:, f, None])  # Nn K T
                b_2 += (self.W_noise_NnFK[:, f, :, None] * self.tr_Cov_Yinv_NFT[1:, f, None])  # Nn K T

                # online version: W^2 * statistics, blended with the previous batch
                Fj_WaW_NnK = self.W_noise_NnFK[:, f] * self.W_a_1[:, f] * self.W_noise_NnFK[:, f] + self.weight_rho * self.pre_batch_W_noise_NnFK[:, f] * self.pre_batch_W_a_1[:, f] * self.pre_batch_W_noise_NnFK[:, f]
                Fj_b_NnK = self.W_b_1[:, f] + self.weight_rho * self.pre_batch_W_b_1[:, f]
                self.W_noise_NnFK[:, f] = self.xp.sqrt(Fj_WaW_NnK / Fj_b_NnK)
            self.H_noise_NnKT = self.H_noise_NnKT * self.xp.sqrt(a_2 / b_2)
        else: # GPU: fully vectorized over frequency
            self.W_a_1 = (self.H_noise_NnKT.transpose(0, 2, 1)[:, None] * self.tr_Cov_Yinv_X_Yinv_NFT[1:, :, :, None]).sum(axis=2) # Nn F K
            self.W_b_1 = (self.H_noise_NnKT.transpose(0, 2, 1)[:, None] * self.tr_Cov_Yinv_NFT[1:, :, :, None]).sum(axis=2)  # Nn F K

            a_2 = (self.W_noise_NnFK[..., None] * self.tr_Cov_Yinv_X_Yinv_NFT[1:, :, None]).sum(axis=1)  # Nn K T
            b_2 = (self.W_noise_NnFK[..., None] * self.tr_Cov_Yinv_NFT[1:, :, None]).sum(axis=1)  # Nn K T

            # online version: W^2 * statistics, blended with the previous batch
            Fj_WaW_NnFK = self.W_noise_NnFK * self.W_a_1 * self.W_noise_NnFK + self.weight_rho * self.pre_batch_W_noise_NnFK * self.pre_batch_W_a_1 * self.pre_batch_W_noise_NnFK
            Fj_b_NnFK = self.W_b_1 + self.weight_rho * self.pre_batch_W_b_1
            self.W_noise_NnFK = self.xp.sqrt(Fj_WaW_NnFK / Fj_b_NnFK)
            self.H_noise_NnKT = self.H_noise_NnKT * self.xp.sqrt(a_2 / b_2)


    def update_H_noise(self):
        """Multiplicative update of the noise activations H ((53)-(54) of [1]),
        then refresh the noise PSDs with an EPS floor."""
        if self.xp == np: # CPU: accumulate over frequency to limit memory usage
            numer_NnKT = self.xp.zeros([self.n_noise, self.n_basis_noise, self.n_time])
            denom_NnKT = self.xp.zeros([self.n_noise, self.n_basis_noise, self.n_time])
            for f in range(self.n_freq):
                W_f = self.W_noise_NnFK[:, f, :, None]  # Nn K 1
                numer_NnKT += W_f * self.tr_Cov_Yinv_X_Yinv_NFT[1:, f, None]  # Nn K T
                denom_NnKT += W_f * self.tr_Cov_Yinv_NFT[1:, f, None]  # Nn K T
        else: # GPU: fully vectorized over frequency
            numer_NnKT = (self.W_noise_NnFK[..., None] * self.tr_Cov_Yinv_X_Yinv_NFT[1:, :, None]).sum(axis=1)  # Nn K T
            denom_NnKT = (self.W_noise_NnFK[..., None] * self.tr_Cov_Yinv_NFT[1:, :, None]).sum(axis=1)  # Nn K T
        self.H_noise_NnKT = self.H_noise_NnKT * self.xp.sqrt(numer_NnKT / denom_NnKT)
        # (5) of [1]
        self.lambda_NFT[1:] = self.W_noise_NnFK @ self.H_noise_NnKT + EPS


    def update_W_noise(self):  # W's a_1/b_1 statistics are carried over to the next mini-batch
        """Online MM update of the noise bases W ((50)-(52) of [1], online version).

        The numerator/denominator statistics (W_a_1, W_b_1) of the current
        mini-batch are blended with those of the previous one (weight_rho).
        """
        if self.xp == np: # CPU: loop over frequency to limit memory usage
            for f in range(self.n_freq):
                self.W_a_1[:, f] = (self.H_noise_NnKT.transpose(0, 2, 1) * self.tr_Cov_Yinv_X_Yinv_NFT[1:, f, :, None]).sum(axis=1)  # Nn K
                self.W_b_1[:, f] = (self.H_noise_NnKT.transpose(0, 2, 1) * self.tr_Cov_Yinv_NFT[1:, f, :, None]).sum(axis=1)  # Nn K
                # online version: W^2 * statistics, blended with the previous batch
                Fj_WaW_NnK = self.W_noise_NnFK[:, f] * self.W_a_1[:, f] * self.W_noise_NnFK[:, f] + self.weight_rho * self.pre_batch_W_noise_NnFK[:, f] * self.pre_batch_W_a_1[:, f] * self.pre_batch_W_noise_NnFK[:, f]
                Fj_b_NnK = self.W_b_1[:, f] + self.weight_rho * self.pre_batch_W_b_1[:, f]
                self.W_noise_NnFK[:, f] = self.xp.sqrt(Fj_WaW_NnK / Fj_b_NnK)
        else: # GPU: fully vectorized over frequency
            self.W_a_1 = (self.H_noise_NnKT.transpose(0, 2, 1)[:, None] * self.tr_Cov_Yinv_X_Yinv_NFT[1:, :, :, None]).sum(axis=2)  # Nn F K
            self.W_b_1 = (self.H_noise_NnKT.transpose(0, 2, 1)[:, None] * self.tr_Cov_Yinv_NFT[1:, :, :, None]).sum(axis=2)  # Nn F K
            # online version: W^2 * statistics, blended with the previous batch
            Fj_WaW_NnFK = self.W_noise_NnFK * self.W_a_1 * self.W_noise_NnFK + self.weight_rho * self.pre_batch_W_noise_NnFK * self.pre_batch_W_a_1 * self.pre_batch_W_noise_NnFK
            Fj_b_NnFK = self.W_b_1 + self.weight_rho * self.pre_batch_W_b_1
            self.W_noise_NnFK = self.xp.sqrt(Fj_WaW_NnFK / Fj_b_NnFK)
        # (5) of [1]: refresh the noise PSDs with an EPS floor for numerical stability
        self.lambda_NFT[1:] = self.W_noise_NnFK @ self.H_noise_NnKT + EPS


    def normalize(self):
        """Resolve the scale ambiguities between the SCMs, u_F/v_T and W/H,
        then rebuild lambda_NFT from the normalized factors."""
        # scale each SCM to unit trace, pushing the trace into u_F / W
        trace_NF = self.xp.trace(self.covarianceMatrix_NFMM, axis1=2, axis2=3).real
        self.covarianceMatrix_NFMM = self.covarianceMatrix_NFMM / trace_NF[:, :, None, None]
        self.u_F = self.u_F * trace_NF[0]
        self.W_noise_NnFK = self.W_noise_NnFK * trace_NF[1:][:, :, None]

        # normalize u_F to sum to one over frequency, pushing the scale into v_T
        scale_u = self.u_F.sum()
        self.u_F = self.u_F / scale_u
        self.v_T = self.v_T * scale_u

        # normalize each noise basis over frequency, pushing the scale into H
        scale_NnK = self.W_noise_NnFK.sum(axis=1)
        self.W_noise_NnFK = self.W_noise_NnFK / scale_NnK[:, None]
        self.H_noise_NnKT = self.H_noise_NnKT * scale_NnK[:, :, None]

        # rebuild the PSDs ((3) and (5) of [1])
        self.lambda_NFT[0] = self.u_F[:, None] * self.v_T[None] * self.power_speech_FT
        self.lambda_NFT[1:] = self.W_noise_NnFK @ self.H_noise_NnKT + EPS


    def loss_func_Z(self, z, vae, n):
        """Cost of the latent variable z for source n ((42) of [1]).
        Only the speech source (n == 0) has a deep prior."""
        # decoded speech power scaled by u*v, floored for numerical stability
        power_FT = chf.exp(vae.decode(z).T) * self.UV_FT + EPS
        if n != 0:
            raise NotImplementedError
        return chf.sum(1 / power_FT * self.tr_Cov_Phi_X_Phi_FT + power_FT * self.tr_Omega_Cov_FT)


    def update_Z_speech(self, var_propose_distribution=1e-4, calc_constant=True):
        """ Update the latent variable Z of the speech source.

        Depending on self.mode_update_Z, Z is updated by backprop through
        the VAE decoder (minimizing loss_func_Z with Adam), and/or by
        Metropolis-Hastings-style sampling with a Gaussian proposal and a
        standard-normal prior on Z.

        Parameters:
            var_propose_distribution: float
                the variance of the propose distribution
            calc_constant: bool
                if True, recompute the trace terms of (42) of [1] first

        Results:
            self.Z_speech_DT: self.xp.array [ n_latent x T ]
                the latent variable of each speech
        """
        if calc_constant:
            self.calculate_constant_for_update_Z()

        if "backprop" in self.mode_update_Z: # gradient-based update of Z through the decoder
            for it in range(self.n_Z_iteration):
                with chainer.using_config('train', False):
                    self.z_optimizer_speech.update(self.loss_func_Z, self.z_link_speech.z, self.speech_VAE, 0)

            self.Z_speech_DT = self.z_link_speech.z.data.T
            self.power_speech_FT = self.speech_VAE.decode_cupy(self.Z_speech_DT)

        if "sampling" in self.mode_update_Z: # acceptance rate is calculated from the likelihood
            # Gaussian proposal with fixed (log-)variance around the current Z
            log_var = self.xp.log(self.xp.ones_like(self.Z_speech_DT).astype(self.xp.float32) * var_propose_distribution)
            Z_speech_old_DT = self.Z_speech_DT
            lambda_speech_old_FT = self.speech_VAE.decode_cupy(Z_speech_old_DT) * self.UV_FT
            for it in range(self.n_Z_iteration):
                # propose a new Z and decode its (u*v-scaled) speech power
                Z_speech_new_DT = chf.gaussian(Z_speech_old_DT, log_var).data
                lambda_speech_new_FT = self.speech_VAE.decode_cupy(Z_speech_new_DT) * self.UV_FT
                # acceptance probability: likelihood ratio from (42) of [1] times the N(0, I) prior ratio on Z
                acceptance_rate =  self.xp.exp((-1 * (1/lambda_speech_new_FT - 1/lambda_speech_old_FT) * self.tr_Cov_Phi_X_Phi_FT -  (lambda_speech_new_FT - lambda_speech_old_FT) * self.tr_Omega_Cov_FT).sum(axis=0) - (Z_speech_new_DT ** 2 - Z_speech_old_DT ** 2).sum(axis=0)/2)
                # accept/reject independently for every time frame
                acceptance_boolean = self.xp.random.random([self.n_time]) < acceptance_rate
                Z_speech_old_DT[:, acceptance_boolean] = Z_speech_new_DT[:, acceptance_boolean]
                lambda_speech_old_FT[:, acceptance_boolean] = lambda_speech_new_FT[:, acceptance_boolean]

            self.Z_speech_DT = Z_speech_old_DT
            # keep the backprop link in sync with the sampled Z
            self.z_link_speech.z = chainer.Parameter(self.Z_speech_DT.T)
            self.power_speech_FT = self.speech_VAE.decode_cupy(self.Z_speech_DT)


    def calculate_constant_for_update_Z(self):
        """Precompute the trace terms of (42) of [1] used by update_Z_speech."""
        # pick the matrix-inverse routine for the current backend
        invert = np.linalg.inv if self.xp == np else self.calculateInverseMatrix
        # per-source covariances Y_nft = lambda_nft * G_nf ((14) of [1])
        Y_NFTMM = self.lambda_NFT[..., None, None] * self.covarianceMatrix_NFMM[:, :, None]
        self.Yinv_FTMM = invert(Y_NFTMM.sum(axis=0))
        cov_inv_FMM = invert(self.covarianceMatrix_NFMM[0])

        # (38) of [1]: filter for the speech source
        Phi_FTMM = Y_NFTMM[0] @ self.Yinv_FTMM
        # trace terms of (42) of [1]
        self.tr_Omega_Cov_FT = self.xp.trace(self.covarianceMatrix_NFMM[0, :, None] @ self.Yinv_FTMM, axis1=2, axis2=3).real
        self.tr_Cov_Phi_X_Phi_FT = self.xp.trace(cov_inv_FMM[:, None] @ Phi_FTMM @ self.XX_FTMM @ Phi_FTMM.transpose(0, 1, 3, 2).conj(), axis1=2, axis2=3).real


    def save_parameter(self, filename):
        """Pickle the model parameters to `filename` as a FLAT list of 7 arrays
        (the layout load_parameter unpacks).

        Fix 1: the noise factors were appended as a nested list ([W, H] as a
        single element), so the pickle held 6 entries with a nested tail —
        load_parameter's 7-way unpack would fail, and cuda.to_cpu received a
        list instead of an array. `extend` keeps the list flat.
        Fix 2: close the file deterministically with a context manager.
        """
        param_list = [self.covarianceMatrix_NFMM, self.lambda_NFT]
        param_list.extend([self.u_F, self.v_T, self.Z_speech_DT])
        param_list.extend([self.W_noise_NnFK, self.H_noise_NnKT])

        if self.xp != np:
            param_list = [cuda.to_cpu(param) for param in param_list]

        with open(filename, "wb") as f:
            pic.dump(param_list, f)


    def load_parameter(self, filename):
        """Load the parameters pickled by save_parameter and restore the
        dimension attributes from their shapes.

        Expects 7 flat entries: covarianceMatrix_NFMM, lambda_NFT, u_F, v_T,
        Z_speech_DT, W_noise_NnFK, H_noise_NnKT.
        Fix: close the file deterministically with a context manager instead
        of leaking the handle opened inline.
        """
        with open(filename, "rb") as f:
            param_list = pic.load(f)
        if self.xp != np:
            param_list = [cuda.to_gpu(param) for param in param_list]

        self.covarianceMatrix_NFMM, self.lambda_NFT, self.u_F, self.v_T, self.Z_speech_DT, self.W_noise_NnFK, self.H_noise_NnKT = param_list
        # recover the problem dimensions from the loaded arrays
        self.n_source, self.n_freq, self.n_time = self.lambda_NFT.shape
        self.n_mic = self.covarianceMatrix_NFMM.shape[-1]
        self.n_latent = self.Z_speech_DT.shape[0]
        self.n_noise, self.n_speech = self.n_source - 1, 1



class Z_link(chainer.link.Link):
    """Chainer link that wraps the latent variable Z as a learnable Parameter
    so it can be optimized by a chainer optimizer (backprop Z update)."""
    def __init__(self, z):
        super(Z_link, self).__init__()

        with self.init_scope():
            # register z as a parameter of this link so the optimizer updates it
            self.z = chainer.Parameter(z)

class pre_batch_init_DP():
    """Plain container carrying one mini-batch's sufficient statistics
    (u, W and G groups) over to the next mini-batch; every field defaults
    to None until the first batch fills it in."""
    def __init__(self, u_a_1=None, u_b_1=None, u_F=None, W_a_1=None, W_b_1=None, W_noise_NnFK=None, phi=None, psi=None, covarianceMatrix_NFMM=None):
        # store every statistic under its own attribute name:
        # u group (u_a_1, u_b_1, u_F), W group (W_a_1, W_b_1, W_noise_NnFK),
        # and G group (phi, psi, covarianceMatrix_NFMM)
        fields = dict(u_a_1=u_a_1, u_b_1=u_b_1, u_F=u_F,
                      W_a_1=W_a_1, W_b_1=W_b_1, W_noise_NnFK=W_noise_NnFK,
                      phi=phi, psi=psi,
                      covarianceMatrix_NFMM=covarianceMatrix_NFMM)
        for name, value in fields.items():
            setattr(self, name, value)



## Main entry point: the program starts here
if __name__ == "__main__":
    # command-line arguments and their defaults
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('input_fileName', type=str, help='filename of the multichannel observed signals')
    parser.add_argument('--file_id', type=str, default="None", help='file id')
    parser.add_argument('--gpu', type=int, default=-1, help='GPU ID')
    parser.add_argument('--n_fft', type=int, default=1024, help='number of frequencies')
    parser.add_argument('--n_noise', type=int, default=1, help='number of noise')
    parser.add_argument('--n_latent', type=int, default=16, help='dimention of encoded vector')
    parser.add_argument('--n_basis_noise', type=int, default=64, help='number of basis of noise (MODE_noise=NMF)')
    parser.add_argument('--init_SCM', type=str, default="obs", help='unit, obs, ILRMA')
    parser.add_argument('--n_iteration', type=int, default=30, help='number of iteration')
    parser.add_argument('--n_Z_iteration', type=int, default=30, help='number of update Z iteration')
    parser.add_argument('--mode_update_Z', type=str, default="sampling", help='sampling, sampling2, backprop, backprop2, hybrid, hybrid2')
    parser.add_argument('--mode_update_parameter', type=str, default="all", help='all, one_by_one')
    parser.add_argument('--save_path', type=str, default="..\\output\\", help='save path of wave file')
    parser.add_argument('--first_batch_size', type=int, default=40, help='frame num of first mini batch')
    parser.add_argument('--mini_batch_size', type=int, default=4, help='frame num of each mini batch')
    parser.add_argument('--weight_rho', type=float, default=0.9, help='online update weight rho of last mini batch')
    args = parser.parse_args()

    # load the trained DNN model (deep speech prior VAE)
    sys.path.append("../DeepSpeechPrior")
    import network_VAE
    model_fileName = "../DeepSpeechPrior/model-VAE-best-scale=gamma-D={}.npz".format(args.n_latent)
    speech_VAE = network_VAE.VAE(n_latent=args.n_latent)
    serializers.load_npz(model_fileName, speech_VAE)
    name_DNN = "VAE"

    # gpu id < 0: compute with numpy (CPU); otherwise with cupy (GPU accelerated)
    if args.gpu < 0:
        import numpy as xp
    else:
        import cupy as xp
        print("Use GPU " + str(args.gpu))
        cuda.get_device_from_id(args.gpu).use()
        speech_VAE.to_gpu()

    # read the wav file and compute the multichannel STFT
    wav, fs = sf.read(args.input_fileName)
    wav = wav.T
    M = len(wav)
    for m in range(M):
        # librosa.core was removed in librosa 0.10; the top-level librosa.stft is the stable API
        tmp = librosa.stft(wav[m], n_fft=args.n_fft, hop_length=args.n_fft // 4)
        if m == 0:
            # np.complex was removed in NumPy 1.24; the builtin complex maps to complex128
            spec_all = np.zeros([tmp.shape[0], tmp.shape[1], M], dtype=complex)
        spec_all[:, :, m] = tmp

    T = spec_all.shape[1]
    first_batch_size = args.first_batch_size
    mini_batch_size = args.mini_batch_size
    all_sep_spec = 0

    for t in range(first_batch_size, T + 1):
        # online framing: a longer first batch, then sliding mini-batches of fixed size
        if t == first_batch_size:
            spec = spec_all[:, :t, :]
        else:
            spec = spec_all[:, t - mini_batch_size:t, :]
        args.file_id = t + 1 - first_batch_size

        # build the separator for this (mini-)batch
        separater = MNMF_DP_online(n_noise=args.n_noise, n_Z_iteration=args.n_Z_iteration, speech_VAE=speech_VAE, n_latent=args.n_latent, n_basis_noise=args.n_basis_noise, xp=xp, init_SCM=args.init_SCM, mode_update_parameter=args.mode_update_parameter, total_frame=T, all_sep_spec=all_sep_spec, first_batch_size=first_batch_size, weight_rho=args.weight_rho)

        # load the STFT spectrogram (inherited from FCA_online); builds x_ft and X_ft = x_ft * x_ft^H in [1]
        separater.load_spectrogram(spec)

        # naming-related attributes
        separater.name_DNN = name_DNN
        separater.file_id = args.file_id

        separater.fs = fs

        # create the pre-batch container on the first batch; afterwards hand
        # the previous batch's statistics to the new separator
        if args.file_id == 1:
            pre_batch = pre_batch_init_DP()
        else:
            # u statistics
            separater.pre_batch_u_a_1 = pre_batch.u_a_1
            separater.pre_batch_u_b_1 = pre_batch.u_b_1
            separater.pre_batch_u_F = pre_batch.u_F
            # W statistics
            separater.pre_batch_W_a_1 = pre_batch.W_a_1
            separater.pre_batch_W_b_1 = pre_batch.W_b_1
            separater.pre_batch_W_noise_NnFK = pre_batch.W_noise_NnFK
            # G (spatial covariance) statistics
            separater.pre_batch_phi = pre_batch.phi
            separater.pre_batch_psi = pre_batch.psi
            separater.pre_batch_covarianceMatrix_NFMM = pre_batch.covarianceMatrix_NFMM

        # solve(): runs the core algorithm, saves parameters and writes the results (inherited from FCA_online)
        all_sep_spec = separater.solve(n_iteration=args.n_iteration, save_likelihood=False, save_parameter=False, save_path=args.save_path, interval_save_parameter=100)

        # store this batch's online statistics for the next mini-batch
        # u statistics
        pre_batch.u_a_1 = separater.u_a_1
        pre_batch.u_b_1 = separater.u_b_1
        pre_batch.u_F = separater.u_F
        # W statistics
        pre_batch.W_a_1 = separater.W_a_1
        pre_batch.W_b_1 = separater.W_b_1
        pre_batch.W_noise_NnFK = separater.W_noise_NnFK
        # G statistics
        pre_batch.phi = separater.phi
        pre_batch.psi = separater.psi
        pre_batch.covarianceMatrix_NFMM = separater.covarianceMatrix_NFMM