#! /usr/bin/env python3
# coding: utf-8
# Ref. [1] Semi-Supervised Multichannel Speech Enhancement With a Deep Speech Prior
# Ref. [2] Unsupervised Speech Enhancement Based on Multichannel NMF-Informed Beamforming for Noise-Robust Automatic Speech Recognition

# Note: read these comments together with FCA.py — the MNMF_DP class inherits from FCA, so several core methods come from FCA.

import numpy as np
import chainer
import sys, os
from chainer import cuda, serializers
from chainer import functions as chf
from progressbar import progressbar
import librosa
import soundfile as sf
import pickle as pic

from FCA import FCA
from configure import *


class MNMF_DP(FCA):
    """ Blind Speech Enhancement Using Multichannel Nonnegative Matrix Factorization with a Deep Speech Prior (MNMF-DP)

    X_FTM: the observed complex spectrogram
    covarianceMatrix_NFMM: spatial covariance matrices (SCMs) for each source
    W_noise_NnFK: basis vectors for noise sources (Nn means the number of noise sources)
    H_noise_NnKT: activations for noise sources
    Z_speech_DT: latent variables for speech
    power_speech_FT: power spectra of speech that is the output of DNN(Z_speech_DT)
    lambda_NFT: power spectral densities of each source
        lambda_NFT[0] = U_F * V_T * power_speech_FT
        lambda_NFT[1:] = W_noise_NnFK @ H_noise_NnKT
    """

    ## Constructor: defines the hyper-parameters of the model
    def __init__(self, speech_VAE=None, n_noise=1, n_Z_iteration=30, n_latent=16, n_basis_noise=2, xp=np, init_SCM="unit", mode_update_parameter=["all", "Z", "one_by_one"][1], mode_update_Z=["sampling", "backprop"][0], normalize_encoder_input=True):
        """ initialize MNMF-DP

        Parameters:
        -----------
            speech_VAE: VAE
                trained speech VAE network
            n_noise: int
                the number of noise sources
            n_Z_iteration: int
                the number of iterations for updating Z per global iteration
            n_latent: int
                the dimension of latent variable Z
            n_basis_noise: int
                the number of bases of each noise source
            xp : numpy or cupy
            init_SCM: str
                how to initialize covariance matrix {unit, obs, ILRMA}
            mode_update_parameter: str
                'all' : update all the variables simultaneously
                'Z' : update the variables other than Z, then update Z (default)
                'one_by_one' : update the variables one by one
            mode_update_Z: str
                how to update latent variable Z {sampling, backprop}
            normalize_encoder_input: boolean
                whether to normalize the observation before feeding it into the
                encoder to initialize the latent variable
        """
        # One speech source plus n_noise noise sources.
        super().__init__(n_source=n_noise + 1, xp=xp, init_SCM=init_SCM, mode_update_parameter=mode_update_parameter)
        self.n_source, self.n_noise, self.n_speech = n_noise + 1, n_noise, 1
        self.n_basis_noise = n_basis_noise
        self.n_Z_iteration = n_Z_iteration
        self.n_latent = n_latent
        self.speech_VAE = speech_VAE
        self.mode_update_Z = mode_update_Z
        self.normalize_encoder_input = normalize_encoder_input
        self.method_name = "MNMF_DP"


    ## Another parameter-setting helper (overrides values after construction)
    def set_parameter(self, n_noise=None, n_iteration=None, n_Z_iteration=None, n_basis_noise=None, init_SCM=None, mode_update_parameter=None, mode_update_Z=None):
        """ set parameters (a None argument keeps the current value)

        Parameters:
        -----------
            n_noise: int
                the number of noise sources (n_source becomes n_noise + 1)
            n_iteration: int
                the number of global iterations
            n_Z_iteration: int
                the number of iterations for updating Z per global iteration
            n_basis_noise: int
                the number of bases of each noise source
            init_SCM: str
                how to initialize covariance matrix {unit, obs, ILRMA}
            mode_update_parameter: str
                'all' : update all the variables simultaneously
                'Z' : update variables other than Z and then update Z
                'one_by_one' : update one by one
            mode_update_Z: str
                how to update latent variable Z {sampling, backprop}
        """
        # Use identity comparison with None (PEP 8); `!= None` relies on __eq__.
        if n_noise is not None:
            self.n_noise = n_noise
            self.n_source = n_noise + 1
        if n_iteration is not None:
            self.n_iteration = n_iteration
        if n_Z_iteration is not None:
            self.n_Z_iteration = n_Z_iteration
        if n_basis_noise is not None:
            self.n_basis_noise = n_basis_noise
        if init_SCM is not None:
            self.init_SCM = init_SCM
        if mode_update_parameter is not None:
            self.mode_update_parameter = mode_update_parameter
        if mode_update_Z is not None:
            self.mode_update_Z = mode_update_Z


    ## Initialize the power spectral densities (overridden in this subclass)
    def initialize_PSD(self):
        """ Initialize the source PSDs and the speech latent variable (overrides FCA).

        Fills lambda_NFT, power_speech_FT, the noise NMF factors W/H, the speech
        scaling factors u_F / v_T, and initializes Z by encoding the observation
        with the VAE encoder.
        """
        # Bug fix: `self.xp.float` is the alias np.float, deprecated in NumPy 1.20
        # and removed in 1.24; the builtin float (= float64) works for numpy and cupy.
        self.lambda_NFT = self.xp.zeros([self.n_source, self.n_freq, self.n_time]).astype(float)
        # speech power spectrum: uniform random floats in [0, 1)
        self.power_speech_FT = self.xp.random.random([self.n_freq, self.n_time]).astype(float)
        power_observation_FT = (self.xp.abs(self.X_FTM) ** 2).mean(axis=2)
        shape = 2
        # noise bases W: Dirichlet samples (each basis sums to 1 over frequency)
        self.W_noise_NnFK = self.xp.random.dirichlet(np.ones(self.n_freq)*shape, size=[self.n_noise, self.n_basis_noise]).transpose(0, 2, 1)
        # noise activations H: Gamma samples scaled to the observed power,
        # floored at EPS for numerical stability
        self.H_noise_NnKT = self.xp.random.gamma(shape, (power_observation_FT.mean() * self.n_freq * self.n_mic / (self.n_noise * self.n_basis_noise)) / shape, size=[self.n_noise, self.n_basis_noise, self.n_time])
        self.H_noise_NnKT[self.H_noise_NnKT < EPS] = EPS

        # u (frequency scaling factor) and v (time activation) of speech:
        # all ones, with u normalized over frequency
        self.u_F = self.xp.ones(self.n_freq) / self.n_freq
        self.v_T = self.xp.ones(self.n_time)

        # initialize Z by feeding the (optionally normalized) observed power into the encoder
        if self.normalize_encoder_input:
            power_observation_FT = power_observation_FT / power_observation_FT.sum(axis=0).mean()
        self.Z_speech_DT = self.speech_VAE.encode_cupy(power_observation_FT.astype(self.xp.float32))
        self.z_link_speech = Z_link(self.Z_speech_DT.T)
        self.z_optimizer_speech = chainer.optimizers.Adam().setup(self.z_link_speech)

        self.power_speech_FT = self.speech_VAE.decode_cupy(self.Z_speech_DT)
        # (3) of [1]
        self.lambda_NFT[0] = self.u_F[:, None] * self.v_T[None] * self.power_speech_FT
        # (5) of [1]
        self.lambda_NFT[1:] = self.W_noise_NnFK @ self.H_noise_NnKT


    ## Build the suffix used in output audio file names
    def make_filename_suffix(self):
        """Compose self.filename_suffix, a string encoding the hyper-parameter settings."""
        pieces = [
            "N={}".format(self.n_noise),
            "it={}".format(self.n_iteration),
            "itZ={}".format(self.n_Z_iteration),
            "Kn={}".format(self.n_basis_noise),
            "D={}".format(self.n_latent),
            "init={}".format(self.init_SCM),
            "latent={}".format(self.mode_update_Z),
            "update={}".format(self.mode_update_parameter),
        ]
        self.filename_suffix = "-".join(pieces)

        if hasattr(self, "name_DNN"):
            self.filename_suffix += "-DNN={}".format(self.name_DNN)
        else:
            self.filename_suffix += "-DNN=NoName"

        if hasattr(self, "file_id"):
            self.filename_suffix += "-ID={}".format(self.file_id)
        else:
            print("====================\n\nWarning: Please set self.file_id\n\n====================")

        print("filename_suffix:", self.filename_suffix)


    ## Overridden update function containing all parameter updates of the algorithm (core part)
    def update(self):
        """Run one global iteration of parameter updates, according to mode_update_parameter."""
        if self.mode_update_parameter == "one_by_one":
            # Refresh the auxiliary statistics before each individual update.
            steps = (
                self.update_W_noise,
                self.update_H_noise,
                self.update_covarianceMatrix,
                self.update_U,
                self.update_V,
            )
            for step in steps:
                self.update_axiliary_variable()
                step()
            self.update_axiliary_variable()
            self.update_Z_speech(calc_constant=True)
            self.normalize()
        elif self.mode_update_parameter in ("all", "Z"):
            # Compute the auxiliary statistics once, then update everything;
            # in "Z" mode the trace constants are recomputed before updating Z.
            self.update_axiliary_variable_and_Z()
            self.update_WH_noise()
            self.update_covarianceMatrix()
            self.update_UV()
            self.update_Z_speech(calc_constant=(self.mode_update_parameter == "Z"))
            self.normalize()


    ## The following are the parameter update functions
    def update_axiliary_variable_and_Z(self):
        """ Compute the auxiliary statistics shared by the subsequent updates.

        Fills:
            self.Yinv_FTMM: (Y_ft)^-1, inverse of the mixture covariance
            self.Yinv_X_Yinv_FTMM: (Y_ft)^-1 X_ft (Y_ft)^-1
            self.tr_Cov_Yinv_X_Yinv_NFT / self.tr_Cov_Yinv_NFT: trace terms of
                (44) (47) (50) (53) / (45) (48) (51) (54) of [1]
            self.tr_Omega_Cov_FT / self.tr_Cov_Phi_X_Phi_FT: trace terms of (42) of [1]
            self.UV_FT: u_F * v_T
        """
        # (14) of [1]: per-source covariance Y_nft = lambda_nft * G_nf
        Y_NFTMM = self.lambda_NFT[..., None, None] * self.covarianceMatrix_NFMM[:, :, None]
        if self.xp == np:
            self.Yinv_FTMM = np.linalg.inv(Y_NFTMM.sum(axis=0)) # (Y_ft)^-1 in [1]
            Yx_FTM1 = self.Yinv_FTMM @ self.X_FTM[..., None] # (Y_ft)^-1 * x_ft
            # (Y_ft)^-1 * X_ft * (Y_ft)^-1, formed as an outer product of Yx with itself
            self.Yinv_X_Yinv_FTMM = Yx_FTM1 @ Yx_FTM1.conj().transpose(0, 1, 3, 2) # for reducing computational cost in case of CPU
            # (G_0f)^-1: inverse SCM of the speech source
            cov_inv_FMM = np.linalg.inv(self.covarianceMatrix_NFMM[0])
        else:
            # GPU path: batched inverse helper (defined in FCA)
            self.Yinv_FTMM = self.calculateInverseMatrix(Y_NFTMM.sum(axis=0))
            Yx_FTM1 = self.Yinv_FTMM @ self.X_FTM[..., None]
            self.Yinv_X_Yinv_FTMM = Yx_FTM1 @ Yx_FTM1.conj().transpose(0, 1, 3, 2) # for reducing computational cost in case of CPU
            cov_inv_FMM = self.calculateInverseMatrix(self.covarianceMatrix_NFMM[0])

        # trace term in (44) (47) (50) (53) of [1]
        self.tr_Cov_Yinv_X_Yinv_NFT = self.xp.trace(self.covarianceMatrix_NFMM[:, :, None] @ self.Yinv_X_Yinv_FTMM[None], axis1=3, axis2=4).real
        # trace term in (45) (48) (51) (54) of [1]
        self.tr_Cov_Yinv_NFT = self.xp.trace(self.covarianceMatrix_NFMM[:, :, None] @ self.Yinv_FTMM[None], axis1=3, axis2=4).real

        # (38) of [1]: filter Phi for the speech source
        Phi_FTMM = Y_NFTMM[0] @ self.Yinv_FTMM
        # last trace term in (42) of [1]
        self.tr_Omega_Cov_FT = self.tr_Cov_Yinv_NFT[0]
        # first trace term in (42) of [1]
        self.tr_Cov_Phi_X_Phi_FT = self.xp.trace(cov_inv_FMM[:, None] @ Phi_FTMM @ self.XX_FTMM @ Phi_FTMM.transpose(0, 1, 3, 2).conj(), axis1=2, axis2=3).real
        # u_F * v_T: scale of the speech PSD
        self.UV_FT = self.u_F[:, None] * self.v_T[None]


    def update_UV(self):
        """Jointly update the speech scaling factor u_F and activation v_T (multiplicative updates)."""
        # numerator / denominator of the update for v_T ((47) & (48) in [1])
        num_v = (self.u_F[:, None] * self.power_speech_FT * self.tr_Cov_Yinv_X_Yinv_NFT[0]).sum(axis=0)
        den_v = (self.u_F[:, None] * self.power_speech_FT * self.tr_Cov_Yinv_NFT[0]).sum(axis=0)

        # numerator / denominator of the update for u_F ((44) & (45) in [1])
        num_u = (self.v_T[None] * self.power_speech_FT * self.tr_Cov_Yinv_X_Yinv_NFT[0]).sum(axis=1)
        den_u = (self.v_T[None] * self.power_speech_FT * self.tr_Cov_Yinv_NFT[0]).sum(axis=1)

        # (49) in [1]
        self.v_T = self.v_T * self.xp.sqrt(num_v / den_v)
        # (46) in [1]
        self.u_F = self.u_F * self.xp.sqrt(num_u / den_u)


    def update_U(self):
        """Update the speech frequency scaling factor u_F ((44)-(46) of [1]) and refresh lambda."""
        num = (self.v_T[None] * self.power_speech_FT * self.tr_Cov_Yinv_X_Yinv_NFT[0]).sum(axis=1)
        den = (self.v_T[None] * self.power_speech_FT * self.tr_Cov_Yinv_NFT[0]).sum(axis=1)
        # (46) in [1]
        self.u_F = self.u_F * self.xp.sqrt(num / den)
        # (3) in [1]: refresh the speech PSD
        self.lambda_NFT[0] = self.u_F[:, None] * self.v_T[None] * self.power_speech_FT


    def update_V(self):
        """Update the speech time activation v_T ((47)-(49) of [1]) and refresh UV_FT / lambda."""
        num = (self.u_F[:, None] * self.power_speech_FT * self.tr_Cov_Yinv_X_Yinv_NFT[0]).sum(axis=0)
        den = (self.u_F[:, None] * self.power_speech_FT * self.tr_Cov_Yinv_NFT[0]).sum(axis=0)
        # (49) in [1]
        self.v_T = self.v_T * self.xp.sqrt(num / den)
        # refresh the cached u_F * v_T product
        self.UV_FT = self.u_F[:, None] * self.v_T[None]
        # (3) in [1]: refresh the speech PSD
        self.lambda_NFT[0] = self.u_F[:, None] * self.v_T[None] * self.power_speech_FT


    def update_WH_noise(self):
        """ Jointly update noise bases W ((52) of [1]) and activations H ((55) of [1]).

        Note: in the CPU branch the accumulators for H (a_2, b_2) read the value of
        W at frequency f *before* W[:, f] is overwritten later in the same loop
        iteration, so the statement order inside the loop matters.
        lambda_NFT is not refreshed here; normalize(), called at the end of
        update(), recomputes it.
        """
        if self.xp == np: # CPU path (numpy) when no GPU is available
            a_2 = self.xp.zeros([self.n_noise, self.n_basis_noise, self.n_time])
            b_2 = self.xp.zeros([self.n_noise, self.n_basis_noise, self.n_time])

            for f in range(self.n_freq):
                # (50) in [1]
                a_1 = (self.H_noise_NnKT.transpose(0, 2, 1) * self.tr_Cov_Yinv_X_Yinv_NFT[1:, f, :, None]).sum(axis=1) # Nn K
                # (51) in [1]
                b_1 = (self.H_noise_NnKT.transpose(0, 2, 1) * self.tr_Cov_Yinv_NFT[1:, f, :, None]).sum(axis=1) # Nn K

                # (53) in [1]
                a_2 += (self.W_noise_NnFK[:, f, :, None] * self.tr_Cov_Yinv_X_Yinv_NFT[1:, f, None]) # Nn K T
                # (54) in [1]
                b_2 += (self.W_noise_NnFK[:, f, :, None] * self.tr_Cov_Yinv_NFT[1:, f, None]) # Nn K T

                # (52) in [1]
                self.W_noise_NnFK[:, f] = self.W_noise_NnFK[:, f] * self.xp.sqrt(a_1 / b_1)
            # (55) in [1]
            self.H_noise_NnKT = self.H_noise_NnKT * self.xp.sqrt(a_2 / b_2)
        else: # GPU path (cupy), fully vectorized over frequency
            # (50) in [1]
            a_1 = (self.H_noise_NnKT.transpose(0, 2, 1)[:, None] * self.tr_Cov_Yinv_X_Yinv_NFT[1:, :, :, None]).sum(axis=2) # Nn F K
            # (51) in [1]
            b_1 = (self.H_noise_NnKT.transpose(0, 2, 1)[:, None] * self.tr_Cov_Yinv_NFT[1:, :, :, None]).sum(axis=2) # Nn F K

            # (53) in [1]
            a_2 = (self.W_noise_NnFK[..., None] * self.tr_Cov_Yinv_X_Yinv_NFT[1:, :, None]).sum(axis=1) # Nn K T
            # (54) in [1]
            b_2 = (self.W_noise_NnFK[..., None] * self.tr_Cov_Yinv_NFT[1:, :, None]).sum(axis=1) # Nn K T

            # (52) in [1]
            self.W_noise_NnFK = self.W_noise_NnFK * self.xp.sqrt(a_1 / b_1)
            # (55) in [1]
            self.H_noise_NnKT = self.H_noise_NnKT * self.xp.sqrt(a_2 / b_2)


    def update_H_noise(self):
        """Update the noise activations H via multiplicative update (55) of [1], then refresh lambda."""
        if self.xp == np:  # CPU (numpy): accumulate over frequency with a loop to limit memory use
            num = self.xp.zeros([self.n_noise, self.n_basis_noise, self.n_time])
            den = self.xp.zeros([self.n_noise, self.n_basis_noise, self.n_time])
            for f in range(self.n_freq):
                num += (self.W_noise_NnFK[:, f, :, None] * self.tr_Cov_Yinv_X_Yinv_NFT[1:, f, None])  # (53) in [1], Nn K T
                den += (self.W_noise_NnFK[:, f, :, None] * self.tr_Cov_Yinv_NFT[1:, f, None])  # (54) in [1], Nn K T
        else:  # GPU (cupy): fully vectorized over frequency
            num = (self.W_noise_NnFK[..., None] * self.tr_Cov_Yinv_X_Yinv_NFT[1:, :, None]).sum(axis=1)  # (53) in [1], Nn K T
            den = (self.W_noise_NnFK[..., None] * self.tr_Cov_Yinv_NFT[1:, :, None]).sum(axis=1)  # (54) in [1], Nn K T
        # (55) in [1]
        self.H_noise_NnKT = self.H_noise_NnKT * self.xp.sqrt(num / den)
        # (5) in [1]: refresh the noise PSDs
        self.lambda_NFT[1:] = self.W_noise_NnFK @ self.H_noise_NnKT + EPS


    def update_W_noise(self):
        """Update the noise bases W via multiplicative update (52) of [1], then refresh lambda."""
        H_NnTK = self.H_noise_NnKT.transpose(0, 2, 1)  # hoisted out of the loop; H is not modified here
        if self.xp == np:  # CPU (numpy): frequency-wise loop
            for f in range(self.n_freq):
                num = (H_NnTK * self.tr_Cov_Yinv_X_Yinv_NFT[1:, f, :, None]).sum(axis=1)  # (50) in [1], Nn K
                den = (H_NnTK * self.tr_Cov_Yinv_NFT[1:, f, :, None]).sum(axis=1)  # (51) in [1], Nn K
                # (52) in [1]
                self.W_noise_NnFK[:, f] = self.W_noise_NnFK[:, f] * self.xp.sqrt(num / den)
        else:  # GPU (cupy): fully vectorized over frequency
            num = (H_NnTK[:, None] * self.tr_Cov_Yinv_X_Yinv_NFT[1:, :, :, None]).sum(axis=2)  # (50) in [1], Nn F K
            den = (H_NnTK[:, None] * self.tr_Cov_Yinv_NFT[1:, :, :, None]).sum(axis=2)  # (51) in [1], Nn F K
            # (52) in [1]
            self.W_noise_NnFK = self.W_noise_NnFK * self.xp.sqrt(num / den)
        # (5) in [1]: refresh the noise PSDs
        self.lambda_NFT[1:] = self.W_noise_NnFK @ self.H_noise_NnKT + EPS


    ## Normalization (resolves scale ambiguities between the parameters)
    def normalize(self):
        """Resolve the scale ambiguities between the SCMs, u_F, v_T, W and H, then refresh lambda."""
        # (62) in [1]: make every SCM unit-trace and push the scale into u_F / W
        scale_NF = self.xp.trace(self.covarianceMatrix_NFMM, axis1=2, axis2=3).real
        self.covarianceMatrix_NFMM = self.covarianceMatrix_NFMM / scale_NF[:, :, None, None]
        self.u_F = self.u_F * scale_NF[0]
        self.W_noise_NnFK = self.W_noise_NnFK * scale_NF[1:][:, :, None]

        # (63) in [1]: normalize u_F to sum to one and push the scale into v_T
        scale_u = self.u_F.sum()
        self.u_F = self.u_F / scale_u
        self.v_T = scale_u * self.v_T

        # (64) in [1]: normalize each basis of W over frequency and push the scale into H
        scale_NnK = self.W_noise_NnFK.sum(axis=1)
        self.W_noise_NnFK = self.W_noise_NnFK / scale_NnK[:, None]
        self.H_noise_NnKT = self.H_noise_NnKT * scale_NnK[:, :, None]

        # (3) in [1]: refresh the speech PSD
        self.lambda_NFT[0] = self.u_F[:, None] * self.v_T[None] * self.power_speech_FT
        # (5) in [1]: refresh the noise PSDs
        self.lambda_NFT[1:] = self.W_noise_NnFK @ self.H_noise_NnKT + EPS


    ## Compute the loss function for the latent variable Z
    def loss_func_Z(self, z, vae, n):
        """Loss (35) of [1] as a chainer graph over the latent variable z of source n."""
        if n != 0:
            # Only the speech source (n == 0) is modeled by the VAE prior.
            raise NotImplementedError
        # (3) in [1]: speech PSD decoded from z, scaled by u_F * v_T (EPS avoids division by zero)
        power_FT = chf.exp(vae.decode(z).T) * self.UV_FT + EPS
        # (35) in [1]
        return chf.sum(1 / power_FT * self.tr_Cov_Phi_X_Phi_FT + power_FT * self.tr_Omega_Cov_FT)


    ## Update the speech latent variable Z (via the VAE)
    def update_Z_speech(self, var_propose_distribution=1e-4, calc_constant=True):
        """ Update the latent variable Z of speech by backprop and/or Metropolis-Hastings sampling.

        Parameters:
            var_propose_distribution: float
                the variance of the propose distribution
            calc_constant: boolean
                whether to recompute the trace terms of (41) in [1] before updating Z

        Results:
            self.Z_speech_DT: self.xp.array [ n_latent x T ]
                the latent variable of each speech
        """
        if calc_constant: # compute the trace terms appearing in (41) of [1]
            self.calculate_constant_for_update_Z()

        if "backprop" in self.mode_update_Z: # gradient-based update of Z with chainer's Adam optimizer
            for it in range(self.n_Z_iteration):
                with chainer.using_config('train', False):
                    self.z_optimizer_speech.update(self.loss_func_Z, self.z_link_speech.z, self.speech_VAE, 0)

            self.Z_speech_DT = self.z_link_speech.z.data.T
            self.power_speech_FT = self.speech_VAE.decode_cupy(self.Z_speech_DT)

        if "sampling" in self.mode_update_Z:
            # Metropolis-Hastings: per-frame Gaussian proposal with this (log-)variance
            log_var = self.xp.log(self.xp.ones_like(self.Z_speech_DT).astype(self.xp.float32) * var_propose_distribution)
            # NOTE(review): Z_speech_old_DT aliases self.Z_speech_DT, so the in-place
            # frame assignments below also mutate self.Z_speech_DT directly.
            Z_speech_old_DT = self.Z_speech_DT
            lambda_speech_old_FT = self.speech_VAE.decode_cupy(Z_speech_old_DT) * self.UV_FT  # (3) of [1]
            for it in range(self.n_Z_iteration):
                Z_speech_new_DT = chf.gaussian(Z_speech_old_DT, log_var).data  # proposal; Z has a standard Gaussian prior ((4) of [1])
                lambda_speech_new_FT = self.speech_VAE.decode_cupy(Z_speech_new_DT) * self.UV_FT  # (3) of [1]
                # acceptance ratio: exp form of (41) in [1] plus the Gaussian prior term on Z
                acceptance_rate =  self.xp.exp((-1 * (1/lambda_speech_new_FT - 1/lambda_speech_old_FT) * self.tr_Cov_Phi_X_Phi_FT -  (lambda_speech_new_FT - lambda_speech_old_FT) * self.tr_Omega_Cov_FT).sum(axis=0) - (Z_speech_new_DT ** 2 - Z_speech_old_DT ** 2).sum(axis=0)/2)
                # draw T uniform(0, 1) numbers; frames whose draw is below the ratio accept the proposal
                acceptance_boolean = self.xp.random.random([self.n_time]) < acceptance_rate
                # accepted frames take the new sample; rejected frames keep the old one
                Z_speech_old_DT[:, acceptance_boolean] = Z_speech_new_DT[:, acceptance_boolean]
                lambda_speech_old_FT[:, acceptance_boolean] = lambda_speech_new_FT[:, acceptance_boolean]

            self.Z_speech_DT = Z_speech_old_DT
            self.z_link_speech.z = chainer.Parameter(self.Z_speech_DT.T)
            self.power_speech_FT = self.speech_VAE.decode_cupy(self.Z_speech_DT)

    
    ## Compute the trace terms appearing in Eq. (41)
    def calculate_constant_for_update_Z(self):
        """ Precompute the trace terms of (41) in [1] used by loss_func_Z and the sampler. """
        # Y_nft = lambda_nft * G_nf
        Y_NFTMM = self.lambda_NFT[..., None, None] * self.covarianceMatrix_NFMM[:, :, None]
        if self.xp == np:
            # (Y_ft)^-1
            self.Yinv_FTMM = np.linalg.inv(Y_NFTMM.sum(axis=0))
            # (G_0f)^-1
            cov_inv_FMM = np.linalg.inv(self.covarianceMatrix_NFMM[0])
        else: # GPU path: batched inverse helper (defined in FCA)
            self.Yinv_FTMM = self.calculateInverseMatrix(Y_NFTMM.sum(axis=0))
            cov_inv_FMM = self.calculateInverseMatrix(self.covarianceMatrix_NFMM[0])

        # (38) in [1]
        Phi_FTMM = Y_NFTMM[0] @ self.Yinv_FTMM
        # trace term on the second line of (41) in [1]
        self.tr_Omega_Cov_FT = self.xp.trace(self.covarianceMatrix_NFMM[0, :, None] @ self.Yinv_FTMM, axis1=2, axis2=3).real
        # trace term on the first line of (41) in [1]
        self.tr_Cov_Phi_X_Phi_FT = self.xp.trace(cov_inv_FMM[:, None] @ Phi_FTMM @ self.XX_FTMM @ Phi_FTMM.transpose(0, 1, 3, 2).conj(), axis1=2, axis2=3).real


    ## Save the estimated parameters (enabled via the save_parameter flag of solve())
    def save_parameter(self, filename):
        """ Save the estimated parameters as a flat 7-element pickle list.

        Parameters:
        -----------
            filename: str
                path of the output pickle file
        """
        param_list = [self.covarianceMatrix_NFMM, self.lambda_NFT]
        param_list.extend([self.u_F, self.v_T, self.Z_speech_DT])
        # Bug fix: the original used append([W, H]), nesting both arrays as a single
        # element; that broke cuda.to_cpu below and made the file incompatible with
        # load_parameter, which unpacks 7 flat arrays.
        param_list.extend([self.W_noise_NnFK, self.H_noise_NnKT])

        if self.xp != np:
            param_list = [cuda.to_cpu(param) for param in param_list]

        # Use a context manager so the file handle is always closed.
        with open(filename, "wb") as f:
            pic.dump(param_list, f)


    ## Load saved parameters; defined but never called here — invoke it yourself if needed
    def load_parameter(self, filename):
        """ Load the parameters written by save_parameter and restore the dimension attributes.

        Parameters:
        -----------
            filename: str
                path of the pickle file to read
        """
        # Fix: the original leaked the file handle (pic.load(open(...))).
        with open(filename, "rb") as f:
            param_list = pic.load(f)
        if self.xp != np:
            param_list = [cuda.to_gpu(param) for param in param_list]

        self.covarianceMatrix_NFMM, self.lambda_NFT, self.u_F, self.v_T, self.Z_speech_DT, self.W_noise_NnFK, self.H_noise_NnKT = param_list
        # Recover the dimension attributes from the loaded arrays.
        self.n_source, self.n_freq, self.n_time = self.lambda_NFT.shape
        self.n_mic = self.covarianceMatrix_NFMM.shape[-1]
        self.n_latent = self.Z_speech_DT.shape[0]
        self.n_noise, self.n_speech = self.n_source - 1, 1



class Z_link(chainer.link.Link):
    """Thin chainer Link that wraps the latent variable Z as a trainable Parameter."""

    def __init__(self, z):
        super().__init__()
        with self.init_scope():
            # Register z so the optimizer (Adam) can update it by backprop.
            self.z = chainer.Parameter(z)


## Main entry point: the program starts running here
if __name__ == "__main__":
    # Command-line arguments. "input_fileName" has no default, so the path of the
    # observed multichannel wav file must be given on every run.
    import argparse

    def _str2bool(s):
        # Bug fix: with type=str, any CLI value (including "False") was truthy.
        return str(s).lower() not in ("false", "0", "no", "")

    parser = argparse.ArgumentParser()
    parser.add_argument(         'input_fileName', type= str, help='filename of the multichannel observed signals')
    parser.add_argument(              '--file_id', type= str, default="None", help='file id')
    parser.add_argument(                  '--gpu', type= int, default=    -1, help='GPU ID')
    parser.add_argument(                '--n_fft', type= int, default=  1024, help='number of frequencies')
    parser.add_argument(              '--n_noise', type= int, default=     1, help='number of noise')
    parser.add_argument(             '--n_latent', type= int, default=    16, help='dimention of encoded vector')
    parser.add_argument(        '--n_basis_noise', type= int, default=    64, help='number of basis of noise (MODE_noise=NMF)')
    parser.add_argument(             '--init_SCM', type=  str, default="obs", help='unit, obs, ILRMA')
    parser.add_argument(          '--n_iteration', type= int, default=    30, help='number of iteration')
    parser.add_argument(        '--n_Z_iteration', type= int, default=    30, help='number of update Z iteration')
    parser.add_argument(        '--mode_update_Z', type= str, default="sampling", help='sampling, sampling2, backprop, backprop2, hybrid, hybrid2')
    parser.add_argument('--mode_update_parameter', type= str, default= "all", help='all, one_by_one')
    parser.add_argument(            '--save_path', type= str, default="..\\output\\", help='save path of wave file')
    parser.add_argument(             '--save_wav', type=_str2bool, default=True, help='save wave file')
    args = parser.parse_args()

    # Load the trained speech VAE.
    sys.path.append("../DeepSpeechPrior")
    import network_VAE
    model_fileName = "../DeepSpeechPrior/model-VAE-best-scale=gamma-D={}.npz".format(args.n_latent)
    speech_VAE = network_VAE.VAE(n_latent=args.n_latent)
    serializers.load_npz(model_fileName, speech_VAE)
    name_DNN = "VAE"

    # Without a GPU, pass --gpu with a negative value so numpy is used instead of cupy.
    if args.gpu < 0:
        import numpy as xp
    else:
        import cupy as xp
        print("Use GPU " + str(args.gpu))
        cuda.get_device_from_id(args.gpu).use()
        speech_VAE.to_gpu()

    # Read the multichannel audio and compute the STFT of each channel.
    wav, fs = sf.read(args.input_fileName)
    wav = wav.T
    M = len(wav)
    for m in range(M):
        # librosa.core was a deprecated namespace; call librosa.stft directly.
        tmp = librosa.stft(wav[m], n_fft=args.n_fft, hop_length=args.n_fft // 4)
        if m == 0:
            # np.complex was removed in NumPy 1.24; builtin complex = complex128.
            spec = np.zeros([tmp.shape[0], tmp.shape[1], M], dtype=complex)
        spec[:, :, m] = tmp

    # Build the separator and hand over the parameters.
    separater = MNMF_DP(n_noise=args.n_noise, n_Z_iteration=args.n_Z_iteration, speech_VAE=speech_VAE, n_latent=args.n_latent, n_basis_noise=args.n_basis_noise, xp=xp, init_SCM=args.init_SCM, mode_update_parameter=args.mode_update_parameter)

    # Precompute the spectrogram-derived quantities.
    separater.load_spectrogram(spec)
    # Tags used in the output file names.
    separater.name_DNN = name_DNN
    separater.file_id = args.file_id
    separater.fs = fs
    # solve() (defined in FCA.py) runs the updates, separation, and saving.
    separater.solve(n_iteration=args.n_iteration, save_likelihood=False, save_parameter=False, save_path=args.save_path, interval_save_parameter=100, save_wav=args.save_wav)
