\documentclass{ieeeaccess}
\usepackage{cite}
\usepackage{amsmath,amssymb,amsthm,amsfonts}
\usepackage{algorithmic, algorithm}
\usepackage{graphicx}
\usepackage{caption}
\usepackage{textcomp}
\usepackage{epsfig}
\usepackage{url}
\usepackage{epstopdf}
\usepackage{diagbox}

\usepackage{bm}
\usepackage{longtable,multirow,colortbl,booktabs}
\renewcommand{\algorithmicrequire}{\textbf{Input:}}
\renewcommand{\algorithmicensure}{\textbf{Output:}}
\renewcommand{\algorithmicreturn}{\textbf{Initialization:}}

\newtheorem{theorem}[subsubsection]{Theorem}
\newtheorem{lemma}[subsubsection]{Lemma}
\newtheorem{definition}[subsubsection]{Definition}
\newtheorem{proposition}[section]{Proposition}

\newcommand{\st}{\mathrm{s.t.}}
\newcommand{\diag}{\mathrm{diag}}
\newcommand{\tr}{\mathrm{tr}}
\renewcommand\arraystretch{1.5}
\DeclareMathOperator*{\argmin}{arg\,min}
\def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em
    T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}}
\ifCLASSOPTIONcompsoc
    \usepackage[caption=false, font=normalsize, labelfont=sf, textfont=sf]{subfig}
\else
\usepackage[caption=false, font=footnotesize]{subfig}
\fi
\begin{document}
\history{Date of publication xxxx 00, 0000, date of current version xxxx 00, 0000.}
\doi{10.1109/ACCESS.2017.DOI}

\title{Local Sensitive Dual Concept Factorization for Unsupervised Feature Selection}
\author{\uppercase{Liang Du}\authorrefmark{1,2,3}, \IEEEmembership{Member, IEEE},
    \uppercase{Yalong Fan}\authorrefmark{1} and \uppercase{Peng Zhou}\authorrefmark{4}
}
\address[1]{School of Computer and Information Technology, Shanxi University, Taiyuan 030006, Shanxi Province, China}
\address[2]{Institute of Big Data Science and Industry, Shanxi University, Taiyuan 030006, Shanxi Province, China}
\address[3]{Key Laboratory of Computational Intelligence and Chinese Information Processing of Ministry of Education, Shanxi University, Taiyuan, 030006, China}
\address[4]{School of Computer Science and Technology, Anhui University, Hefei 230601, China}
\tfootnote{This work is supported in part by the National Natural Science Foundation of China under Grants 61502289, 61603230, 61806003, 61802238 and 61872226, and in part by the Natural Science Foundation of Shanxi Province, China under Grants No.~201701D121052 and No.~201801D221163.}

\markboth
{Author \headeretal: Preparation of Papers for IEEE TRANSACTIONS and JOURNALS}
{Author \headeretal: Preparation of Papers for IEEE TRANSACTIONS and JOURNALS}

\corresp{Corresponding author: Liang Du (e-mail: csliangdu@gmail.com).}

\begin{abstract}
In this paper, we propose a novel Local Sensitive Dual Concept Learning method for the task of unsupervised feature selection. We first approximate the original data matrix by the new proposed dual concept learning model, which inherits the merit of co-clustering based dual learning mechanism for more interpretable and compact data reconstruction. We then adopt the local sensitive loss function to capture the local structure of data by emphasizing more on most similar pairs with small errors. In this way, our method can achieve better results by more compact data reconstruction and more faithful local structure preserving. An iterative algorithm with convergence guarantee is also developed to find the optimal solution. To validate the effectiveness of the proposed method, we fully investigate the performance improvement by the newly developed terms, individually and simultaneously. Extensive experiments on benchmark datasets further show that the proposed method outperforms many state-of-the-art algorithms.
\end{abstract}

\begin{keywords}
Dual Concept Learning, Local Sensitive Regularization, Correntropy Induced Metric, Unsupervised Feature Selection
\end{keywords}

\titlepgskip=-15pt

\maketitle




\section{Introduction}
With the rapid development of data acquisition technology, huge amounts of high-dimensional data become ubiquitous in a variety of real-world applications. These data often contain many irrelevant, noisy, and redundant features. Feature selection, as a data preprocessing strategy, has been proven to be effective and efficient in removing such features and keeping only a few relevant and informative ones, which not only reduces the storage and computational cost but also avoids significant loss of information or degradation of the subsequent learning performance.

Feature selection algorithms can be broadly classified into supervised, semi-supervised and unsupervised methods according to the availability of supervision. Since it is expensive in both time and effort to acquire labeled data, unsupervised feature selection has gained considerable attention recently. Compared to supervised or semi-supervised counterparts, unsupervised feature selection is generally more challenging due to the lack of supervised information.

Various unsupervised feature selection methods have been developed and can be further roughly categorized into filter methods, wrapper methods and embedded methods according to different selection strategies \cite{ufs_survey}. These methods have well demonstrated that for the task of unsupervised feature selection it is vitally important for the selected features to capture the cluster structure of data, to minimize the reconstruction error and to preserve the local structure of data \cite{cgufs3}. Specifically speaking, the cluster structure can be discovered by many clustering algorithms, such as spectral analysis\cite{mcfs,ndfs,glspfs,jelsr2,cufs}, matrix factorization \cite{ndfs,nscr,rufs}, consensus clustering\cite{cgufs}. The original data can be approximated in the form of data reconstruction error minimization, which can be achieved by the self-representation based methods\cite{rsr,rsr2,scufs,hsfs}, the matrix factorization based methods \cite{mffs,eufs,lgspufs}, the dictionary learning based methods \cite{cdlfs}, the subspace learning methods \cite{gloss,jdssl}. The local structure can be characterized in different ways such as LPP \cite{SPNFSR,l1ufs}, LLE \cite{glspfs}, local learning \cite{lgdfs}. The above three main concerns about unsupervised feature selection can be achieved individually or simultaneously.

Although various methods have been developed, there are still at least two problems that are not well addressed. On the one hand, the original data should be approximated only from representative samples and relevant features to achieve more faithful and interpretable results. However, the self-representation based methods only take the relevant features from all the candidate samples \cite{rsr,SPNFSR,gsrufs,l1ufs,disr,opsr}, while the matrix factorization and dictionary learning methods estimate the mix-signed and less interpretable coefficients or bases \cite{hsfs,cdlfs,jdssl,mffs,lrsrufs}. On the other hand, the selected features should preserve the local structure of the given data faithfully. However, most existing graph based regularization methods take the squared loss function, which is largely determined by pairs with large closeness errors. In fact, most local similar pairs often lead to relatively small errors. Such a loss function is therefore poorly matched with the prevalent small errors for the task of unsupervised feature selection.


In this paper, we propose two novel operators to partially alleviate the above problems. On the one hand, we propose to reconstruct the data matrix via dual concept learning, where the feature-side and sample-side topics are represented by non-negative linear combinations. The main advantages of the dual concept learning are as follows. Firstly, the latent topic or concept obtained from non-negative coefficients is more interpretable. Secondly, the dual learning mechanism can fully explore the duality between sample and feature, which can be used to further improve the clustering results by many existing co-clustering algorithms. Thirdly, the informative features can be selected by imposing the often used group sparsity regularization. On the other hand, we further replace the often used squared loss function with the Correntropy Induced Metric (CIM) to evaluate the ability of features for local structure preserving. Compared with the squared loss function which overvalues the large errors, the CIM function emphasizes the relatively small errors. Actually, most of the errors for local structure preserving come from the similar pairs. As a result, most similar pairs incur small errors. It is more appropriate to take the corresponding local sensitive loss function, such as CIM, to regulate the local pairs with small errors. By integrating the above novel terms, i.e., the dual concept learning and the local sensitive regularization, we then have the novel Local Sensitive Dual Concept Learning (LSDCL) method for unsupervised feature selection. We further derive the multiplicative update rules for its optimization with convergence guarantee. We take two types of experiments for comparison. We first design four unsupervised feature selection methods by replacing the proposed terms one by one. We can observe that the clustering performance with selected features can be improved gradually.
Moreover, extensive experiments on benchmark data sets also well demonstrate the superiority against state-of-the-art methods.


The paper is organized as follows. Section 2 describes some related work. Section 3 presents in detail the proposed method. The optimization algorithm is presented in Section 4. Section 5 shows the experimental results, and Section 6 concludes the paper. In order not to distract from the reading, proofs of the results are moved to the Appendix.
\section{Related Works}
In this section, we first present the basic notations. Then we introduce the clustering guided, reconstruction based methods and closely related matrix factorization methods.

\textbf{Notations.} Given the matrix $\mathbf{X} \in \mathcal{R}^{d \times n}$ where $d$ is the number of features and $n$ is the number of samples, the task of unsupervised feature selection is to find a feature subset with size $m$ which contains the most informative features. In this paper, matrices are written as uppercase letters and vectors are written as boldface lower-case letters.  The $\ell_2$-norm of a vector $\mathbf{x}$ is defined as $||\mathbf{x}||_2 = \sqrt{\mathbf{x}^T \mathbf{x}}$. The $\ell_{2,1}$-norm of a matrix is defined as $||\mathbf{X}||_{2,1} = \sum_{i=1}^{n}\sqrt{\sum_{j=1}^{m} \mathbf{X}_{ij}^2} = \sum_{i=1}^{n} ||\mathbf{x}_i||_2$.



\subsection{Clustering guided methods}
The cluster structure is one of the essential underlying structures of data. Many unsupervised feature selection algorithms have been proposed by discovering the cluster structure in different ways, such as spectral analysis\cite{mcfs,ndfs,glspfs,jelsr2,cufs}, subspace clustering, matrix factorization \cite{ndfs,nscr,rufs}, consensus clustering\cite{cgufs}. Similar to the supervised feature selection algorithms, the pseudo labels have been widely used to guide the search of informative features \cite{mcfs,glspfs,jelsr,fsasl}. Generally speaking, these algorithms can be formulated as
\begin{align}
\label{opt_01}
\min_{\mathbf{U}, \mathbf{Y}} \quad \mathbf{\mathcal{L}}^1(\mathbf{Y}) + \lambda_1 \mathbf{\mathcal{L}}^2(\mathbf{Y}, \mathbf{X}^T \mathbf{U}) + \lambda_2\Omega^1(\mathbf{U})_{p,q}
\end{align}
where $\mathbf{U} \in \mathcal{R}^{d \times c}$, $\mathbf{Y} \in \mathcal{R}^{n \times c}$ is the pseudo-label matrix characterizing the cluster structure, and $n,d,c$ are the numbers of samples, features and clusters, respectively. The second term in Eq.~\eqref{opt_01} reflects the alignment between the selected feature and the pseudo label. One of the often used formulations is the spectral regression as $\mathbf{\mathcal{L}}^2(\mathbf{Y}, \mathbf{X}^T \mathbf{U}) = ||\mathbf{Y} - \mathbf{X}^T \mathbf{U}||^2$. The third term is the sparse regularization on the feature weighting matrix $\mathbf{U}$, such as the $\ell_{p,q}$-norm.

Besides the cluster structure, it has been widely recognized that the local structure plays an important role in generating pseudo labels and subsequent sparse learning. Note that these graph-based structures are usually constructed in the original high-dimensional feature space with uninformative, noisy and redundant features. Actually, the intrinsic structure should be captured only by the informative and relevant features. To remedy this limitation, several methods are further developed to adaptively improve the similarity matrix of the local graph structure with the integration of feature selection in a joint learning framework \cite{fsasl,sogfs,URAFS,anfs,ulap}. The idea of adaptive learning has also been developed to improve the existing clustering and dimension reduction algorithms \cite{fakm,larc}.


The sparse-learning-based feature selection methods have gained increasing popularity in recent years. These methods usually directly embed feature selection into a certain learning algorithm (such as linear regression, SVM, etc.). Nonetheless, the selected features do not necessarily achieve good performance in other learning tasks \cite{ufs_survey}.

\subsection{Reconstruction based methods}
The principle of minimizing the reconstruction error has also been widely used in many unsupervised tasks, such as the learning of deep neural networks \cite{hinton2006reducing}, subspace clustering and active learning \cite{cur-fs}, to reveal the underlying intrinsic structure of data. From the data reconstruction perspective, the importance of a feature can be characterized by the capability of features to approximate the original data via a reconstruction function. The general formulation of existing reconstruction based methods is
\begin{align}
\min_{\mathbf{U}} \quad \mathbf{\mathcal{L}}^3(\mathbf{X}^T, \mathbf{X}^T \mathbf{U}) + \lambda_1 \mathbf{\mathcal{L}}^4(\mathbf{U}) + \lambda_2 \Omega^2(\mathbf{U})
\end{align}
where $\mathbf{U} \in \mathcal{R}^{d \times d}$, $\mathbf{\mathcal{L}}(\mathbf{X}^T, \mathbf{X}^T \mathbf{U})$ represents the reconstruction error, and the last two terms are adopted for regularization such as the manifold, low-rank and sparse regularization.

Several methods have been proposed by using the self-representation based reconstruction error function \cite{cpfs,rsr,rsr2,disr,gsrufs,SPNFSR,lrsl,opsr,cgufs3,l1ufs}, where each feature is represented as the linear combination of its relevant features according to
\begin{align}\label{fea_reconstruction}
\min_{\mathbf{U}} \quad \mathbf{\mathcal{L}}^3(\mathbf{X}^T, \mathbf{X}^T \mathbf{U}) = || \mathbf{X}^T - \mathbf{X}^T \mathbf{U}||_{2,p}
\end{align}
where $\mathbf{U} \in \mathcal{R}^{d \times d}$, $p=2,1$ for squared reconstruction error or its robust version. These methods often lead to high dimensional weighting matrix. More compact and low-dimensional weighting matrix can be obtained under the framework of matrix factorization \cite{mffs,eufs,gloss,lgspufs}, subspace learning \cite{hsfs}, dictionary learning \cite{cdlfs}, where the reconstruction error can be formulated as
\begin{align}
\min_{\mathbf{U}} \quad \mathbf{\mathcal{L}}^3(\mathbf{X}^T, \mathbf{X}^T \mathbf{U}) = || \mathbf{X}^T - \mathbf{X}^T \mathbf{U} \mathbf{V}^T||_{2,p}
\label{lr_reconstruction}
\end{align}
where $\mathbf{U} \in \mathcal{R}^{d \times k}$ and $\mathbf{V} \in \mathcal{R}^{d \times k}$ are feature side reconstruction coefficients.

\subsection{Other Related Matrix Factorization Methods}
Given a non-negative data matrix $\mathbf{X} \in \mathcal{R}_{+}^{d \times n }$, Xu and Gong \cite{cf} proposed an extension of Nonnegative Matrix Factorization (NMF) \cite{nmf} which is called Concept Factorization (CF). In CF, each cluster is required to be a non-negative linear combination of the sample vectors  i.e., $\mathbf{f}_i = \sum_{j=1}^{n} \mathbf{x}_{j} \mathbf{U}_{ij}$. Therefore, CF aims at solving:
\begin{align}\label{cf}
\min_{\mathbf{U}, \mathbf{V}} \quad ||\mathbf{X} - \mathbf{X} \mathbf{U} \mathbf{V}^T ||^2, \quad \st \quad \mathbf{U} \geq 0, \mathbf{V} \geq 0.
\end{align}
where $\mathbf{U} \in \mathcal{R}_{+}^{n \times k}$ and $\mathbf{V} \in \mathcal{R}_{+}^{n \times k}$ are sample side reconstruction coefficients.

Given a data matrix $\mathbf{X} \in \mathcal{R}^{d \times n }$ of rank $\rho=\mathrm{rank}(\mathbf{X})$, rank parameter $k < \rho$, and accuracy parameter $0 < \epsilon < 1$, the CUR factorization \cite{mahoney2009matrix} for $\mathbf{X}$ aims to find $\mathbf{C} \in \mathcal{R}^{d \times m}$ with $m$ columns from $\mathbf{X}$, $\mathbf{R} \in \mathcal{R}^{r \times n }$ with $r$ rows of $\mathbf{X}$, and $\mathbf{U} \in \mathcal{R}^{m \times r}$, with $m, r$, and $\mathrm{rank}(\mathbf{U})$ being as small as possible, such that $\mathbf{X}$ is reconstructed within relative-error:
\begin{align}
||\mathbf{X} - \mathbf{CUR}||_{F}^{2} \leq(1+\epsilon)||\mathbf{X} - \mathbf{X}_k||_{F}^2,
\end{align}
where $\mathbf{X}_k$ is the best rank-$k$ approximation to $\mathbf{X}$, which can be obtained by the Singular Value Decomposition (SVD).



\section{The Proposed Method}

\subsection{Reconstruction based on Dual Concept Learning}

From the perspective of data reconstruction, each sample can be reconstructed by other samples, and meanwhile each feature should be well reconstructed by other features \cite{rsr2}. To avoid the induced high-dimensional reconstruction weight matrix of feature-based one side self-representation in Eq.~\eqref{fea_reconstruction}, it is more preferred to derive a more compact approximation of reconstruction by exploring the low rank structure of data. However, most existing one-side matrix factorization methods fail to fully exploit the duality between features and samples. On the one hand, the low rank matrix factorization methods for unsupervised feature selection with Eq.~\eqref{lr_reconstruction} only take the feature side concept learning into consideration, i.e., each concept of feature $\mathbf{f}_i \in \mathcal{R}^{n \times 1}$ is represented by $\mathbf{f}_i = \sum_{j=1}^{d}\bar{\mathbf{x}}_j^T \mathbf{U}_{ij}$ where $\bar{\mathbf{x}}_j^T \in \mathcal{R}^{n \times 1}$ is the $j$-th feature. On the other hand, the concept factorization in Eq.~\eqref{cf} only considers the sample side concept learning, i.e., each concept of sample $\mathbf{g}_i \in \mathcal{R}^{d \times 1}$ is represented by $\mathbf{g}_i = \sum_{j=1}^{n}\mathbf{x}_j \mathbf{U}_{ij}$.

From the perspective of the co-clustering mechanism, data points can be grouped based on their distribution on features, while features can be grouped based on their distribution on the data points \cite{drcc}. It has been shown that co-clustering algorithms are often superior to traditional one-side clustering \cite{drcc,bkm}. However, existing data reconstruction based co-clustering algorithms usually adopt tri-factorization for clustering, where the cluster is represented by the latent factors.

Motivated by the duality between samples and features in data reconstruction and co-clustering, we propose the following two-side Dual Concept Learning to approximate the data matrix
\begin{align}\label{appro}
\mathbf{X} \approx \mathbf{X}\mathbf{U} \mathbf{S} \mathbf{V}^T \mathbf{X}
\end{align}
where $\mathbf{U}\in \mathcal{R}_{+}^{n \times k}, \mathbf{S}\in \mathcal{R}_{+}^{k \times r},\mathbf{V}\in \mathcal{R}_{+}^{d \times r}$, $\mathbf{X}\mathbf{U} \in \mathcal{R}^{d \times k}$ and $\mathbf{X}^T\mathbf{V} \in \mathcal{R}^{n \times r}$ represent the sample side concept and the feature side concept, respectively.

Compared with the above related work on data reconstruction in Eq.~\eqref{fea_reconstruction} and matrix factorization in Eq.~\eqref{cf}, the newly developed approximation in Eq.~\eqref{appro} estimates both the feature and sample side concepts to exploit the duality between samples and features. Compared with the tri-factorization based co-clustering algorithms in \cite{drcc,bkm}, the above approximation obtains the feature-side and sample-side clusters via non-negative linear combinations of the original features and samples, which makes the result more interpretable.

Based on the approximation in Eq.~\eqref{appro}, various functions can be used to characterize the reconstruction error, here we take the squared loss function as an example. The induced Dual Concept Learning for data reconstruction then can be formulated as follows
%  of quadratic function and the correntropy induced metric as an non-quadratic function
\begin{align}\label{obj_p1}
\min_{\mathbf{U, S, V}} \quad& ||\mathbf{X}-\mathbf{X}\mathbf{U} \mathbf{S} \mathbf{V}^T \mathbf{X}||^{2} \\
\st \quad & \mathbf{U} \geq 0, \mathbf{S} \geq 0, \mathbf{V} \geq 0. \nonumber
\end{align}

%\begin{align}
%\min_{\mathbf{U, S, V}} \quad& ||\mathbf{X}-\mathbf{X}\mathbf{U} \mathbf{S} \mathbf{V}^T \mathbf{X}||^{2} \\
%\st \quad & \mathbf{U} \geq 0, \mathbf{V} \geq 0
%\end{align}


\subsection{Regularization with Local Sensitive Structure Preserving}
It is vitally important to preserve the local structure of data well so that samples that were originally in close proximity in the original high-dimensional space remain so within the selected relevant features. Several methods have been developed to construct the neighbor relationship graph and have been employed to help the task of unsupervised feature selection. The Locality Preserving Projection (LPP) and Local Linear Embedding (LLE) are the two most popular adjacency graph construction and embedding methods \cite{glspfs,SPNFSR,l1ufs}.

In order to characterize the locality of samples for LPP, an
adjacency graph is often constructed by $k$-nearest neighbors. The corresponding similarity matrix $\mathbf{A}$ with $0$-$1$ weighting function can be defined as follows.
\begin{align}
\mathbf{A}_{ij}  = \left\{ \begin{array}{cl}
1 &  \textrm{if } \mathbf{x}_i \textrm{ and } \mathbf{x}_j \textrm{ are neighbors,} \\
0 &  \textrm{otherwise.}
\end{array}
\right.
\end{align}

Given the row-wise sparse linear projection and selection matrix $\mathbf{V}$ in Eq.~\eqref{obj_p1}, the ability of each feature to preserve the local structure of data using LPP is evaluated by the following term with quadratic function
\begin{align}
\min_{\mathbf{V}} \quad& \sum_{i,j=1}^{n} ||\mathbf{V}^T \mathbf{x}_i - \mathbf{V}^T \mathbf{x}_j||^2 \mathbf{A}_{ij} = \sum_{i,j=1}^{n} (e_{ij}^{'})^2 \mathbf{A}_{ij}
\label{lpp_ufs}
\end{align}



\begin{figure}[htbp]
    \centering
    \includegraphics[width=0.45\textwidth]{loss_func}
%    \subfloat[loss  function]{\includegraphics[width=0.45\textwidth]{loss_func}        \label{fig:loss_func}}
    %\subfloat[weight function]{\includegraphics[width=0.25\textwidth]{weight_func}    \label{fig:weight_func}}
    \caption{Comparison of L2 and CIM loss function}
    \label{fig:two_functions}
\end{figure}

The behavior of the squared loss function in Eq.~\eqref{lpp_ufs} is depicted by the red line in Figure \ref{fig:two_functions}. It uses large distance pairs (namely, $e_{ij}^{'}$ is large) to preserve the locality of samples. In other words, Eq.~\eqref{lpp_ufs} only emphasizes the large distance pairs, because when $e_{ij}^{'}$ is large and $\mathbf{A}_{ij}$ is 1, then its contribution to Eq.~\eqref{lpp_ufs} will be quadratic. The aggregated loss in Eq.~\eqref{lpp_ufs} will be largely influenced by even a single large distance pair. As a result, most pairs with small distances are less emphasized. So Eq.~\eqref{lpp_ufs}  cannot characterize the locality of samples well.

To alleviate the problem, we introduce the Correntropy Induced Metric (CIM) \cite{correntropy} as a generalized metric based on the information potential of Renyi's quadratic entropy in information-theoretic learning (ITL) \cite{principe2000information}, which can be defined as follows
\begin{align}
\mathbf{\mathcal{L}}(e) = 1 - \exp(-\frac{e^2}{\delta^2})
\end{align}
where $\delta$ is the kernel width as in the Gaussian function. As shown by the black dash line in Figure \ref{fig:two_functions}, when $e_{ij}^{'}$ is small, the behavior of CIM is less steep, so it can characterize the locality of samples better than the L2 function. Besides, unlike the L2 function, the CIM function is upper bounded. As a result, the contribution of each pair to the aggregated loss will also be upper bounded. Compared to the squared loss, the CIM function is more effective in emphasizing small distance pairs. Thus, we replace the squared loss function in Eq.~\eqref{lpp_ufs} with the small-error-sensitive CIM loss function to better preserve the local structure of data and get the following local sensitive regularization term
\begin{align}\label{obj_p2}
\min_{\mathbf{V}} \quad& \sum_{i=1}^{n} \sum_{j=1}^{n} \mathbf{A}_{ij}\left(1-\exp\left(-\frac{||\mathbf{V}^T\mathbf{x}_{i}-\mathbf{V}^T\mathbf{x}_{j}||^{2}}{\delta^{2}}\right)\right).
\end{align}


\subsection{The Proposed Method}
To select the relevant features, we take the often used group-wise sparsity regularization on the rows of feature selection matrix $\mathbf{V}$, which is defined as follows
\begin{align}\label{obj_p3}
\min_{\mathbf{V}} \quad ||\mathbf{V}||_{2,1}.
\end{align}

By integrating the data reconstruction term in Eq.~\eqref{obj_p1}, the locality preserving regularization term in Eq.~\eqref{obj_p2} and the sparsity regularization in Eq.~\eqref{obj_p3}, we present the Local Sensitive Dual Concept Learning (LSDCL) induced unsupervised feature selection method by solving the following optimization problem,
\begin{align}
\label{opt_usv}
\min_{\mathbf{U, S, V}} \quad& ||\mathbf{X}-\mathbf{X}\mathbf{U} \mathbf{S} \mathbf{V}^T \mathbf{X}||^{2} + \lambda_{2}||\mathbf{V}||_{2,1} \\
+& \lambda_1 \sum_{i=1}^{n} \sum_{j=1}^{n} \mathbf{A}_{ij}\left(1-\exp\left(-\frac{||\mathbf{V}^T\mathbf{x}_{i}-\mathbf{V}^T\mathbf{x}_{j}||^{2}}{\delta^{2}}\right)\right)   \nonumber \\
\st \quad & \mathbf{U} \geq 0, \mathbf{V} \geq 0 ,\mathbf{S} \geq 0, \nonumber
\end{align}
where $\lambda_{1}$ and $\lambda_{2}$ are regularization parameters.

%It is worthwhile to point out that the above method


\section{The Optimization Algorithm}\label{section_algo}
Because the optimization problem in Eq.~\eqref{opt_usv} comprises
three different variables with different regularizations and
constraints, it is hard to derive its closed solution directly.
Thus we derive an alternative iterative algorithm to solve
the problem, which converts the problem with a couple of
variables ($\mathbf{U}, \mathbf{S}, \mathbf{V}$) into a series of sub problems where only one variable is involved. The convergence and complexity analysis are further presented.

\subsection{Update $\mathbf{U}$ Given $\mathbf{S}$ and $\mathbf{V}$}
The optimization problem with respect to the variable $\mathbf{U}$ can be formulated as follows
\begin{align}
\label{opt_u}
\min_{\mathbf{U}} \quad || \mathbf{X}- \mathbf{X} \mathbf{U} \mathbf{S} \mathbf{V}^T\mathbf{X}  ||^2, \quad \st \quad \mathbf{U} \geq 0.
\end{align}
The above problem is a quadratic programming with non-negative constraint. One of the often used technique is to derive the multiplicative update rule as suggested by \cite{nmf,cf}. By introducing the Lagrangian multipliers $\mathbf{\Phi}$, it reduces to minimize the following unconstrained problem
\begin{align}\label{obj_u_2}
\min_{\mathbf{U}}  &\quad \mathbf{\mathcal{J}}(\mathbf{U}) = \tr(  \mathbf{X}\mathbf{U}\mathbf{S}\mathbf{V}^T\mathbf{X}\mathbf{X}^T\mathbf{V}\mathbf{S}^T\mathbf{U}^T\mathbf{X}^T) \nonumber\\
&- 2 \tr(\mathbf{X} \mathbf{U} \mathbf{S} \mathbf{V}^T \mathbf{X}\mathbf{X}^T )+ \tr(\mathbf{\Phi} \mathbf{U} )  \nonumber \\
&= \tr(\mathbf{U}^T \mathbf{F} \mathbf{U} \mathbf{G} ) - 2\tr( \mathbf{U}^T\mathbf{H} )+ \tr( \mathbf{U}^T\mathbf{\Phi} )
\end{align}
where $\mathbf{F} = \mathbf{X}^T\mathbf{X}$, $\mathbf{G} = \mathbf{S}\mathbf{V}^T\mathbf{X}\mathbf{X}^T\mathbf{V}\mathbf{S}^T$, and $\mathbf{H} = \mathbf{X}^T\mathbf{X}\mathbf{X}^T\mathbf{V}\mathbf{S}^T$. The partial derivative of $\mathbf{\mathcal{J}}(\mathbf{U})$ w.r.t $\mathbf{U}$ is
\begin{align}
\frac{\partial  \mathbf{\mathcal{J}}}{ \partial \mathbf{U}} = 2 \mathbf{F}\mathbf{U}\mathbf{G} -2 \mathbf{H}^T + \mathbf{\Phi}
\end{align}
Using the KKT conditions $\mathbf{\Phi}_{ip} \mathbf{U}_{ip}=0$, we get that
\begin{align}
\left( \mathbf{F}\mathbf{U}\mathbf{G} \right)_{ip} \mathbf{U}_{ip} - \mathbf{H}_{ip} \mathbf{U}_{ip} = 0.
\end{align}
By introducing $\mathbf{F} = \mathbf{F}^{+} - \mathbf{F}^{-}$, $\mathbf{G} = \mathbf{G}^{+} - \mathbf{G}^{-}$ and $\mathbf{H} = \mathbf{H}^{+} - \mathbf{H}^{-}$, where $\mathbf{F}^{+}_{ij} = (|\mathbf{F}_{ij}| + \mathbf{F}_{ij})/2,\mathbf{F}^{-}_{ij} = (|\mathbf{F}_{ij}| - \mathbf{F}_{ij})/2$, and setting the partial derivative to zero, we can get the following multiplicative update rule for the problem in Eq.~\eqref{opt_u}
\begin{equation}
\mathbf{U}_{i p} \leftarrow \mathbf{U}_{i p}
\sqrt{\frac{[\mathbf{F}^{+}\mathbf{U}\mathbf{G}^{-} + \mathbf{F}^{-}\mathbf{U}\mathbf{G}^{+}+\mathbf{H}^{+}]_{ip}}{[\mathbf{F}^{+}\mathbf{U}\mathbf{G}^{+} + \mathbf{F}^{-}\mathbf{U}\mathbf{G}^{-}+\mathbf{H}^{-}]_{ip}}} .
\label{eq:update_U}
\end{equation}

\begin{align}
\mathbf{G} &= \mathbf{S}\mathbf{V}^T\mathbf{X}\mathbf{X}^T\mathbf{V}\mathbf{S}^T \\
\mathbf{G} &= \mathbf{G}^{+} - \mathbf{G}^{-} \\
\mathbf{G}^{+}_{ij} &= (|\mathbf{G}_{ij}| + \mathbf{G}_{ij})/2 \\
\mathbf{G}^{-}_{ij} &= (|\mathbf{G}_{ij}| - \mathbf{G}_{ij})/2
\end{align}



\subsection{Update $\mathbf{V}$ Given $\mathbf{U}$ and $\mathbf{S}$}
The loss function with respect to $\mathbf{V}$ is non-quadratic, and it is difficult to be minimized directly. Fortunately, the half-quadratic minimization technique has been developed to optimize those loss functions. By introducing additional auxiliary variable, it reformulates a non-quadratic loss function as an augmented objective function in an enlarged parameter space. It has been shown that the half-quadratic iterations is a quasi-Newton method and substantially faster than gradient based methods \cite{nikolova2007equivalence}.

To make this paper self-contained, we first present the basic half-quadratic optimization techniques. According to the conjugate function \cite{boyd2004convex} and half-quadratic theory \cite{nikolova2007equivalence}, the following equations holds for $\mathbf{E}_{ij}$,
\begin{align}\label{f_cg}
 \ell(\mathbf{E}_{ij}) = \min_{\mathbf{W}_{ij} \in \mathbb{R}}  \mathbf{\mathcal{Q}}(\mathbf{E}_{ij}, \mathbf{W}_{ij}) + \phi(\mathbf{W}_{ij}),
\end{align}
where $\phi(\mathbf{W}_{ij})$ is the conjugate function of $\ell(\mathbf{E}_{ij})$, $\mathbf{W}_{ij}$ is the corresponding auxiliary variable, and $\mathbf{\mathcal{Q}}(\cdot,\cdot): \mathbb{R} \times \mathbb{R} \rightarrow \mathbb{R}$ is a quadratic term for $\mathbf{E}_{ij}$ and $\mathbf{W}_{ij}$. In this paper, we only consider the quadratic term of multiplicative form \cite{nikolova2006analysis}
 \begin{align}\label{f_q}
 \mathbf{\mathcal{Q}}(\mathbf{E}_{ij}, \mathbf{W}_{ij}) = \mathbf{W}_{ij} \mathbf{E}_{ij}^2.
 \end{align}
The minimization of the objective function in Eq.~\eqref{f_cg} is convex with respect to $\mathbf{W}$. The explicit optimum is given by
\begin{align}\label{update_ww}
\mathbf{W}_{ij} = \frac{\ell'(\mathbf{E}_{ij})}{\mathbf{E}_{ij}},
\end{align}
which only depends on the loss function $\ell(\cdot)$.


Based on the above half-quadratic minimization technique, we introduce two additional variables $\mathbf{W}$ and $\mathbf{Q}$ and get
\begin{align}\label{opt_uaw}
\min_{\mathbf{V}, \mathbf{W}, \mathbf{Q} } \quad & ||\mathbf{X}-\mathbf{X}\mathbf{U} \mathbf{S} \mathbf{V}^T \mathbf{X}||^{2}  \nonumber \\
+&\lambda_1 \sum_{i,j=1}^{n} \mathbf{A}_{i j}\left(\mathbf{W}_{ij}||\mathbf{V}^T \mathbf{x}_{i}-\mathbf{V}^T\mathbf{ x}_{j}||^{2}+\phi\left(\mathbf{W}_{ij}\right)\right)\nonumber\\
+& \lambda_{2}\sum_{i=1}^{d}\left(\mathbf{Q}_{ii} ||\mathbf{V}_i||^2 + \phi'(\mathbf{Q}_{ii})    \right).
\end{align}
When the $t$-th iteration $\mathbf{V}^{t}$ is given, the optimal solution of $\mathbf{W}$ for the optimization problem in Eq.~\eqref{opt_uaw} can be calculated by the following formula,
\begin{equation}
\mathbf{W}_{i j}=\exp\left(-\frac{\left\|(\mathbf{V}^t)^T\mathbf{x}_{i}-(\mathbf{V}^t)^T\mathbf{x}_{j}\right\|^{2}}{\delta^{2}}\right).
\label{update_w}
\end{equation}
Similar to other kernel methods, the choice of kernel parameters will affect the performance of the algorithm. In this paper, we empirically set the kernel width to the average pairwise squared distance in the projected space as suggested by \cite{cimpca},
\begin{equation}
\delta^{2}=\frac{\gamma}{n^{2}} \sum_{i=1}^{n} \sum_{j=1}^{n}\left\|(\mathbf{V}^t)^T \mathbf{x}_{i}-(\mathbf{V}^t)^T \mathbf{x}_{j}\right\|^{2},
\label{eq:formulation_delta}
\end{equation}
where $\gamma$ is an adjustable parameter.
When $\mathbf{V}^t$ is given, the auxiliary variable $\mathbf{Q}$ is a diagonal matrix and its diagonal entries can be calculated as
\begin{equation}
\mathbf{Q}_{ii}=\frac{1}{||\mathbf{V}_{i}^t||_{2} + \epsilon},
\label{update_q}
\end{equation}
where $\epsilon$ is a very small constant.

When these two auxiliary variables $\mathbf{W}$ and $\mathbf{Q}$ are given, we have the following quadratic programming on $\mathbf{V}$,
\begin{align}\label{opt_v}
\min_{\mathbf{V}} \quad & ||\mathbf{X}-\mathbf{X}\mathbf{U} \mathbf{S} \mathbf{V}^T \mathbf{X}||^{2} + \lambda_{2} \sum_{i=1}^{d} \mathbf{Q}_{ii} ||\mathbf{V}_i||^2  \nonumber \\
+& \lambda_1 \sum_{i,j=1}^{n} \mathbf{A}_{i j}\mathbf{W}_{ij}||\mathbf{V}^T \mathbf{x}_{i}-\mathbf{V}^T\mathbf{ x}_{j}||^{2}  \\
\st \quad & \mathbf{V} \geq 0 \nonumber
\end{align}

Then we introduce the Lagrangian multiplier $\mathbf{\Gamma}$ and get
\begin{align}\label{opt_v2}
\min_{\mathbf{V}} &\quad \mathbf{\mathcal{J}}(\mathbf{V}) \\
&=  \tr(  \mathbf{X}\mathbf{U}\mathbf{S}\mathbf{V}^T\mathbf{X}\mathbf{X}^T\mathbf{V}\mathbf{S}^T\mathbf{U}^T\mathbf{X}^T) \nonumber\\
&\quad - 2 \tr(\mathbf{X} \mathbf{U} \mathbf{S} \mathbf{V}^T \mathbf{X}\mathbf{X}^T )+ \tr(\mathbf{V}^T\mathbf{\Gamma}  ) \nonumber \\
& \quad + \lambda_1 \tr(\mathbf{V}^T \mathbf{Z}  \mathbf{V}) + \lambda_{2} \tr(\mathbf{V}^T \mathbf{Q} \mathbf{V}) \nonumber \\
&=\tr(\mathbf{V}^T \mathbf{B} \mathbf{V} \mathbf{P}) - 2\tr(\mathbf{V}^T \mathbf{M}) \nonumber \\
&\quad + \lambda_1 \tr(\mathbf{V}^T \mathbf{Z} \mathbf{V}) + \lambda_2 \tr(\mathbf{V}^T \mathbf{Q} \mathbf{V}) +  \tr(\mathbf{V}^T\mathbf{\Gamma} )\nonumber
\end{align}
where $\mathbf{B} = \mathbf{X}\mathbf{X}^T$, $\mathbf{P} = \mathbf{S}^T\mathbf{U}^T\mathbf{X}^T\mathbf{X}\mathbf{U}\mathbf{S}$, $\mathbf{M} = \mathbf{X}\mathbf{X}^T\mathbf{X}\mathbf{U}\mathbf{S}$, $\mathbf{Z} = 2\mathbf{X}(\mathbf{D} - \mathbf{W}\otimes \mathbf{A})\mathbf{X}^T$,  $\mathbf{D}_{ii} = \sum_{j=1}^{n}\mathbf{A}_{ij} \mathbf{W}_{ij}$ and $\mathbf{D}_{ij,i\neq j} = 0$. The partial derivative of $\mathbf{\mathcal{J}}(\mathbf{V})$ w.r.t. $\mathbf{V}$ is
\begin{align}
\frac{\partial  \mathbf{\mathcal{J}}}{ \partial \mathbf{V}} = 2 \mathbf{B}\mathbf{V}\mathbf{P} -2 \mathbf{M} + 2\lambda_1 \mathbf{Z} \mathbf{V} +2\lambda_2 \mathbf{Q} \mathbf{V} + \mathbf{\Gamma}
\end{align}
Using the KKT conditions $\mathbf{\Gamma}_{ip} \mathbf{V}_{ip}=0$, we get that
\begin{align}
\left( \mathbf{B}\mathbf{V}\mathbf{P}  - \mathbf{M}  + \lambda_1 \mathbf{Z} \mathbf{V} + \lambda_2 \mathbf{Q} \mathbf{V}\right)_{ip} \mathbf{V}_{ip}= 0.
\end{align}
By introducing $\mathbf{B} = \mathbf{B}^{+} - \mathbf{B}^{-}$, $\mathbf{P} = \mathbf{P}^{+} - \mathbf{P}^{-}$, $\mathbf{M} = \mathbf{M}^{+} - \mathbf{M}^{-}$, $\mathbf{Z}^{+} = 2(\mathbf{X} \mathbf{D} \mathbf{X}^T)^{+} + 2(\mathbf{X} (\mathbf{W} \otimes \mathbf{A}) \mathbf{X}^T)^{-}$ and $\mathbf{Z}^{-} = 2(\mathbf{X} \mathbf{D} \mathbf{X}^T)^{-} + 2(\mathbf{X} (\mathbf{W} \otimes \mathbf{A}) \mathbf{X}^T)^{+}$, where $\mathbf{B}^{+}_{ij} = (|\mathbf{B}_{ij}| + \mathbf{B}_{ij})/2$, $\mathbf{B}^{-}_{ij} = (|\mathbf{B}_{ij}| - \mathbf{B}_{ij})/2$, and setting the partial derivative to zero, we can get the following multiplicative update rule for the problem in Eq.~\eqref{opt_v}
\begin{equation}
\mathbf{V}_{i p} \leftarrow \mathbf{V}_{i p}
\sqrt{\frac{[\mathbf{B}^{+}\mathbf{V}\mathbf{P}^{-} + \mathbf{B}^{-}\mathbf{V}\mathbf{P}^{+}+\mathbf{M}^{+} + \lambda_1 \mathbf{Z}^{-}\mathbf{V}]_{ip}}{[\mathbf{B}^{+}\mathbf{V}\mathbf{P}^{+} + \mathbf{B}^{-}\mathbf{V}\mathbf{P}^{-}+\mathbf{M}^{-} + \lambda_1 \mathbf{Z}^{+}\mathbf{V} + \lambda_2 \mathbf{Q}\mathbf{V}]_{ip}}} .
\label{update_v}
\end{equation}
\subsection{Update $\mathbf{S}$ Given $\mathbf{U}$ and $\mathbf{V}$}
The optimization problem with respect to the variable $\mathbf{S}$ can be formulated as follows
\begin{align}
\label{opt_s}
\min_{\mathbf{S}} \quad || \mathbf{X}- \mathbf{X} \mathbf{U} \mathbf{S} \mathbf{V}^T\mathbf{X}  ||^2, \quad \mathbf{S} \geq 0.
\end{align}
We can derive similar procedure for the above problem. By setting the partial derivative to be zero, the optimal value of the above quadratic programming can be obtained by
\begin{equation}
\mathbf{S}_{i p} \leftarrow \mathbf{S}_{i p}
\sqrt{\frac{[\mathbf{C}^{+}\mathbf{S}\mathbf{D}^{-} + \mathbf{C}^{-}\mathbf{S}\mathbf{D}^{+}+\mathbf{O}^{+}]_{ip}}{[\mathbf{C}^{+}\mathbf{S}\mathbf{D}^{+} + \mathbf{C}^{-}\mathbf{S}\mathbf{D}^{-}+\mathbf{O}^{-}]_{ip}}} ,
\label{update_s}
\end{equation}
where $\mathbf{C} = \mathbf{U}^T\mathbf{X}^T\mathbf{X}\mathbf{U}$, $\mathbf{D} = \mathbf{V}^T\mathbf{X}\mathbf{X}^T\mathbf{V}$ and $\mathbf{O} = \mathbf{U}^T\mathbf{X}^T\mathbf{X}\mathbf{X}^T\mathbf{V}$, each of which is decomposed into its positive and negative parts as before, e.g., $\mathbf{C} = \mathbf{C}^{+} - \mathbf{C}^{-}$.

The complete algorithm to solve the LSDCL optimization problem in Eq.~\eqref{opt_usv} is summarized in Algorithm \ref{alg:ours_algorithm}.
\begin{algorithm}
	\renewcommand{\algorithmicrequire}{\textbf{Input:}}
	\renewcommand{\algorithmicensure}{\textbf{Output:}}
	\caption{Local Sensitive Dual Concept Learning Algorithm for Unsupervised Feature Selection}
	\label{alg:ours_algorithm}
	\begin{algorithmic}[1]
		\REQUIRE Data matrix $\mathbf{X} \in \mathbb{R}^{d \times n}$, the number of data clusters and feature clusters $c$, regularization parameters $\lambda_{1}$, $\lambda_{2}$, the nearest neighbor $k$.
		\ENSURE Top $m$ features
        \STATE Calculate the $k$-nearest neighbor graph $\mathbf{A}$;
        %\STATE Calculate the eigenvector matrix $\mathbf{E}_{1}$ that corresponding to the $\mathbf{L}_{1}$ minimum c eigenvalues.
        %\STATE Repeat 10 k-means for  $\mathbf{E}_{1}$ , and use clustering results to initialize $\mathbf{U}$.
        %\STATE Let $\mathbf{X}$ do the above and use the clustering result to initialize $\mathbf{V}$.
        \STATE Initialize $\mathbf{U}$ and $\mathbf{V}$ using K-means;
        \STATE Initialize $\mathbf{S}$ as an identity matrix;
        \REPEAT
            \STATE Update $\mathbf{U}$ based on Eq.\eqref{eq:update_U};
            \STATE Update $\mathbf{S}$ based on Eq.\eqref{update_s};
            \STATE Update $\mathbf{W}$ based on Eq.\eqref{update_w};
            \STATE Update $\mathbf{Q}$ based on Eq.\eqref{update_q};
            \STATE Update $\mathbf{V}$ based on Eq.\eqref{update_v};
            \STATE Compute ${\delta}^{2}$ by Eq.\eqref{eq:formulation_delta};
            \UNTIL Converges
        \STATE {\bfseries Feature selection : }Sort all the features of $\mathbf{X}$ according to $\left\|\mathbf{V}_{i}\right\|_{2},(i=1,2, \cdots, d)$ in descending order and select the top-$m$ ranked ones.
	\end{algorithmic}
\end{algorithm}

\subsection{Convergence}
In this subsection, we will investigate the convergence of Algorithm \ref{alg:ours_algorithm}. We use the auxiliary function approach \cite{nmf} to prove the convergence of the algorithm. Here we first introduce the definition of auxiliary function \cite{nmf}.
\begin{definition}
     \cite{nmf} $\mathbf{\mathcal{J}}(h, h')$ is an auxiliary function for    $\ell(h)$ if the following conditions hold
    \begin{align}
        \mathbf{\mathcal{J}}(h, h') \geq \ell(h), \mathbf{\mathcal{J}}(h, h) = \ell(h).
    \end{align}
\end{definition}


\begin{lemma}\label{lemma_next_optimal}
    \cite{nmf} If $\mathbf{\mathcal{J}}(h, h')$ is an auxiliary function for $\ell(\cdot)$, then $\ell(\cdot)$ is non-increasing under the update
    \begin{align}
        h^{t+1} = \arg \min_{h} \mathbf{\mathcal{J}}(h, h^t)
    \end{align}
    \begin{proof}
        $\ell(h^{t+1}) \leq \mathbf{\mathcal{J}}(h^{t+1}, h^t) \leq \mathbf{\mathcal{J}}(h^t, h^t) = \ell(h^t). $
    \end{proof}
\end{lemma}

\begin{lemma}\label{lemma_tr_neq}
    \cite{snmf} For any nonnegative matrices $\mathbf{A} \in \mathbb{R}^{n \times n }$, $\mathbf{B} \in \mathbb{R}^{k \times k }$, $\mathbf{S} \in \mathbb{R}^{n \times k }$, $\mathbf{S}' \in \mathbb{R}^{n \times k }$, and $\mathbf{A}$, $\mathbf{B}$ are symmetric, then the following inequality holds
    \begin{align}
    \sum_{i=1}^{n}\sum_{j=1}^{k}\frac{(\mathbf{A}\mathbf{S}'\mathbf{B})_{ij} \mathbf{S}_{ij}^2}{\mathbf{S}'_{ij}} \geq \tr(\mathbf{S}^T\mathbf{A}\mathbf{S}\mathbf{B})
    \end{align}
\end{lemma}

In the following, we will present four theorems, which guarantee the convergence of Algorithm \ref{alg:ours_algorithm}.

\begin{theorem}\label{theorem_u}
    Let
    \begin{align}
        \mathbf{\mathcal{J}}(\mathbf{U}) &= \tr(  \mathbf{X}\mathbf{U}\mathbf{S}\mathbf{V}^T\mathbf{X}\mathbf{X}^T\mathbf{V}\mathbf{S}^T\mathbf{U}^T\mathbf{X}^T) \nonumber\\
        &- 2 \tr(\mathbf{X} \mathbf{U} \mathbf{S} \mathbf{V}^T \mathbf{X}\mathbf{X}^T ) \nonumber \\
        &= \tr(\mathbf{U}^T \mathbf{F} \mathbf{U} \mathbf{G} ) - 2\tr( \mathbf{U}^T \mathbf{H})
    \end{align}
    Then the following function
    \begin{align}
    &\quad\mathbf{\mathcal{J}}(\mathbf{U}, \mathbf{U'})  \\
    &= \sum_{i=1}^{n}\sum_{p=1}^{k} \frac{(\mathbf{F}^{+} \mathbf{U'} \mathbf{G}^{+} )_{ip} \mathbf{U}_{ip}^{2} }{\mathbf{U'}_{ip}} + \sum_{i=1}^{n}\sum_{p=1}^{k} \frac{(\mathbf{F}^{-} \mathbf{U'} \mathbf{G}^{-} )_{ip} \mathbf{U}_{ip}^{2} }{\mathbf{U'}_{ip}} \nonumber \\
    &-\sum_{i=1}^{n}\sum_{j=1}^{n}\sum_{p=1}^{k}\sum_{q=1}^{k} \mathbf{F}_{ij}^{+}\mathbf{G}_{qp}^{-}\mathbf{U'}_{ip}\mathbf{U'}_{jq}(1 + \log \frac{\mathbf{U}_{ip}\mathbf{U}_{jq}}{\mathbf{U'}_{ip}\mathbf{U'}_{jq}} ) \nonumber \\
    &- \sum_{i=1}^{n}\sum_{j=1}^{n}\sum_{p=1}^{k}\sum_{q=1}^{k} \mathbf{F}_{ij}^{-}\mathbf{G}_{qp}^{+}\mathbf{U'}_{ip}\mathbf{U'}_{jq}(1 + \log \frac{\mathbf{U}_{ip}\mathbf{U}_{jq}}{\mathbf{U'}_{ip}\mathbf{U'}_{jq}} ) \nonumber \\
    &-2\sum_{i=1}^{n}\sum_{p=1}^{k} \mathbf{H}^{+}_{ip}  \mathbf{U'}_{ip} (1 + \log \frac{\mathbf{U}_{ip}}{\mathbf{U'}_{ip}} ) \nonumber\\
    &+ 2 \sum_{i=1}^{n}\sum_{p=1}^{k}\mathbf{H}^{-}_{ip} \frac{\mathbf{U}_{ip}^2 + \mathbf{U'}_{ip}^2}{2\mathbf{U'}_{ip}}\nonumber
    \end{align}
    is an auxiliary function for $\mathbf{\mathcal{J}}(\mathbf{U})$. Furthermore, it is a convex function in $\mathbf{U}$ and its global minimum is
    \begin{equation}
    \mathbf{U}_{i p} \leftarrow \mathbf{U}_{i p}
    \sqrt{\frac{[\mathbf{F}^{+}\mathbf{U}\mathbf{G}^{-} + \mathbf{F}^{-}\mathbf{U}\mathbf{G}^{+}+\mathbf{H}^{+}]_{ip}}{[\mathbf{F}^{+}\mathbf{U}\mathbf{G}^{+} + \mathbf{F}^{-}\mathbf{U}\mathbf{G}^{-}+\mathbf{H}^{-}]_{ip}}}.
    \end{equation}
    \begin{proof}
        See Appendix \ref{proof_u}.
    \end{proof}

\end{theorem}

\begin{theorem}
    Updating $\mathbf{U}$ using Eq.~\eqref{eq:update_U} will monotonically decrease the value of the objective in Eq.~\eqref{opt_u}, hence it converges.
    \begin{proof}
        By Lemma \ref{lemma_next_optimal} and Theorem \ref{theorem_u}, we can get $\mathbf{\mathcal{J}}(\mathbf{U}^0) = \mathbf{\mathcal{J}}(\mathbf{U}^0,\mathbf{U}^0) \geq \mathbf{\mathcal{J}}(\mathbf{U}^1,\mathbf{U}^0) \geq
        \mathbf{\mathcal{J}}(\mathbf{U}^1,\mathbf{U}^1) \geq \ldots$, so $\mathbf{\mathcal{J}}(\mathbf{U})$ is monotonically decreasing. Since $\mathbf{\mathcal{J}}(\mathbf{U})$ is obviously bounded below, this theorem is proved.
    \end{proof}
\end{theorem}

\begin{theorem}\label{theorem_v}
    Let
    \begin{align}
    \mathbf{\mathcal{J}}(\mathbf{V}) &=\tr(\mathbf{V}^T \mathbf{B} \mathbf{V} \mathbf{P}) - 2\tr(\mathbf{V}^T \mathbf{M}) \nonumber \\
    &+ \lambda_1 \tr(\mathbf{V}^T \mathbf{Z} \mathbf{V}) + \lambda_2 \tr(\mathbf{V}^T \mathbf{Q} \mathbf{V}) \nonumber
    \end{align}
    Then the following function
    \begin{align}
   &\quad\mathbf{\mathcal{J}}(\mathbf{V}, \mathbf{V'})  \\
   &= \sum_{i=1}^{d}\sum_{p=1}^{k} \frac{(\mathbf{B}^{+} \mathbf{V'} \mathbf{P}^{+} )_{ip} \mathbf{V}_{ip}^{2} }{\mathbf{V'}_{ip}} + \sum_{i=1}^{d}\sum_{p=1}^{k} \frac{(\mathbf{B}^{-} \mathbf{V'} \mathbf{P}^{-} )_{ip} \mathbf{V}_{ip}^{2} }{\mathbf{V'}_{ip}} \nonumber \\
   &-\sum_{i=1}^{d}\sum_{j=1}^{d}\sum_{p=1}^{k}\sum_{q=1}^{k} \mathbf{B}_{ij}^{+}\mathbf{P}_{qp}^{-}\mathbf{V'}_{ip}\mathbf{V'}_{jq}(1 + \log \frac{\mathbf{V}_{ip}\mathbf{V}_{jq}}{\mathbf{V'}_{ip}\mathbf{V'}_{jq}} ) \nonumber \\
   &- \sum_{i=1}^{d}\sum_{j=1}^{d}\sum_{p=1}^{k}\sum_{q=1}^{k} \mathbf{B}_{ij}^{-}\mathbf{P}_{qp}^{+}\mathbf{V'}_{ip}\mathbf{V'}_{jq}(1 + \log \frac{\mathbf{V}_{ip}\mathbf{V}_{jq}}{\mathbf{V'}_{ip}\mathbf{V'}_{jq}} ) \nonumber \\
   &-2\sum_{i=1}^{d}\sum_{p=1}^{k} \mathbf{M}^{+}_{ip}  \mathbf{V'}_{ip} (1 + \log \frac{\mathbf{V}_{ip}}{\mathbf{V'}_{ip}} ) \nonumber\\
   &+ 2 \sum_{i=1}^{d}\sum_{p=1}^{k}\mathbf{M}^{-}_{ip} \frac{\mathbf{V}_{ip}^2 + \mathbf{V'}_{ip}^2}{2\mathbf{V'}_{ip}}\nonumber \\
   &+ \lambda_1\sum_{i=1}^{d}\sum_{p=1}^{k} \frac{(\mathbf{Z}^{+} \mathbf{V'}  )_{ip} \mathbf{V}_{ip}^{2} }{\mathbf{V'}_{ip}} +\lambda_2 \sum_{i=1}^{d}\sum_{p=1}^{k} \frac{(\mathbf{Q} \mathbf{V'} )_{ip} \mathbf{V}_{ip}^{2} }{\mathbf{V'}_{ip}} \nonumber \\
    &- \lambda_1 \sum_{i=1}^{d}\sum_{j=1}^{d}\sum_{p=1}^{k} \mathbf{Z}_{ij}^{-}\mathbf{V'}_{ip}\mathbf{V'}_{jp}(1 + \log \frac{\mathbf{V}_{ip}\mathbf{V}_{jp}}{\mathbf{V'}_{ip}\mathbf{V'}_{jp}} )\nonumber
   \end{align}
   is an auxiliary function for $\mathbf{\mathcal{J}}(\mathbf{V})$. Furthermore, it is a convex function in $\mathbf{V}$ and its global minimum is
   \begin{equation}
   \mathbf{V}_{i p} \leftarrow \mathbf{V}_{i p}
   \sqrt{\frac{[\mathbf{B}^{+}\mathbf{V}\mathbf{P}^{-} + \mathbf{B}^{-}\mathbf{V}\mathbf{P}^{+}+\mathbf{M}^{+} + \lambda_1 \mathbf{Z}^{-}\mathbf{V}]_{ip}}{[\mathbf{B}^{+}\mathbf{V}\mathbf{P}^{+} + \mathbf{B}^{-}\mathbf{V}\mathbf{P}^{-}+\mathbf{M}^{-} + \lambda_1 \mathbf{Z}^{+}\mathbf{V} + \lambda_2 \mathbf{Q}\mathbf{V}]_{ip}}} .
   \end{equation}
       \begin{proof}
       See Appendix \ref{proof_v}.
   \end{proof}
\end{theorem}

\begin{theorem}
    Updating $\mathbf{V}$ using Eq.~\eqref{update_v} will monotonically decrease the value of the objective in Eq.~\eqref{opt_v}, hence it converges.
    \begin{proof}
        By Lemma \ref{lemma_next_optimal} and Theorem \ref{theorem_v}, we can get $\mathbf{\mathcal{J}}(\mathbf{V}^0) = \mathbf{\mathcal{J}}(\mathbf{V}^0,\mathbf{V}^0) \geq \mathbf{\mathcal{J}}(\mathbf{V}^1,\mathbf{V}^0) \geq
        \mathbf{\mathcal{J}}(\mathbf{V}^1,\mathbf{V}^1) \geq \ldots$, so $\mathbf{\mathcal{J}}(\mathbf{V})$ is monotonically decreasing. Since $\mathbf{\mathcal{J}}(\mathbf{V})$ is obviously bounded below, this theorem is proved.
    \end{proof}
\end{theorem}

The auxiliary function and the convergence proof of updating $\mathbf{S}$ with Eq.~\eqref{update_s} can be obtained in a similar way. We omit these due to space limitation.
\subsection{Complexity}
%The pre-computation of $\mathbf{X}\mathbf{X}^T$, $\mathbf{X}^T\mathbf{X}$,$\mathbf{X}\mathbf{X}^T\mathbf{X}$ and $\mathbf{X}^T\mathbf{X}\mathbf{X}^T$ needs $\mathbf{O}(nd^2 + n^2d)$.
The cost of updating $\mathbf{U}$ is $\mathcal{O}(n^2k+ d^2k + nk^2 + dk^2 + ndk  + nk )$. The cost of updating $\mathbf{S}$ is $\mathcal{O}(n^2k + d^2k + ndk + k^3 + nk^2)$. The cost of updating $\mathbf{V}$ is $\mathcal{O}(n^2k + d^2k + nk^2 + ndk + dk^2 + nk )$. Since $k \ll \min(d, n)$, the overall complexity for solving Eq.~\eqref{opt_usv} can be summarized as $\mathcal{O}((n^2+d^2 +nd)kt)$, where $t$ is the number of iterations.

%\section{Discussion}
%\subsection{Connection to the L2/L1-UFS methods}
%
%The problem of $\ell_{2}$-norm based graph regularized unsupervised feature selection (\textbf{L2UFS}) can be formulated as follows
%\begin{align}
%\min_{\mathbf{W}} \quad& ||\mathbf{X} - \mathbf{W}^T\mathbf{X}||_{2,1} \\
%+& \lambda_{1}\sum_{i,j=1}^{n}\mathbf{A}_{ij}||\mathbf{W}^T\mathbf{x}_i - \mathbf{W}^T\mathbf{x}_j||^2 + \lambda_{2}||\mathbf{W}||_{2,1}.\nonumber
%\end{align}
%Be denoting the Laplacian matrix $\mathbf{L} = \mathbf{D} - \mathbf{A}$ and its eigen-decomposition result $\mathbf{L} = \mathbf{UVU}^T$, the second term can be further reformulated into
%\begin{align}
%\tr(\mathbf{W}^T \mathbf{X}\mathbf{L} \mathbf{X}^T \mathbf{W}) =&\tr(\mathbf{W}^T \mathbf{X}\mathbf{UVU}^T \mathbf{X}^T \mathbf{W}) \\
%=&||\mathbf{V}^{\frac{1}{2}}\mathbf{U}^T\mathbf{U}\mathbf{X}^T\mathbf{W}||_{2}^2 \nonumber
%\end{align}
%Then, the robust $\ell_{1}$-norm based graph regularized unsupervised feature selection method (\textbf{L1UFS}) can be written as
%\begin{align}
%\min_{\mathbf{W}} \quad& ||\mathbf{X} - \mathbf{W}^T\mathbf{X}||_{2,1} + \lambda_{1}||\mathbf{V}^{\frac{1}{2}} \mathbf{U}^T\mathbf{U} \mathbf{W}||_{1} + \lambda_{2}||\mathbf{W}||_{2,1}.
%\end{align}
%where
%It can be seen that, Our methods

\section{EXPERIMENT}
In this section, we conduct a series of experiments to demonstrate the effectiveness of our proposed algorithm for the task of unsupervised feature selection.

\subsection{Data Sets}
The experiments are conducted on ten public data sets that come from all kinds of applications, including Housing, WINE, USPS49, ZOO, COIL20, ECOLI, ORL, JAFFE, TOX, GLIOMA. The statistics of these data sets, including the number of data samples, the dimension of each sample, the types and categories of each dataset and the number of selected features are summarized in Table~\ref{table:dataset}.
\begin{table}
\caption{Details of these data sets and the number of selected features}
\begin{tabular}{ c  c  c  c  c  }
\hline
Data Sets & Sample & Feature & Class & Selected Features\\ \hline
Housing & 506 & 13 & 2 & [1,2,...,9]\\
WINE & 178 & 13 & 3 & [1,2,...,9]\\
USPS49 & 1673 & 256 & 2 & [5,10,...,50]\\
ZOO & 101 & 16 & 7 & [1,2,...,9]\\
COIL20 & 1440 & 1024 & 20 & [5,10,...,50]\\
ECOLI & 336 & 343 & 8 & [5,10,...,50]\\
ORL & 400 & 1024 & 40 & [5,10,...,50]\\
JAFFE & 213 & 676 & 10 & [5,10,...,50]\\
TOX & 171 & 5748 & 4 & [5,10,...,50]\\
GLIOMA & 50 & 4434 & 4 & [5,10,...,50]\\ \hline
\end{tabular}
\label{table:dataset}
\end{table}

\subsection{Compared Algorithms}
In order to evaluate the performance of our proposed algorithm, we compare it with the following representative and state-of-the-art unsupervised feature selection methods:

\begin{itemize}

\item AllFea. All original features are used.
\item LapScore\footnote{\url{http://www.cad.zju.edu.cn/home/dengcai/Data/code/LaplacianScore.m}}\cite{lapscore}. It is a filter method to select those features that can best preserve the local structure of data.
\item MCFS\footnote{\url{http://www.cad.zju.edu.cn/home/dengcai/Data/code/MCFS_p.m}}\cite{mcfs}. It exploits the multi-cluster structure of data via spectral analysis and selects the best-aligned features through sparse spectral regression.
\item UDFS\footnote{\url{http://www.cs.cmu.edu/~yiyang/UDFS.rar}}\cite{udfs}. The algorithm UDFS selects the most discriminative features by exploiting both the discriminative information and feature correlations.
\item NDFS\footnote{\url{https://sites.google.com/site/zcliustc}}\cite{ndfs}. It jointly performs feature selection via $\ell_{2,1}$-norm regularized regression and spectral clustering with additional nonnegative and orthogonal constraints.
\item RUFS\footnote{\url{https://sites.google.com/site/qianmingjie}} \cite{rufs}, which performs robust clustering and robust feature selection simultaneously to select the most important and discriminative features.

\item SOGFS\footnote{\url{http://www.escience.cn/system/file?fileId=83492}}\cite{sogfs}. It is an unsupervised feature selection method which performs feature selection and local structure learning simultaneously so that the similarity matrix can be determined adaptively.

\item ULAP\footnote{\url{http://www.escience.cn/system/file?fileId=102290}}\cite{ulap}. It is an unsupervised feature selection method which selects features by learning an adaptive similarity matrix and a projection matrix in each iteration.

\item L1UFS\footnote{\url{https://pan.baidu.com/s/1dEUmG17}}\cite{l1ufs}. It is an unsupervised feature selection method that tries to preserve the local geometric structure of data by using the Laplacian regularization term based on the $\ell_{1}$-norm.

\end{itemize}
These compared algorithms can be roughly categorized into four types: AllFea is the baseline method. LapScore is the representative filter-based method, MCFS, UDFS, NDFS, RUFS, SOGFS, ULAP are representative embedded methods, L1UFS and LSDCL are most recent work on reconstruction based methods.

\textbf{For the purpose of reproducibility, we provide all the dataset and the code at \url{https://gitee.com/csliangdu/LSDCL-UFS}.}

\subsection{Experiment Setup}
There are some parameters that need to be specified in the experiments. For all the data sets, the size of the neighborhood $k$ in all methods except L1UFS is set to 5. The weight of $k$-nn graph for LapScore is based on the Gaussian kernel whose kernel width is searched in the range of $\left\{2^{-3}, 2^{-2}, \ldots, 2^{3}\right\}$. In addition, for the weight of $k$-nn graph of MCFS, we report the best result among the 0-1 weight and the Gaussian kernel weight, where the kernel width of the Gaussian kernel is also searched in the range of $\left\{2^{-3}, 2^{-2}, \ldots, 2^{3}\right\}$. In order to make the experiments equitable enough, the regularization parameters are searched from the range of $\left\{10^{-5}, 10^{-4}, \ldots, 10^{5}\right\}$, for UDFS, NDFS, RUFS, SOGFS, ULAP, L1UFS and our proposed algorithm. Furthermore, the dimension of the projection matrix in SOGFS is set to $\frac{d}{3}$ according to the suggestion of the paper\cite{sogfs}. For our proposed algorithm, the parameter $\gamma$ is searched in the range of $\left\{2^{-3}, 2^{-2}, \ldots, 2^{3}\right\}$, and we use the 0-1 weight as the weight of $k$-nn graph. All these hyper-parameters of all methods are selected under the same grid-search strategy to make the experiments fair enough.

For the selected features, we evaluate the performance based on $k$-means clustering with two widely used clustering metrics, i.e., Accuracy (ACC) and Normalized Mutual Information (NMI). In order to alleviate the random effect as much as possible, we run $k$-means clustering 20 times with random initialization on the selected subset and record the average results.
\subsection{Evaluation Metrics}
To evaluate their performance, we compare the generated clusters with the ground truth by computing the following two performance measures.

\textbf{Clustering accuracy (ACC)}. The first performance measure is the clustering accuracy, which discovers the one-to-one relationship between clusters and classes. Given a point $\bm{x}_i$, let $p_i$ and $q_i$ be the clustering result and the ground truth label, respectively. The ACC is defined as follows:
\begin{equation}
\textrm{ACC} = \frac{1}{n}\sum_{i=1}^{n}\delta(q_i, map(p_i)),
\end{equation}
where $n$ is the total number of samples and $\delta(x,y)$ is the delta function that equals 1 if $x=y$ and equals 0 otherwise, and $map(\cdot)$ is the permutation mapping function that maps each cluster index to a true class label. The best mapping can be found by using the Kuhn-Munkres algorithm \cite{map}. The greater clustering accuracy means the better clustering performance.


\textbf{Normalized mutual information (NMI)}. Another evaluation metric that we adopt here is the normalized mutual information, which is widely used for determining the quality of clustering. Let $\mathcal{C}$ be the set of clusters from the ground truth and $\mathcal{C'}$ obtained from a clustering algorithm. Their mutual information $MI(\mathcal{C}, \mathcal{C'})$ is defined as follows:
\begin{equation}
\textrm{MI}(\mathcal{C}, \mathcal{C'}) = \sum_{c_i \in \mathcal{C},c'_j \in \mathcal{C'}} p(c_i,c'_j) \log \frac{p(c_i,c'_j)}{p(c_i) p(c'_j)},
\end{equation}
where $p(c_i)$ and $p(c'_j)$ are the probabilities that a data point arbitrarily selected from the data set belongs to the cluster $c_i$ and $c'_j$, respectively, and $p(c_i,c'_j)$ is the joint probability that the arbitrarily selected data point belongs to the cluster $c_i$ as well as $c'_j$ at the same time. In our experiments, we use the normalized mutual information as follows:
\begin{equation}
\textrm{NMI}(\mathcal{C}, \mathcal{C'}) = \frac{ \textrm{MI}(\mathcal{C}, \mathcal{C'})}{\max(H(\mathcal{C}), H(\mathcal{C'}))},
\end{equation}
where $H(\mathcal{C})$ and  $H(\mathcal{C'})$ are the entropies of $\mathcal{C}$ and $\mathcal{C'}$, respectively. Again, a larger NMI indicates a better performance.
\subsection{Clustering with Selected Features}
Since the best number of selected features is uncertain, we report the average of the results for all of the selected features in order to evaluate the performance of the algorithms better and more equitably (the range of the number of selected features for each data set is shown in Table~\ref{table:dataset}). To fully investigate the significance among these compared feature selection algorithms except AllFea, we conduct pairwise t-test between the method with best result and the rest methods and report their corresponding $p$-value.


The performance of these unsupervised feature selection methods evaluated by ACC and NMI are shown in Table~\ref{table:res-acc} and Table~\ref{table:res-nmi}, respectively. For all unsupervised feature selection methods, the results in Table~\ref{table:res-acc} and Table~\ref{table:res-nmi} are expressed as the mean $\pm$ standard deviation and the $p$-value. At the same time, the best result and those results that have no significant difference ($p$ $\geq$ 0.05) from the best result are marked as bold for each data set. The last row of Table~\ref{table:res-acc} and Table~\ref{table:res-nmi} show the average result of all the data sets.


The results in Tables~\ref{table:res-acc} and~\ref{table:res-nmi} show that most of the feature selection algorithms can improve clustering performance effectively compared with using all features directly. These results demonstrate that using a few features selected from the raw data by the feature selection algorithm can not only reduce time and memory overhead in the learning task but also improve the performance of learning algorithms effectively. It is worth mentioning that our proposed algorithm improves ACC and NMI by 9.12$\%$ and 12.40$\%$, respectively, compared with the baseline that uses all features without feature selection. It is also easy to observe that our proposed algorithm performs better than the other eight feature selection algorithms.


\subsection{Effect of Each Term}
In this subsection, we conduct well-directed experiments on different variants of our method to carefully verify the effectiveness of the first and second terms in Eq.~\eqref{opt_usv} for the task of unsupervised feature selection. With different configurations of the first and the second terms, we can derive the following three different variants.

The first variant, denoted $\mathbf{M}_1$, can be formulated as
\begin{align}
\label{opt_v1}
\min_{\mathbf{U},\mathbf{V}} \quad& ||\mathbf{X}^T - \mathbf{U}\mathbf{V}^T||^2 + \lambda_1 \tr (\mathbf{U}^{T} \mathbf{L} \mathbf{U}) + \lambda_{2}||\mathbf{V}||_{2,1} \\
\st \quad& \mathbf{U} \geq 0, \mathbf{V} \geq 0, \nonumber
\end{align}
where $\mathbf{L} = \mathbf{D} - \mathbf{A}$. The above formulation is equivalent to the method of Embedded Unsupervised Feature Selection (EUFS) \cite{eufs} except for the orthogonality constraint $\mathbf{U}^T \mathbf{U} = \mathbf{I}$ and the non-negativity constraint $\mathbf{V} \geq 0$.

The second variant $\mathbf{M}_2$ can be obtained by replacing the first term as
\begin{align}
\label{opt_variant2}
\min_{\mathbf{U},\mathbf{S},\mathbf{V}} \quad& ||\mathbf{X} - \mathbf{X}\mathbf{U} \mathbf{S} \mathbf{V}^T \mathbf{X}||^2 + \lambda_1 \tr (\mathbf{U}^{T} \mathbf{L} \mathbf{U}) + \lambda_{2}||\mathbf{V}||_{2,1}\nonumber \\
\st \quad& \mathbf{U} \geq 0, \mathbf{S} \geq 0, \mathbf{V} \geq 0.
\end{align}

\begin{table*}
    \newcommand{\tabincell}[2]{\begin{tabular}{@{}#1@{}}#2\end{tabular}}
    \caption{ The aggregate of the clustering results evaluated by Accuracy $(\%)$. }
    \tiny \centering \label{table:res-acc}
    \begin{tabular}{ c ||  c || c || c  c  c  c  c  c  || c  c }
        \hline
        Data Sets & AllFea & LapScore & MCFS & UDFS & NDFS & RUFS & SOGFS & ULAP & L1UFS & LSDCL\\ \hline
        Housing& \tabincell{c}{ 61.26 } & \tabincell{c}{ 61.26 $\pm$ 0.00 \\0.00 } & \tabincell{c}{ 67.01 $\pm$ 3.21 \\0.01 } & \tabincell{c}{ 67.84 $\pm$ 4.51 \\0.01 } & \tabincell{c}{ 69.17 $\pm$ 7.67 \\0.02 } & \tabincell{c}{ 73.45 $\pm$ 4.21 \\0.05 } & \tabincell{c}{ 62.60 $\pm$ 2.23 \\0.00 } & \tabincell{c}{ 66.38 $\pm$ 4.80 \\0.01 } & \tabincell{c}{ 73.75 $\pm$ 4.06 \\0.04 } & \tabincell{c}{ \textbf{74.53} \textbf{$\pm$ 3.65 } \\ \textbf{1.00 }} \\ \hline
        WINE& \tabincell{c}{ 67.81 } & \tabincell{c}{ 67.63 $\pm$ 0.21 \\0.00 } & \tabincell{c}{ 77.61 $\pm$ 11.63 \\0.01 } & \tabincell{c}{ 77.38 $\pm$ 10.03 \\0.01 } & \tabincell{c}{ 82.13 $\pm$ 4.74 \\0.03 } & \tabincell{c}{ \textbf{87.46} \textbf{$\pm$ 3.97 } \\ \textbf{0.94 }} & \tabincell{c}{ 75.23 $\pm$ 5.50 \\0.00 } & \tabincell{c}{ 70.64 $\pm$ 7.85 \\0.00 } & \tabincell{c}{ \textbf{86.44} \textbf{$\pm$ 7.11 } \\ \textbf{0.43 }} & \tabincell{c}{ \textbf{87.47} \textbf{$\pm$ 4.14 } \\ \textbf{1.00 }} \\ \hline

        USPS49 & \tabincell{c}{ 77.70 } & \tabincell{c}{ 69.21 $\pm$ 8.95 \\0.00 } & \tabincell{c}{ 81.91 $\pm$ 6.91 \\0.00 } & \tabincell{c}{ \textbf{93.99} \textbf{$\pm$ 3.01 } \\ \textbf{0.11 }} & \tabincell{c}{ 88.27 $\pm$ 6.18 \\0.01 } & \tabincell{c}{ 84.58 $\pm$ 3.48 \\0.00 } & \tabincell{c}{ 69.75 $\pm$ 5.33 \\0.00 } & \tabincell{c}{ 56.69 $\pm$ 4.12 \\0.00 } & \tabincell{c}{ \textbf{93.28} \textbf{$\pm$ 5.99 } \\ \textbf{0.25 }} & \tabincell{c}{ \textbf{95.54} \textbf{$\pm$ 0.62 } \\ \textbf{1.00 }} \\ \hline

        ZOO& \tabincell{c}{ 71.98 } & \tabincell{c}{ 74.01 $\pm$ 6.39 \\0.00 } & \tabincell{c}{ 61.78 $\pm$ 7.81 \\0.00 } & \tabincell{c}{ 81.61 $\pm$ 4.66 \\0.00 } & \tabincell{c}{ 75.81 $\pm$ 5.71 \\0.00 } & \tabincell{c}{ \textbf{84.21} \textbf{$\pm$ 6.32 } \\ \textbf{0.75 }} & \tabincell{c}{ 77.89 $\pm$ 9.24 \\0.00 } & \tabincell{c}{ \textbf{80.26} \textbf{$\pm$ 10.98 } \\ \textbf{0.06 }} & \tabincell{c}{ 82.25 $\pm$ 5.30 \\0.01 } & \tabincell{c}{ \textbf{84.42} \textbf{$\pm$ 6.20 } \\ \textbf{1.00 }} \\ \hline

        COIL20 & \tabincell{c}{ 59.17 } & \tabincell{c}{ 45.60 $\pm$ 6.16 \\0.00 } & \tabincell{c}{ 56.64 $\pm$ 4.41 \\0.00 } & \tabincell{c}{ 31.43 $\pm$ 16.91 \\0.00 } & \tabincell{c}{ 59.07 $\pm$ 4.54 \\0.00 } & \tabincell{c}{ 59.20 $\pm$ 3.28 \\0.00 } & \tabincell{c}{ 54.72 $\pm$ 3.48 \\0.00 } & \tabincell{c}{ 31.04 $\pm$ 17.00 \\0.00 } & \tabincell{c}{ 59.40 $\pm$ 4.49 \\0.01 } & \tabincell{c}{ \textbf{62.28} \textbf{$\pm$ 2.01 } \\ \textbf{1.00 }} \\ \hline

        ECOLI& \tabincell{c}{ 57.44 } & \tabincell{c}{ 57.25 $\pm$ 2.54 \\0.00 } & \tabincell{c}{ 56.90 $\pm$ 5.39 \\0.00 } & \tabincell{c}{ 57.26 $\pm$ 2.76 \\0.00 } & \tabincell{c}{ 57.72 $\pm$ 2.50 \\0.00 } & \tabincell{c}{ 59.50 $\pm$ 2.48 \\0.00 } & \tabincell{c}{ 56.86 $\pm$ 1.86 \\0.00 } & \tabincell{c}{ 57.42 $\pm$ 3.62 \\0.00 } & \tabincell{c}{ 58.52 $\pm$ 2.66 \\0.00 } & \tabincell{c}{ \textbf{60.89} \textbf{$\pm$ 2.68 } \\ \textbf{1.00 }} \\ \hline

        ORL & \tabincell{c}{ 51.79 } & \tabincell{c}{ 41.77 $\pm$ 2.01 \\0.00 } & \tabincell{c}{ 48.57 $\pm$ 4.68 \\0.02 } & \tabincell{c}{ \textbf{49.61} \textbf{$\pm$ 3.08 } \\ \textbf{0.16 }} & \tabincell{c}{ 47.36 $\pm$ 2.72 \\0.00 } & \tabincell{c}{ 48.59 $\pm$ 3.91 \\0.00 } & \tabincell{c}{ 47.32 $\pm$ 3.32 \\0.00 } & \tabincell{c}{ 42.44 $\pm$ 2.60 \\0.00 } & \tabincell{c}{ \textbf{49.45} \textbf{$\pm$ 2.26 } \\ \textbf{0.12 }} & \tabincell{c}{ \textbf{50.29} \textbf{$\pm$ 3.08 } \\ \textbf{1.00 }} \\ \hline

        JAFFE & \tabincell{c}{ 71.57 } & \tabincell{c}{ 67.62 $\pm$ 8.49 \\0.00 } & \tabincell{c}{ 76.46 $\pm$ 1.87 \\0.01 } & \tabincell{c}{ 73.46 $\pm$ 1.33 \\0.00 } & \tabincell{c}{ 74.66 $\pm$ 2.75 \\0.00 } & \tabincell{c}{ 75.39 $\pm$ 2.52 \\0.00 } & \tabincell{c}{ 73.46 $\pm$ 1.11 \\0.00 } & \tabincell{c}{ 69.02 $\pm$ 3.61 \\0.00 } & \tabincell{c}{ 76.49 $\pm$ 1.06 \\0.00 } & \tabincell{c}{ \textbf{77.98} \textbf{$\pm$ 1.65 } \\ \textbf{1.00 }} \\ \hline

        TOX& \tabincell{c}{ 43.65 } & \tabincell{c}{ 40.14 $\pm$ 0.88 \\0.00 } & \tabincell{c}{ 40.11 $\pm$ 1.51 \\0.00 } & \tabincell{c}{ 45.26 $\pm$ 0.92 \\0.00 } & \tabincell{c}{ 44.59 $\pm$ 2.47 \\0.00 } & \tabincell{c}{ \textbf{50.74} \textbf{$\pm$ 0.70 } \\ \textbf{0.06 }} & \tabincell{c}{ 46.92 $\pm$ 1.28 \\0.00 } & \tabincell{c}{ 48.15 $\pm$ 1.69 \\0.00 } & \tabincell{c}{ 43.80 $\pm$ 1.13 \\0.00 } & \tabincell{c}{ \textbf{51.90} \textbf{$\pm$ 1.28 } \\ \textbf{1.00 }} \\ \hline

        GLIOMA & \tabincell{c}{ 58.70 } & \tabincell{c}{ 54.92 $\pm$ 0.56 \\0.00 } & \tabincell{c}{ 52.96 $\pm$ 1.41 \\0.00 } & \tabincell{c}{ 52.91 $\pm$ 2.59 \\0.00 } & \tabincell{c}{ 58.24 $\pm$ 1.20 \\0.00 } & \tabincell{c}{ 57.94 $\pm$ 0.75 \\0.00 } & \tabincell{c}{ 60.13 $\pm$ 1.18 \\0.00 } & \tabincell{c}{ 48.28 $\pm$ 0.94 \\0.00 } & \tabincell{c}{ 61.00 $\pm$ 1.03 \\0.00 } & \tabincell{c}{ \textbf{65.72} \textbf{$\pm$ 0.99 } \\ \textbf{1.00 }} \\ \hline

        Average & \tabincell{c}{ 62.11 } & \tabincell{c}{ 57.94 } & \tabincell{c}{ 62.00 } & \tabincell{c}{ 63.08 } & \tabincell{c}{ 65.70 } & \tabincell{c}{ 68.11 } & \tabincell{c}{ 62.49 } & \tabincell{c}{ 57.03 } & \tabincell{c}{ 68.44 } & \tabincell{c}{ \textbf{71.10} }  \\ \hline
    \end{tabular}
\end{table*}

Similarly, by replacing the second term in Eq.~\eqref{opt_v1} we get the third variant $\mathbf{M}_3$ which is formulated as
\begin{align}
\label{opt_v3}
\min_{\mathbf{U},\mathbf{V}} \quad& ||\mathbf{X}^T - \mathbf{U}\mathbf{V}^T||^2 + \lambda_{2}||\mathbf{V}||_{2,1}
\\
+&\lambda_1 \sum_{i,j=1}^{n} \mathbf{A}_{ij} \left( 1 - \exp\left(-\frac{||\mathbf{V}^T \mathbf{x}_i -\mathbf{V}^T \mathbf{x}_j||^2 }{\delta^2} \right) \right)  \nonumber\\
\st \quad& \mathbf{U} \geq 0, \mathbf{V} \geq 0. \nonumber
\end{align}

Although the optimization algorithms for the above three variants are not presented due to space limitations, it is relatively easy to derive the corresponding algorithms to solve these optimization problems based on the techniques in Section~\ref{section_algo}.

\begin{figure}[htbp]
    \centering
    \includegraphics[width=0.45\textwidth]{m1-m2-m3-}
    \caption{Clustering accuracy w.r.t. different number of selected features on COIL data set.}
    \label{COIL20_OURS_ACC}
\end{figure}

%\Figure[htbp](topskip=0pt, botskip=0pt, midskip=0pt)[width=0.5\textwidth]{COIL20_OURS_ACC.png}
%{Accuracy of the different settings on COIL20.\label{OURS_ACC}}

%\Figure[htbp](topskip=0pt, botskip=0pt, midskip=0pt)[width=0.48\textwidth,trim=15 0 0 10,clip]{COIL20_OURS_NMI.png}
%{Normalized Mutual Information(NMI) of the different settings on COIL20.\label{OURS_NMI}}



\begin{figure}[htbp]
    \centering
    \subfloat[$\lambda_{1}$ on Housing]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_housing_lambda1}
        \label{fig:sensitivity_acc_housing_lambda1}}
    \subfloat[$\lambda_{2}$ on Housing]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_housing_lambda2}
        \label{fig:sensitivity_acc_housing_lambda2}}
    \\
    \subfloat[$\lambda_{1}$ on Wine]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_wine_lambda1}
        \label{fig:sensitivity_acc_wine_lambda1}}
    \subfloat[$\lambda_{2}$ on Wine]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_wine_lambda2}
        \label{fig:sensitivity_acc_wine_lambda2}}
    \\
    \subfloat[$\lambda_{1}$ on USPS]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_USPS49_lambda1}
        \label{fig:sensitivity_acc_USPS49_lambda1}}
    \subfloat[$\lambda_{2}$ on USPS]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_USPS49_lambda2}
        \label{fig:sensitivity_acc_USPS49_lambda2}}
    \\
    \subfloat[$\lambda_{1}$ on Zoo]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_zoo_lambda1}
        \label{fig:sensitivity_acc_zoo_lambda1}}
    \subfloat[$\lambda_{2}$ on Zoo]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_zoo_lambda2}
        \label{fig:sensitivity_acc_zoo_lambda2}}
    \\
    \subfloat[$\lambda_{1}$ on COIL]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_COIL20_lambda1}
        \label{fig:sensitivity_acc_COIL20_lambda1}}
    \subfloat[$\lambda_{2}$ on COIL]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_COIL20_lambda2}
        \label{fig:sensitivity_acc_COIL20_lambda2}}
    \caption{Clustering accuracy w.r.t. $\lambda_{1}$ and $\lambda_{2}$.}
    \label{sensitivity_1}
\end{figure}

Here, we take the COIL data set as an example to show the results of different variants, i.e., $\mathbf{M}_1$ in Eq.~\eqref{opt_v1}, $\mathbf{M}_2$ in Eq.~\eqref{opt_variant2}, $\mathbf{M}_3$ in Eq.~\eqref{opt_v3} and our method in Eq.~\eqref{opt_usv}. For all these compared variants, we report the best results via the same grid search procedure. The k-means clustering results with different numbers of selected features measured in terms of ACC is presented in Figure~\ref{COIL20_OURS_ACC}.

It can be seen that the variant $\mathbf{M}_2$ is better than $\mathbf{M}_1$ by replacing the two-factor factorization with the proposed Dual Concept Learning for data reconstruction. Accordingly, the result of variant $\mathbf{M}_3$ is also better than the original variant $\mathbf{M}_1$ by replacing the squared loss function in the local structure preserving term with the CIM loss function. These two comparisons show that the adoption of the first term in $\mathbf{M}_2$ and the second term in $\mathbf{M}_3$ can indeed improve the feature selection results effectively. Moreover, we further present the result of our method in Eq.~\eqref{opt_usv} by considering both the first term and the second term simultaneously. Clearly, the result is better than all three variants, i.e., the optimization problem $\mathbf{M}_1$ in Eq.~\eqref{opt_v1}, the optimization problem $\mathbf{M}_2$ in Eq.~\eqref{opt_variant2}, and the optimization problem $\mathbf{M}_3$ in Eq.~\eqref{opt_v3}. These four carefully designed experiments well demonstrate the improvement of the newly proposed terms and method.


\subsection{Parameter Sensitivity and Convergence Analysis}
In this subsection, we investigate the sensitivities of the parameter $\gamma$ and the regularization parameters $\lambda_{1}$ and $\lambda_{2}$ in our proposed algorithm. We plot the clustering accuracy (ACC) with different values of these parameters on all data sets in Figure~\ref{sensitivity_1}, Figure~\ref{sensitivity_2} and Figure~\ref{sensitivity_gamma_aio}. The results show that our proposed algorithm is not very sensitive to $\lambda_{1}$, $\lambda_{2}$ and $\gamma$ over wide ranges. In particular, we can see that the performance is also relatively stable with respect to the number of selected features.

Figure~\ref{fig:CONVERGENCE} shows the variation trend of the objective function value of our proposed method with increasing number of iterations on four data sets. The results show that the objective function is monotonically decreasing before convergence.

\subsection{Cross-validation based Parameter Selection for Clustering with Different Training Set Sizes}
To investigate the proposed method thoroughly, we further conduct cross-validation experiments.

We perform cross-validation experiments on the USPS49 data set to better evaluate the performance of the algorithm. We use a subset of the data as the training set, and obtain the feature subsets with the smallest training error corresponding to different numbers of selected features by our proposed algorithm. After that, we cluster the remaining samples, which serve as the test set, using the subset of features just obtained. In order to study the performance of the algorithm on training sets of different sizes, we use 100, 200, ..., 1000 samples as the training set to conduct experiments respectively. In addition, for each training set size, we perform ten random selections to alleviate the effects of randomness.

Tables~\ref{table:cv-acc} and \ref{table:cv-nmi} show the ACC and NMI test results for different numbers of selected features and different training set sizes, respectively.

\section{Conclusion}
In this paper, we propose a novel unsupervised feature selection algorithm based on local sensitive dual concept learning. In our new method, the original data matrix is approximated by the feature-side and sample-side dual concept factorization, which explores the duality between sample and feature and inherits the merit of the co-clustering mechanism. The local structure is captured by the graph based regularization with the local sensitive CIM function, which emphasizes similar local pairs with small errors. As can be seen, our method can obtain a more faithful and compact preservation of the data structure, which often leads to better performance of the selected features, as verified by extensive experimental results.

As an important direction of future work, we plan to further develop better hyper-parameter free unsupervised feature selection methods.





\begin{figure}[htbp]
\centering
\subfloat[Convergence on ORL]{\includegraphics[width=0.24\textwidth]{convergence_ORL}
\label{fig:convergence_ORL}}
\subfloat[Convergence on JAFFE]{
\includegraphics[width=0.24\textwidth]{convergence_JAFFE}
\label{fig:convergence_JAFFE}}
\\
\subfloat[Convergence on TOX]{
\includegraphics[width=0.24\textwidth]{convergence_TOX}
\label{fig:convergence_TOX}}
\subfloat[Convergence on GLIOMA]{
\includegraphics[width=0.24\textwidth,trim=5 0 0 10,clip]{convergence_GLIOMA}
\label{fig:convergence_GLIOMA}}
\caption{The convergence curves of our method on 4 different data sets.}
\label{fig:CONVERGENCE}
\end{figure}

\begin{table*}
    \newcommand{\tabincell}[2]{\begin{tabular}{@{}#1@{}}#2\end{tabular}}
    \caption{ The aggregate of the clustering results evaluated by  Normalized Mutual Information $(\%)$. }
    \tiny \centering \label{table:res-nmi}
    \begin{tabular}{ c ||  c || c || c  c  c  c  c  c || c  c }
        \hline
        Data Sets & AllFea & LapScore  & MCFS & UDFS & NDFS & RUFS & SOGFS & ULAP & L1UFS & LSDCL\\ \hline
        Housing& \tabincell{c}{ 10.74 } & \tabincell{c}{ 10.74 $\pm$ 0.00 \\0.00 } & \tabincell{c}{ 10.24 $\pm$ 0.63 \\0.00 } & \tabincell{c}{ 14.14 $\pm$ 8.72 \\0.02 } & \tabincell{c}{ 16.31 $\pm$ 7.55 \\0.03 } & \tabincell{c}{ \textbf{20.21} \textbf{$\pm$ 7.27 } \\ \textbf{0.18 }} & \tabincell{c}{ 10.74 $\pm$ 0.00 \\0.00 } & \tabincell{c}{ 8.62 $\pm$ 4.37 \\0.00 } & \tabincell{c}{ 17.11 $\pm$ 7.11 \\0.02 } & \tabincell{c}{ \textbf{22.81} \textbf{$\pm$ 7.38 } \\ \textbf{1.00 }} \\ \hline

        WINE & \tabincell{c}{ 42.61 } & \tabincell{c}{ 42.63 $\pm$ 0.06 \\0.00 } & \tabincell{c}{ 50.37 $\pm$ 16.91 \\0.01 } & \tabincell{c}{ 50.56 $\pm$ 14.29 \\0.01 } & \tabincell{c}{ 56.67 $\pm$ 7.91 \\0.02 } & \tabincell{c}{ \textbf{66.48} \textbf{$\pm$ 7.91 } \\ \textbf{1.00 }} & \tabincell{c}{ 49.34 $\pm$ 6.11 \\0.00 } & \tabincell{c}{ 43.42 $\pm$ 9.07 \\0.00 } & \tabincell{c}{ \textbf{65.93} \textbf{$\pm$ 10.62 } \\ \textbf{0.65 }} & \tabincell{c}{ \textbf{66.37} \textbf{$\pm$ 8.07 } \\ \textbf{0.85 }} \\ \hline

        USPS49 & \tabincell{c}{ 23.51 } & \tabincell{c}{ 15.88 $\pm$ 17.98 \\0.00 } & \tabincell{c}{ 35.69 $\pm$ 15.24 \\0.00 } & \tabincell{c}{ \textbf{68.65} \textbf{$\pm$ 9.24 } \\ \textbf{0.06 }} & \tabincell{c}{ 51.66 $\pm$ 16.58 \\0.00 } & \tabincell{c}{ 38.80 $\pm$ 9.29 \\0.00 } & \tabincell{c}{ 16.52 $\pm$ 11.42 \\0.00 } & \tabincell{c}{ 4.28 $\pm$ 2.15 \\0.00 } & \tabincell{c}{ \textbf{67.36} \textbf{$\pm$ 16.75 } \\ \textbf{0.20 }} & \tabincell{c}{ \textbf{74.20} \textbf{$\pm$ 2.77 } \\ \textbf{1.00 }} \\ \hline

        ZOO & \tabincell{c}{ 72.45 } & \tabincell{c}{ 69.89 $\pm$ 8.41 \\0.00 } & \tabincell{c}{ 67.81 $\pm$ 9.64 \\0.00 } & \tabincell{c}{ \textbf{81.92} \textbf{$\pm$ 8.98 } \\ \textbf{0.05 }} & \tabincell{c}{ 77.59 $\pm$ 9.24 \\0.00 } & \tabincell{c}{ \textbf{83.14} \textbf{$\pm$ 9.59 } \\ \textbf{1.00 }} & \tabincell{c}{ 79.65 $\pm$ 10.55 \\0.04 } & \tabincell{c}{ 78.46 $\pm$ 12.97 \\0.02 } & \tabincell{c}{ 81.59 $\pm$ 8.68 \\0.02 } & \tabincell{c}{ \textbf{83.03} \textbf{$\pm$ 9.54 } \\ \textbf{0.40 }} \\ \hline

        COIL20 & \tabincell{c}{ 75.58 } & \tabincell{c}{ 62.21 $\pm$ 4.98 \\0.00 } & \tabincell{c}{ 69.15 $\pm$ 5.37 \\0.00 } & \tabincell{c}{ 44.63 $\pm$ 13.13 \\0.00 } & \tabincell{c}{ 70.84 $\pm$ 4.97 \\0.00 } & \tabincell{c}{ 70.54 $\pm$ 4.48 \\0.00 } & \tabincell{c}{ 68.53 $\pm$ 3.86 \\0.00 } & \tabincell{c}{ 45.22 $\pm$ 13.77 \\0.00 } & \tabincell{c}{ 70.58 $\pm$ 5.63 \\0.00 } & \tabincell{c}{ \textbf{73.55} \textbf{$\pm$ 3.51 } \\ \textbf{1.00 }} \\ \hline

        ECOLI& \tabincell{c}{ 55.48 } & \tabincell{c}{ 59.23 $\pm$ 1.25 \\0.00 } & \tabincell{c}{ \textbf{57.06} \textbf{$\pm$ 6.94 } \\ \textbf{0.14 }} & \tabincell{c}{ 59.27 $\pm$ 1.55 \\0.00 } & \tabincell{c}{ 59.56 $\pm$ 0.91 \\0.00 } & \tabincell{c}{ \textbf{59.90} \textbf{$\pm$ 1.36 } \\ \textbf{0.11 }} & \tabincell{c}{ 59.24 $\pm$ 0.89 \\0.00 } & \tabincell{c}{ \textbf{58.52} \textbf{$\pm$ 3.60 } \\ \textbf{0.09 }} & \tabincell{c}{ 58.92 $\pm$ 2.01 \\0.01 } & \tabincell{c}{ \textbf{60.15} \textbf{$\pm$ 0.99 } \\ \textbf{1.00 }} \\ \hline

        ORL & \tabincell{c}{ 74.26 } & \tabincell{c}{ 65.52 $\pm$ 2.06 \\0.00 } & \tabincell{c}{ 71.02 $\pm$ 3.92 \\0.04 } & \tabincell{c}{ \textbf{71.98} \textbf{$\pm$ 2.62 } \\ \textbf{0.68 }} & \tabincell{c}{ 69.91 $\pm$ 2.77 \\0.00 } & \tabincell{c}{ 70.26 $\pm$ 3.08 \\0.00 } & \tabincell{c}{ 70.06 $\pm$ 2.76 \\0.00 } & \tabincell{c}{ 65.56 $\pm$ 2.37 \\0.00 } & \tabincell{c}{ \textbf{72.01} \textbf{$\pm$ 1.82 } \\ \textbf{0.77 }} & \tabincell{c}{ \textbf{72.15} \textbf{$\pm$ 2.68 } \\ \textbf{1.00 }} \\ \hline

        JAFFE & \tabincell{c}{ 81.52 } & \tabincell{c}{ 77.28 $\pm$ 8.98 \\0.01 } & \tabincell{c}{ 83.39 $\pm$ 3.35 \\0.01 } & \tabincell{c}{ 80.46 $\pm$ 1.92 \\0.00 } & \tabincell{c}{ 82.20 $\pm$ 3.72 \\0.01 } & \tabincell{c}{ 81.43 $\pm$ 4.63 \\0.00 } & \tabincell{c}{ 82.21 $\pm$ 1.55 \\0.00 } & \tabincell{c}{ 73.54 $\pm$ 3.72 \\0.00 } & \tabincell{c}{ \textbf{85.11} \textbf{$\pm$ 1.10 } \\ \textbf{0.64 }} & \tabincell{c}{ \textbf{85.31} \textbf{$\pm$ 1.89 } \\ \textbf{1.00 }} \\ \hline

        TOX & \tabincell{c}{ 15.87 } & \tabincell{c}{ 10.44 $\pm$ 0.79 \\0.00 } & \tabincell{c}{ 10.48 $\pm$ 1.35 \\0.00 } & \tabincell{c}{ 19.85 $\pm$ 1.28 \\0.00 } & \tabincell{c}{ 17.62 $\pm$ 2.57 \\0.00 } & \tabincell{c}{ \textbf{28.82} \textbf{$\pm$ 1.32 } \\ \textbf{0.77 }} & \tabincell{c}{ 23.83 $\pm$ 1.60 \\0.00 } & \tabincell{c}{ 24.23 $\pm$ 3.96 \\0.00 } & \tabincell{c}{ 14.79 $\pm$ 3.42 \\0.00 } & \tabincell{c}{ \textbf{29.05} \textbf{$\pm$ 1.36 } \\ \textbf{1.00 }} \\ \hline

        GLIOMA & \tabincell{c}{ 50.32 } & \tabincell{c}{ 47.05 $\pm$ 2.78 \\0.00 } & \tabincell{c}{ 32.62 $\pm$ 2.09 \\0.00 } & \tabincell{c}{ 32.10 $\pm$ 3.41 \\0.00 } & \tabincell{c}{ 53.93 $\pm$ 1.46 \\0.00 } & \tabincell{c}{ 54.04 $\pm$ 1.14 \\0.00 } & \tabincell{c}{ 52.37 $\pm$ 0.95 \\0.00 } & \tabincell{c}{ 25.33 $\pm$ 3.30 \\0.00 } & \tabincell{c}{ 55.10 $\pm$ 1.05 \\0.00 } & \tabincell{c}{ \textbf{56.67} \textbf{$\pm$ 1.18 } \\ \textbf{1.00 }} \\ \hline

        Average & \tabincell{c}{ 50.23 } & \tabincell{c}{ 46.09 } & \tabincell{c}{ 48.78 } & \tabincell{c}{ 52.36 } & \tabincell{c}{ 55.63 } & \tabincell{c}{ 57.36 } & \tabincell{c}{ 51.25 } & \tabincell{c}{ 42.72 } & \tabincell{c}{ 58.85 } & \tabincell{c}{ \textbf{62.33} }  \\ \hline
    \end{tabular}
\end{table*}






















\begin{table*}
\newcommand{\tabincell}[2]{\begin{tabular}{@{}#1@{}}#2\end{tabular}}
\caption{ The aggregate of the cross validation results evaluated by Accuracy $(\%)$. }
\tiny \centering \label{table:cv-acc}
\begin{tabular}{ c | c  c  c  c  c  c  c  c  c  c }
\hline
\diagbox[width=10em,trim=l]{feature number}{nTrain} & 100 & 200 & 300 & 400 & 500 & 600 & 700 & 800 & 900 & 1000\\ \hline

5& \tabincell{c}{ 90.26 $\pm$ 4.43 }& \tabincell{c}{ 92.33 $\pm$ 1.85 }& \tabincell{c}{ 93.12 $\pm$ 2.27 }& \tabincell{c}{ 92.92 $\pm$ 2.44 }& \tabincell{c}{ 93.65 $\pm$ 1.46 }& \tabincell{c}{ 94.23 $\pm$ 1.07 }& \tabincell{c}{ 92.51 $\pm$ 3.34 }& \tabincell{c}{ 94.43 $\pm$ 0.54 }& \tabincell{c}{ 94.56 $\pm$ 1.10 }& \tabincell{c}{ 93.87 $\pm$ 1.28 }\\
10& \tabincell{c}{ 92.99 $\pm$ 1.67 }& \tabincell{c}{ 94.63 $\pm$ 0.59 }& \tabincell{c}{ 93.37 $\pm$ 1.23 }& \tabincell{c}{ 93.65 $\pm$ 2.79 }& \tabincell{c}{ 94.60 $\pm$ 0.81 }& \tabincell{c}{ 94.52 $\pm$ 0.74 }& \tabincell{c}{ 94.43 $\pm$ 0.75 }& \tabincell{c}{ 95.15 $\pm$ 0.72 }& \tabincell{c}{ 95.53 $\pm$ 0.77 }& \tabincell{c}{ 95.32 $\pm$ 0.99 }\\
15& \tabincell{c}{ 93.15 $\pm$ 1.43 }& \tabincell{c}{ 93.88 $\pm$ 1.34 }& \tabincell{c}{ 94.34 $\pm$ 1.22 }& \tabincell{c}{ 94.59 $\pm$ 1.33 }& \tabincell{c}{ 94.45 $\pm$ 1.28 }& \tabincell{c}{ 93.67 $\pm$ 3.94 }& \tabincell{c}{ 94.51 $\pm$ 1.10 }& \tabincell{c}{ 95.00 $\pm$ 0.87 }& \tabincell{c}{ 94.27 $\pm$ 2.85 }& \tabincell{c}{ 95.39 $\pm$ 0.87 }\\
20& \tabincell{c}{ 92.79 $\pm$ 2.14 }& \tabincell{c}{ 94.34 $\pm$ 1.06 }& \tabincell{c}{ 94.70 $\pm$ 0.51 }& \tabincell{c}{ 94.78 $\pm$ 0.91 }& \tabincell{c}{ 94.03 $\pm$ 2.42 }& \tabincell{c}{ 94.33 $\pm$ 2.74 }& \tabincell{c}{ 95.30 $\pm$ 0.64 }& \tabincell{c}{ 94.51 $\pm$ 2.41 }& \tabincell{c}{ 94.56 $\pm$ 3.99 }& \tabincell{c}{ 95.08 $\pm$ 1.28 }\\
25& \tabincell{c}{ 92.71 $\pm$ 1.10 }& \tabincell{c}{ 93.69 $\pm$ 1.31 }& \tabincell{c}{ 94.17 $\pm$ 1.24 }& \tabincell{c}{ 94.76 $\pm$ 1.04 }& \tabincell{c}{ 95.15 $\pm$ 0.96 }& \tabincell{c}{ 94.53 $\pm$ 2.38 }& \tabincell{c}{ 92.03 $\pm$ 5.54 }& \tabincell{c}{ 93.88 $\pm$ 3.94 }& \tabincell{c}{ 94.48 $\pm$ 3.66 }& \tabincell{c}{ 95.73 $\pm$ 0.66 }\\
30& \tabincell{c}{ 92.84 $\pm$ 1.46 }& \tabincell{c}{ 93.58 $\pm$ 1.29 }& \tabincell{c}{ 94.46 $\pm$ 1.69 }& \tabincell{c}{ 94.80 $\pm$ 1.06 }& \tabincell{c}{ 95.50 $\pm$ 0.88 }& \tabincell{c}{ 94.04 $\pm$ 3.05 }& \tabincell{c}{ 93.93 $\pm$ 3.68 }& \tabincell{c}{ 95.00 $\pm$ 1.17 }& \tabincell{c}{ 94.63 $\pm$ 2.79 }& \tabincell{c}{ 95.72 $\pm$ 0.60 }\\
35& \tabincell{c}{ 92.25 $\pm$ 2.98 }& \tabincell{c}{ 94.38 $\pm$ 1.66 }& \tabincell{c}{ 93.34 $\pm$ 2.82 }& \tabincell{c}{ 94.45 $\pm$ 1.08 }& \tabincell{c}{ 95.09 $\pm$ 1.25 }& \tabincell{c}{ 94.43 $\pm$ 1.41 }& \tabincell{c}{ 94.83 $\pm$ 1.24 }& \tabincell{c}{ 95.38 $\pm$ 1.02 }& \tabincell{c}{ 94.69 $\pm$ 3.01 }& \tabincell{c}{ 95.54 $\pm$ 0.74 }\\
40& \tabincell{c}{ 91.10 $\pm$ 2.97 }& \tabincell{c}{ 93.04 $\pm$ 2.35 }& \tabincell{c}{ 94.07 $\pm$ 1.74 }& \tabincell{c}{ 94.00 $\pm$ 1.15 }& \tabincell{c}{ 94.47 $\pm$ 0.78 }& \tabincell{c}{ 93.48 $\pm$ 2.01 }& \tabincell{c}{ 92.08 $\pm$ 4.07 }& \tabincell{c}{ 95.16 $\pm$ 1.24 }& \tabincell{c}{ 94.38 $\pm$ 3.24 }& \tabincell{c}{ 93.90 $\pm$ 1.43 }\\
45& \tabincell{c}{ 91.16 $\pm$ 3.22 }& \tabincell{c}{ 92.27 $\pm$ 3.23 }& \tabincell{c}{ 94.49 $\pm$ 0.69 }& \tabincell{c}{ 92.96 $\pm$ 2.37 }& \tabincell{c}{ 93.96 $\pm$ 0.81 }& \tabincell{c}{ 92.59 $\pm$ 2.83 }& \tabincell{c}{ 92.37 $\pm$ 3.93 }& \tabincell{c}{ 91.81 $\pm$ 4.59 }& \tabincell{c}{ 94.36 $\pm$ 1.58 }& \tabincell{c}{ 93.51 $\pm$ 1.42 }\\
50& \tabincell{c}{ 89.51 $\pm$ 3.83 }& \tabincell{c}{ 89.53 $\pm$ 4.72 }& \tabincell{c}{ 93.66 $\pm$ 1.19 }& \tabincell{c}{ 92.51 $\pm$ 2.67 }& \tabincell{c}{ 91.29 $\pm$ 2.75 }& \tabincell{c}{ 90.44 $\pm$ 4.16 }& \tabincell{c}{ 90.15 $\pm$ 8.07 }& \tabincell{c}{ 91.80 $\pm$ 3.25 }& \tabincell{c}{ 92.06 $\pm$ 4.07 }& \tabincell{c}{ 92.55 $\pm$ 1.51 }\\ \hline
Average& \tabincell{c}{ 91.88 }& \tabincell{c}{ 93.17 }& \tabincell{c}{ 93.97 }& \tabincell{c}{ 93.94 }& \tabincell{c}{ 94.22 }& \tabincell{c}{ 93.63 }& \tabincell{c}{ 93.21 }& \tabincell{c}{ 94.21 }& \tabincell{c}{ 94.35 }& \tabincell{c}{ 94.66 }\\ \hline
\end{tabular}
\end{table*}




\begin{table*}
\newcommand{\tabincell}[2]{\begin{tabular}{@{}#1@{}}#2\end{tabular}}
\caption{ The aggregate of the cross validation results evaluated by Normalized Mutual Information $(\%)$. }
\tiny \centering \label{table:cv-nmi}
\begin{tabular}{ c | c  c  c  c  c  c  c  c  c  c }
\hline
\diagbox[width=10em,trim=l]{feature number}{nTrain} & 100 & 200 & 300 & 400 & 500 & 600 & 700 & 800 & 900 & 1000\\ \hline

5& \tabincell{c}{ 56.48 $\pm$ 10.26 }& \tabincell{c}{ 62.28 $\pm$ 6.78 }& \tabincell{c}{ 64.69 $\pm$ 7.21 }& \tabincell{c}{ 64.83 $\pm$ 5.92 }& \tabincell{c}{ 66.65 $\pm$ 4.50 }& \tabincell{c}{ 67.95 $\pm$ 3.61 }& \tabincell{c}{ 63.37 $\pm$ 8.61 }& \tabincell{c}{ 69.00 $\pm$ 2.89 }& \tabincell{c}{ 70.00 $\pm$ 4.34 }& \tabincell{c}{ 65.59 $\pm$ 6.33 }\\
10& \tabincell{c}{ 64.30 $\pm$ 6.26 }& \tabincell{c}{ 69.88 $\pm$ 3.06 }& \tabincell{c}{ 66.60 $\pm$ 4.85 }& \tabincell{c}{ 67.94 $\pm$ 6.87 }& \tabincell{c}{ 70.28 $\pm$ 3.27 }& \tabincell{c}{ 69.93 $\pm$ 2.67 }& \tabincell{c}{ 70.65 $\pm$ 2.24 }& \tabincell{c}{ 72.66 $\pm$ 3.04 }& \tabincell{c}{ 74.19 $\pm$ 3.06 }& \tabincell{c}{ 73.29 $\pm$ 4.06 }\\
15& \tabincell{c}{ 65.41 $\pm$ 5.98 }& \tabincell{c}{ 68.76 $\pm$ 2.87 }& \tabincell{c}{ 70.75 $\pm$ 3.18 }& \tabincell{c}{ 70.38 $\pm$ 5.37 }& \tabincell{c}{ 69.96 $\pm$ 5.16 }& \tabincell{c}{ 68.51 $\pm$ 9.13 }& \tabincell{c}{ 70.35 $\pm$ 3.12 }& \tabincell{c}{ 71.86 $\pm$ 3.38 }& \tabincell{c}{ 73.40 $\pm$ 4.45 }& \tabincell{c}{ 74.60 $\pm$ 2.83 }\\
20& \tabincell{c}{ 63.79 $\pm$ 8.01 }& \tabincell{c}{ 70.00 $\pm$ 3.50 }& \tabincell{c}{ 70.07 $\pm$ 3.68 }& \tabincell{c}{ 72.97 $\pm$ 2.27 }& \tabincell{c}{ 69.02 $\pm$ 5.89 }& \tabincell{c}{ 71.49 $\pm$ 7.88 }& \tabincell{c}{ 72.30 $\pm$ 4.78 }& \tabincell{c}{ 71.60 $\pm$ 5.53 }& \tabincell{c}{ 72.48 $\pm$ 8.92 }& \tabincell{c}{ 73.14 $\pm$ 4.39 }\\
25& \tabincell{c}{ 62.93 $\pm$ 4.17 }& \tabincell{c}{ 67.03 $\pm$ 5.19 }& \tabincell{c}{ 68.63 $\pm$ 4.49 }& \tabincell{c}{ 71.27 $\pm$ 3.72 }& \tabincell{c}{ 71.83 $\pm$ 4.01 }& \tabincell{c}{ 70.93 $\pm$ 6.68 }& \tabincell{c}{ 66.96 $\pm$ 11.80 }& \tabincell{c}{ 71.63 $\pm$ 8.47 }& \tabincell{c}{ 72.89 $\pm$ 9.24 }& \tabincell{c}{ 75.36 $\pm$ 3.16 }\\
30& \tabincell{c}{ 63.12 $\pm$ 5.24 }& \tabincell{c}{ 65.62 $\pm$ 4.04 }& \tabincell{c}{ 71.04 $\pm$ 5.40 }& \tabincell{c}{ 70.84 $\pm$ 3.65 }& \tabincell{c}{ 74.23 $\pm$ 3.66 }& \tabincell{c}{ 68.40 $\pm$ 9.03 }& \tabincell{c}{ 69.25 $\pm$ 9.25 }& \tabincell{c}{ 72.25 $\pm$ 4.51 }& \tabincell{c}{ 72.93 $\pm$ 7.01 }& \tabincell{c}{ 74.86 $\pm$ 2.77 }\\
35& \tabincell{c}{ 62.86 $\pm$ 10.76 }& \tabincell{c}{ 69.77 $\pm$ 6.36 }& \tabincell{c}{ 66.95 $\pm$ 6.60 }& \tabincell{c}{ 69.89 $\pm$ 3.29 }& \tabincell{c}{ 72.92 $\pm$ 4.13 }& \tabincell{c}{ 70.21 $\pm$ 3.83 }& \tabincell{c}{ 71.68 $\pm$ 4.78 }& \tabincell{c}{ 73.75 $\pm$ 4.21 }& \tabincell{c}{ 73.48 $\pm$ 6.83 }& \tabincell{c}{ 74.76 $\pm$ 3.97 }\\
40& \tabincell{c}{ 56.78 $\pm$ 10.22 }& \tabincell{c}{ 63.82 $\pm$ 8.18 }& \tabincell{c}{ 68.92 $\pm$ 6.61 }& \tabincell{c}{ 68.25 $\pm$ 4.40 }& \tabincell{c}{ 69.56 $\pm$ 3.38 }& \tabincell{c}{ 66.57 $\pm$ 7.37 }& \tabincell{c}{ 65.27 $\pm$ 8.48 }& \tabincell{c}{ 72.73 $\pm$ 5.21 }& \tabincell{c}{ 71.21 $\pm$ 10.47 }& \tabincell{c}{ 68.80 $\pm$ 4.48 }\\
45& \tabincell{c}{ 58.42 $\pm$ 12.69 }& \tabincell{c}{ 62.52 $\pm$ 11.65 }& \tabincell{c}{ 69.16 $\pm$ 3.81 }& \tabincell{c}{ 65.95 $\pm$ 6.73 }& \tabincell{c}{ 67.57 $\pm$ 2.89 }& \tabincell{c}{ 61.94 $\pm$ 9.88 }& \tabincell{c}{ 67.03 $\pm$ 8.72 }& \tabincell{c}{ 63.44 $\pm$ 11.56 }& \tabincell{c}{ 70.91 $\pm$ 5.26 }& \tabincell{c}{ 67.53 $\pm$ 4.71 }\\
50& \tabincell{c}{ 52.70 $\pm$ 11.57 }& \tabincell{c}{ 56.15 $\pm$ 12.78 }& \tabincell{c}{ 66.60 $\pm$ 4.52 }& \tabincell{c}{ 65.79 $\pm$ 4.31 }& \tabincell{c}{ 61.38 $\pm$ 7.10 }& \tabincell{c}{ 57.88 $\pm$ 9.46 }& \tabincell{c}{ 60.42 $\pm$ 14.26 }& \tabincell{c}{ 61.69 $\pm$ 9.02 }& \tabincell{c}{ 60.71 $\pm$ 10.29 }& \tabincell{c}{ 63.12 $\pm$ 5.97 }\\

\hline
Average& \tabincell{c}{ 60.68 }& \tabincell{c}{ 65.58 }& \tabincell{c}{ 68.34 }& \tabincell{c}{ 68.81 }& \tabincell{c}{ 69.34 }& \tabincell{c}{ 67.38 }& \tabincell{c}{ 67.73 }& \tabincell{c}{ 70.06 }& \tabincell{c}{ 71.22 }& \tabincell{c}{ 71.10 }\\ \hline
\end{tabular}
\end{table*}









\begin{table*}
\newcommand{\tabincell}[2]{\begin{tabular}{@{}#1@{}}#2\end{tabular}}
\caption{ Train Accuracy $(\%)$. }
\tiny \centering \label{table:res-acc}
\begin{tabular}{ c | c  c  c  c  c  c  c  c  c  c }
\hline
\diagbox[width=10em,trim=l]{feature number}{nTrain} & 50 & 100 & 150 & 200 & 250 & 300 & 350 & 400 & 450 & 500\\ \hline

5& \tabincell{c}{ 93.38 $\pm$ 4.30 }& \tabincell{c}{ 94.98 $\pm$ 1.13 }& \tabincell{c}{ 94.66 $\pm$ 0.82 }& \tabincell{c}{ 94.58 $\pm$ 1.04 }& \tabincell{c}{ 94.85 $\pm$ 0.83 }& \tabincell{c}{ 94.54 $\pm$ 1.21 }& \tabincell{c}{ 94.99 $\pm$ 0.87 }& \tabincell{c}{ 94.77 $\pm$ 0.77 }& \tabincell{c}{ 94.61 $\pm$ 0.45 }& \tabincell{c}{ 94.50 $\pm$ 0.77 }\\
10& \tabincell{c}{ 94.83 $\pm$ 4.29 }& \tabincell{c}{ 96.53 $\pm$ 0.78 }& \tabincell{c}{ 95.32 $\pm$ 0.79 }& \tabincell{c}{ 95.19 $\pm$ 0.72 }& \tabincell{c}{ 95.64 $\pm$ 0.69 }& \tabincell{c}{ 95.63 $\pm$ 1.03 }& \tabincell{c}{ 95.92 $\pm$ 0.55 }& \tabincell{c}{ 95.66 $\pm$ 0.68 }& \tabincell{c}{ 95.71 $\pm$ 0.53 }& \tabincell{c}{ 95.88 $\pm$ 0.50 }\\
15& \tabincell{c}{ 94.65 $\pm$ 4.08 }& \tabincell{c}{ 96.87 $\pm$ 0.93 }& \tabincell{c}{ 95.83 $\pm$ 0.72 }& \tabincell{c}{ 95.87 $\pm$ 0.85 }& \tabincell{c}{ 95.96 $\pm$ 0.73 }& \tabincell{c}{ 95.95 $\pm$ 0.93 }& \tabincell{c}{ 96.24 $\pm$ 0.60 }& \tabincell{c}{ 96.05 $\pm$ 0.56 }& \tabincell{c}{ 95.87 $\pm$ 0.58 }& \tabincell{c}{ 96.11 $\pm$ 0.51 }\\
20& \tabincell{c}{ 94.72 $\pm$ 3.80 }& \tabincell{c}{ 96.89 $\pm$ 1.03 }& \tabincell{c}{ 95.96 $\pm$ 0.83 }& \tabincell{c}{ 96.12 $\pm$ 0.67 }& \tabincell{c}{ 96.15 $\pm$ 0.62 }& \tabincell{c}{ 96.05 $\pm$ 0.97 }& \tabincell{c}{ 96.22 $\pm$ 0.51 }& \tabincell{c}{ 95.96 $\pm$ 0.33 }& \tabincell{c}{ 96.20 $\pm$ 0.47 }& \tabincell{c}{ 96.20 $\pm$ 0.33 }\\
25& \tabincell{c}{ 94.56 $\pm$ 3.86 }& \tabincell{c}{ 96.86 $\pm$ 0.94 }& \tabincell{c}{ 95.83 $\pm$ 1.02 }& \tabincell{c}{ 95.78 $\pm$ 0.87 }& \tabincell{c}{ 96.17 $\pm$ 0.79 }& \tabincell{c}{ 95.93 $\pm$ 0.97 }& \tabincell{c}{ 96.12 $\pm$ 0.69 }& \tabincell{c}{ 96.29 $\pm$ 0.54 }& \tabincell{c}{ 96.33 $\pm$ 0.68 }& \tabincell{c}{ 96.32 $\pm$ 0.40 }\\
30& \tabincell{c}{ 94.12 $\pm$ 3.97 }& \tabincell{c}{ 96.66 $\pm$ 1.11 }& \tabincell{c}{ 95.78 $\pm$ 1.06 }& \tabincell{c}{ 95.91 $\pm$ 0.64 }& \tabincell{c}{ 96.27 $\pm$ 0.80 }& \tabincell{c}{ 95.60 $\pm$ 1.12 }& \tabincell{c}{ 96.08 $\pm$ 0.91 }& \tabincell{c}{ 96.03 $\pm$ 0.64 }& \tabincell{c}{ 96.23 $\pm$ 0.68 }& \tabincell{c}{ 96.19 $\pm$ 0.45 }\\
35& \tabincell{c}{ 92.27 $\pm$ 4.69 }& \tabincell{c}{ 96.70 $\pm$ 1.35 }& \tabincell{c}{ 95.41 $\pm$ 1.40 }& \tabincell{c}{ 95.34 $\pm$ 1.05 }& \tabincell{c}{ 95.98 $\pm$ 0.84 }& \tabincell{c}{ 95.33 $\pm$ 1.48 }& \tabincell{c}{ 96.09 $\pm$ 0.79 }& \tabincell{c}{ 95.72 $\pm$ 0.72 }& \tabincell{c}{ 95.96 $\pm$ 0.79 }& \tabincell{c}{ 96.06 $\pm$ 0.81 }\\
40& \tabincell{c}{ 90.01 $\pm$ 5.36 }& \tabincell{c}{ 95.88 $\pm$ 1.78 }& \tabincell{c}{ 95.17 $\pm$ 1.55 }& \tabincell{c}{ 94.98 $\pm$ 1.22 }& \tabincell{c}{ 95.56 $\pm$ 0.87 }& \tabincell{c}{ 94.89 $\pm$ 1.60 }& \tabincell{c}{ 95.54 $\pm$ 0.79 }& \tabincell{c}{ 95.53 $\pm$ 0.87 }& \tabincell{c}{ 95.76 $\pm$ 0.86 }& \tabincell{c}{ 95.50 $\pm$ 0.48 }\\
45& \tabincell{c}{ 87.94 $\pm$ 6.65 }& \tabincell{c}{ 94.99 $\pm$ 2.70 }& \tabincell{c}{ 94.92 $\pm$ 1.44 }& \tabincell{c}{ 94.39 $\pm$ 1.66 }& \tabincell{c}{ 94.14 $\pm$ 0.71 }& \tabincell{c}{ 93.90 $\pm$ 1.66 }& \tabincell{c}{ 94.94 $\pm$ 1.27 }& \tabincell{c}{ 95.05 $\pm$ 1.01 }& \tabincell{c}{ 95.22 $\pm$ 0.82 }& \tabincell{c}{ 95.08 $\pm$ 0.94 }\\
50& \tabincell{c}{ 87.68 $\pm$ 6.10 }& \tabincell{c}{ 94.46 $\pm$ 2.61 }& \tabincell{c}{ 93.99 $\pm$ 1.41 }& \tabincell{c}{ 93.94 $\pm$ 2.01 }& \tabincell{c}{ 93.40 $\pm$ 1.24 }& \tabincell{c}{ 93.47 $\pm$ 1.54 }& \tabincell{c}{ 94.37 $\pm$ 1.71 }& \tabincell{c}{ 93.93 $\pm$ 1.09 }& \tabincell{c}{ 93.94 $\pm$ 1.34 }& \tabincell{c}{ 94.23 $\pm$ 0.96 }\\

\hline
Average& \tabincell{c}{ 92.42 }& \tabincell{c}{ 96.08 }& \tabincell{c}{ 95.29 }& \tabincell{c}{ 95.21 }& \tabincell{c}{ 95.41 }& \tabincell{c}{ 95.13 }& \tabincell{c}{ 95.65 }& \tabincell{c}{ 95.50 }& \tabincell{c}{ 95.58 }& \tabincell{c}{ 95.61 }\\ \hline
\end{tabular}
\end{table*}




\begin{table*}
\newcommand{\tabincell}[2]{\begin{tabular}{@{}#1@{}}#2\end{tabular}}
\caption{ Train NMI $(\%)$. }
\tiny \centering \label{table:res-train-nmi}
\begin{tabular}{ c | c  c  c  c  c  c  c  c  c  c }
\hline
\diagbox[width=10em,trim=l]{feature number}{nTrain} & 50 & 100 & 150 & 200 & 250 & 300 & 350 & 400 & 450 & 500\\ \hline

5& \tabincell{c}{ 67.45 $\pm$ 14.24 }& \tabincell{c}{ 72.32 $\pm$ 5.06 }& \tabincell{c}{ 70.58 $\pm$ 3.37 }& \tabincell{c}{ 70.39 $\pm$ 4.39 }& \tabincell{c}{ 71.48 $\pm$ 3.49 }& \tabincell{c}{ 70.27 $\pm$ 5.08 }& \tabincell{c}{ 71.75 $\pm$ 3.67 }& \tabincell{c}{ 70.97 $\pm$ 3.01 }& \tabincell{c}{ 70.41 $\pm$ 1.52 }& \tabincell{c}{ 70.10 $\pm$ 2.93 }\\
10& \tabincell{c}{ 74.64 $\pm$ 17.35 }& \tabincell{c}{ 79.31 $\pm$ 3.30 }& \tabincell{c}{ 74.24 $\pm$ 3.81 }& \tabincell{c}{ 73.05 $\pm$ 2.99 }& \tabincell{c}{ 74.90 $\pm$ 3.27 }& \tabincell{c}{ 74.81 $\pm$ 4.97 }& \tabincell{c}{ 75.84 $\pm$ 2.42 }& \tabincell{c}{ 74.77 $\pm$ 2.95 }& \tabincell{c}{ 74.86 $\pm$ 2.32 }& \tabincell{c}{ 75.63 $\pm$ 2.22 }\\
15& \tabincell{c}{ 74.29 $\pm$ 16.14 }& \tabincell{c}{ 81.17 $\pm$ 4.78 }& \tabincell{c}{ 75.97 $\pm$ 2.81 }& \tabincell{c}{ 75.75 $\pm$ 3.97 }& \tabincell{c}{ 76.22 $\pm$ 3.12 }& \tabincell{c}{ 76.06 $\pm$ 4.07 }& \tabincell{c}{ 77.22 $\pm$ 2.71 }& \tabincell{c}{ 76.55 $\pm$ 2.45 }& \tabincell{c}{ 75.64 $\pm$ 2.62 }& \tabincell{c}{ 76.75 $\pm$ 2.37 }\\
20& \tabincell{c}{ 73.65 $\pm$ 15.74 }& \tabincell{c}{ 80.93 $\pm$ 5.69 }& \tabincell{c}{ 76.77 $\pm$ 3.32 }& \tabincell{c}{ 77.01 $\pm$ 3.05 }& \tabincell{c}{ 76.91 $\pm$ 2.79 }& \tabincell{c}{ 76.53 $\pm$ 4.15 }& \tabincell{c}{ 77.26 $\pm$ 2.38 }& \tabincell{c}{ 76.12 $\pm$ 1.62 }& \tabincell{c}{ 77.22 $\pm$ 2.15 }& \tabincell{c}{ 77.12 $\pm$ 1.40 }\\
25& \tabincell{c}{ 72.79 $\pm$ 16.60 }& \tabincell{c}{ 81.22 $\pm$ 5.06 }& \tabincell{c}{ 76.11 $\pm$ 4.14 }& \tabincell{c}{ 75.68 $\pm$ 3.80 }& \tabincell{c}{ 77.19 $\pm$ 3.59 }& \tabincell{c}{ 76.11 $\pm$ 4.25 }& \tabincell{c}{ 77.02 $\pm$ 2.74 }& \tabincell{c}{ 77.58 $\pm$ 2.39 }& \tabincell{c}{ 77.98 $\pm$ 3.03 }& \tabincell{c}{ 77.69 $\pm$ 1.75 }\\
30& \tabincell{c}{ 71.21 $\pm$ 14.72 }& \tabincell{c}{ 79.77 $\pm$ 5.36 }& \tabincell{c}{ 76.10 $\pm$ 4.08 }& \tabincell{c}{ 76.23 $\pm$ 3.37 }& \tabincell{c}{ 77.66 $\pm$ 3.71 }& \tabincell{c}{ 74.82 $\pm$ 5.11 }& \tabincell{c}{ 76.48 $\pm$ 4.07 }& \tabincell{c}{ 76.39 $\pm$ 2.74 }& \tabincell{c}{ 77.45 $\pm$ 3.09 }& \tabincell{c}{ 76.98 $\pm$ 2.02 }\\
35& \tabincell{c}{ 66.38 $\pm$ 17.85 }& \tabincell{c}{ 80.47 $\pm$ 6.79 }& \tabincell{c}{ 74.51 $\pm$ 5.57 }& \tabincell{c}{ 73.82 $\pm$ 4.66 }& \tabincell{c}{ 76.21 $\pm$ 3.85 }& \tabincell{c}{ 73.78 $\pm$ 6.48 }& \tabincell{c}{ 76.68 $\pm$ 3.49 }& \tabincell{c}{ 75.32 $\pm$ 2.73 }& \tabincell{c}{ 76.21 $\pm$ 3.79 }& \tabincell{c}{ 76.66 $\pm$ 3.41 }\\
40& \tabincell{c}{ 57.45 $\pm$ 17.52 }& \tabincell{c}{ 76.43 $\pm$ 8.17 }& \tabincell{c}{ 73.14 $\pm$ 6.42 }& \tabincell{c}{ 72.04 $\pm$ 4.98 }& \tabincell{c}{ 74.52 $\pm$ 3.68 }& \tabincell{c}{ 71.84 $\pm$ 6.72 }& \tabincell{c}{ 74.22 $\pm$ 3.35 }& \tabincell{c}{ 74.44 $\pm$ 3.68 }& \tabincell{c}{ 75.63 $\pm$ 3.82 }& \tabincell{c}{ 74.02 $\pm$ 2.06 }\\
45& \tabincell{c}{ 52.65 $\pm$ 20.09 }& \tabincell{c}{ 73.91 $\pm$ 10.48 }& \tabincell{c}{ 72.96 $\pm$ 5.27 }& \tabincell{c}{ 70.35 $\pm$ 6.21 }& \tabincell{c}{ 68.74 $\pm$ 3.22 }& \tabincell{c}{ 67.65 $\pm$ 6.77 }& \tabincell{c}{ 71.99 $\pm$ 5.28 }& \tabincell{c}{ 72.23 $\pm$ 4.18 }& \tabincell{c}{ 72.98 $\pm$ 3.51 }& \tabincell{c}{ 72.60 $\pm$ 3.80 }\\
50& \tabincell{c}{ 51.48 $\pm$ 18.39 }& \tabincell{c}{ 70.92 $\pm$ 9.80 }& \tabincell{c}{ 68.90 $\pm$ 5.98 }& \tabincell{c}{ 68.84 $\pm$ 7.01 }& \tabincell{c}{ 65.81 $\pm$ 4.83 }& \tabincell{c}{ 66.20 $\pm$ 6.00 }& \tabincell{c}{ 69.67 $\pm$ 6.64 }& \tabincell{c}{ 67.88 $\pm$ 4.26 }& \tabincell{c}{ 68.24 $\pm$ 5.00 }& \tabincell{c}{ 69.36 $\pm$ 3.60 }\\

\hline
Average& \tabincell{c}{ 66.20 }& \tabincell{c}{ 77.65 }& \tabincell{c}{ 73.93 }& \tabincell{c}{ 73.32 }& \tabincell{c}{ 73.96 }& \tabincell{c}{ 72.81 }& \tabincell{c}{ 95.65 }& \tabincell{c}{ 95.50 }& \tabincell{c}{ 95.58 }& \tabincell{c}{ 95.61 }\\ \hline % NOTE(review): the last four averages (95.xx) are inconsistent with the per-row values above (range 66--81); recompute these column averages -- they appear copied from a different table.
\end{tabular}
\end{table*}



\begin{figure}[htbp]
\centering
\subfloat[$\lambda_{1}$ on ECOLI]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_ecoli_lambda1}
\label{fig:sensitivity_acc_ecoli_lambda1}}
\subfloat[$\lambda_{2}$ on ECOLI]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_ecoli_lambda2}
\label{fig:sensitivity_acc_ecoli_lambda2}}
\\
\subfloat[$\lambda_{1}$ on ORL]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_ORL_lambda1}
    \label{fig:sensitivity_acc_ORL_lambda1}}
\subfloat[$\lambda_{2}$ on ORL]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_ORL_lambda2}
    \label{fig:sensitivity_acc_ORL_lambda2}}
\\\subfloat[$\lambda_{1}$ on JAFFE]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_JAFFE_lambda1}
    \label{fig:sensitivity_acc_JAFFE_lambda1}}
\subfloat[$\lambda_{2}$ on JAFFE]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_JAFFE_lambda2}
    \label{fig:sensitivity_acc_JAFFE_lambda2}}
\\
\subfloat[$\lambda_{1}$ on TOX]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_TOX_lambda1}
    \label{fig:sensitivity_acc_TOX_lambda1}}
\subfloat[$\lambda_{2}$ on TOX]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_TOX_lambda2}
    \label{fig:sensitivity_acc_TOX_lambda2}}
\\
\subfloat[$\lambda_{1}$ on GLIOMA]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_GLIOMA_lambda1}
    \label{fig:sensitivity_acc_GLIOMA_lambda1}}
\subfloat[$\lambda_{2}$ on GLIOMA]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_GLIOMA_lambda2}
    \label{fig:sensitivity_acc_GLIOMA_lambda2}}
\caption{Clustering accuracy w.r.t. $\lambda_{1}$ and $\lambda_{2}$.}
\label{sensitivity_2}
\end{figure}



\begin{figure}[htbp]
    \centering
    \subfloat[$\gamma$ on Housing]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_housing_gamma}
        \label{fig:sensitivity_acc_housing_gamma}}
    \subfloat[$\gamma$ on Wine]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_wine_gamma}
        \label{fig:sensitivity_acc_wine_gamma}}
    \\
    \subfloat[$\gamma$ on USPS]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_USPS49_gamma}
        \label{fig:sensitivity_acc_USPS49_gamma}}
    \subfloat[$\gamma$ on Zoo]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_zoo_gamma}
        \label{fig:sensitivity_acc_zoo_gamma}}
    \\
    \subfloat[$\gamma$ on COIL]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_COIL20_gamma}
        \label{fig:sensitivity_acc_COIL20_gamma}}
    \subfloat[$\gamma$ on ECOLI]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_ecoli_gamma}
        \label{fig:sensitivity_acc_ecoli_gamma}}
    \\
    \subfloat[$\gamma$ on ORL]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_ORL_gamma}
        \label{fig:sensitivity_acc_ORL_gamma}}
    \subfloat[$\gamma$ on JAFFE]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_JAFFE_gamma}
        \label{fig:sensitivity_acc_JAFFE_gamma}}
    \\
    \subfloat[$\gamma$ on TOX]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_TOX_gamma}
        \label{fig:sensitivity_acc_TOX_gamma}}
    \subfloat[$\gamma$ on GLIOMA]{\includegraphics[width=0.24\textwidth]{sensitivity_acc_GLIOMA_gamma}
        \label{fig:sensitivity_acc_GLIOMA_gamma}}
    \caption{Clustering accuracy w.r.t. $\gamma$ on all data sets.}
    \label{sensitivity_gamma_aio}
\end{figure}

\appendices
\section*{Proof of Convergence }


\subsection{Proof of Theorem $\mathbf{III}$-$\mathbf{E4}$}\label{proof_u}
\begin{proof}
The objective function with respect to $\mathbf{U}$ in Eq.~\eqref{obj_u_2} can be rewritten as
\begin{align}\label{obj_u_3}
\mathbf{\mathcal{J}}(\mathbf{U}) &= \tr(\mathbf{U}^T \mathbf{F}^{+} \mathbf{U} \mathbf{G}^{+} ) - \tr(\mathbf{U}^T \mathbf{F}^{+} \mathbf{U} \mathbf{G}^{-} )  \nonumber\\
&-\tr(\mathbf{U}^T \mathbf{F}^{-} \mathbf{U} \mathbf{G}^{+} ) + \tr(\mathbf{U}^T \mathbf{F}^{-} \mathbf{U} \mathbf{G}^{-} ) \nonumber\\
&- 2\tr( \mathbf{U}^T\mathbf{H}^{+} ) + 2\tr( \mathbf{U}^T\mathbf{H}^{-} )
\end{align}
By applying Lemma \ref{lemma_tr_neq}, we have
\begin{align}\label{neq_u_01}
\tr(\mathbf{U}^T \mathbf{F}^{+} \mathbf{U} \mathbf{G}^{+} ) \leq \sum_{i=1}^{n}\sum_{p=1}^{k} \frac{(\mathbf{F}^{+} \mathbf{U'} \mathbf{G}^{+} )_{ip} \mathbf{U}_{ip}^{2} }{\mathbf{U'}_{ip}} \\
\tr(\mathbf{U}^T \mathbf{F}^{-} \mathbf{U} \mathbf{G}^{-} ) \leq \sum_{i=1}^{n}\sum_{p=1}^{k} \frac{(\mathbf{F}^{-} \mathbf{U'} \mathbf{G}^{-} )_{ip} \mathbf{U}_{ip}^{2} }{\mathbf{U'}_{ip}}
\end{align}

Moreover, by the inequality $a \leq \frac{a^2 + b^2}{2b}, \forall a,b>0$, we have the following inequality
\begin{align}\label{neq_u_02}
\tr( \mathbf{U}^T\mathbf{H}^{-} ) \leq \sum_{i=1}^{n}\sum_{p=1}^{k}\mathbf{H}^{-}_{ip} \frac{\mathbf{U}_{ip}^2 + \mathbf{U'}_{ip}^2}{2\mathbf{U'}_{ip}}
\end{align}

To obtain the lower bound for the remaining terms, we use the inequality that $z \geq 1 + \log z, \forall z > 0$, then
\begin{align}\label{neq_u_03}
\tr( \mathbf{U}^T\mathbf{H}^{+} ) &\geq \sum_{i=1}^{n}\sum_{p=1}^{k} \mathbf{H}^{+}_{ip}  \mathbf{U'}_{ip} (1 + \log \frac{\mathbf{U}_{ip}}{\mathbf{U'}_{ip}} )
\end{align}
\begin{align}\label{neq_u_04}
&\quad \tr(\mathbf{U}^T \mathbf{F}^{+} \mathbf{U} \mathbf{G}^{-} ) \nonumber \\ &=\sum_{i=1}^{n}\sum_{j=1}^{n}\sum_{p=1}^{k}\sum_{q=1}^{k} \mathbf{U}_{ip}\mathbf{F}_{ij}^{+}\mathbf{U}_{jq}\mathbf{G}_{qp}^{-} \\
&\geq \sum_{i=1}^{n}\sum_{j=1}^{n}\sum_{p=1}^{k}\sum_{q=1}^{k} \mathbf{F}_{ij}^{+}\mathbf{G}_{qp}^{-}\mathbf{U'}_{ip}\mathbf{U'}_{jq}(1 + \log \frac{\mathbf{U}_{ip}\mathbf{U}_{jq}}{\mathbf{U'}_{ip}\mathbf{U'}_{jq}} )\nonumber
\end{align}


\begin{align}\label{neq_u_05}
&\quad \tr(\mathbf{U}^T \mathbf{F}^{-} \mathbf{U} \mathbf{G}^{+} ) \nonumber \\ &=\sum_{i=1}^{n}\sum_{j=1}^{n}\sum_{p=1}^{k}\sum_{q=1}^{k} \mathbf{U}_{ip}\mathbf{F}_{ij}^{-}\mathbf{U}_{jq}\mathbf{G}_{qp}^{+} \\
&\geq \sum_{i=1}^{n}\sum_{j=1}^{n}\sum_{p=1}^{k}\sum_{q=1}^{k} \mathbf{F}_{ij}^{-}\mathbf{G}_{qp}^{+}\mathbf{U'}_{ip}\mathbf{U'}_{jq}(1 + \log \frac{\mathbf{U}_{ip}\mathbf{U}_{jq}}{\mathbf{U'}_{ip}\mathbf{U'}_{jq}} )\nonumber
\end{align}
By summing over all the bounds in Eqs.~\eqref{neq_u_01}, \eqref{neq_u_02}, \eqref{neq_u_03}, \eqref{neq_u_04} and \eqref{neq_u_05}, we can get the auxiliary function $\mathbf{\mathcal{J}}(\mathbf{U}, \mathbf{U'})$, which satisfies (1) $\mathbf{\mathcal{J}}(\mathbf{U}, \mathbf{U'}) \geq \mathbf{\mathcal{J}}(\mathbf{U})$; (2) $\mathbf{\mathcal{J}}(\mathbf{U}, \mathbf{U}) = \mathbf{\mathcal{J}}(\mathbf{U})$.

To find the minimum of $\mathbf{\mathcal{J}}(\mathbf{U}, \mathbf{U'})$, we take
\begin{align}
&\quad \frac{\partial \mathbf{\mathcal{J}}(\mathbf{U}, \mathbf{U'})}{\partial \mathbf{U}_{ip}} \\
&= \quad 2\frac{(\mathbf{F}^{+} \mathbf{U'} \mathbf{G}^{+} )_{ip} \mathbf{U}_{ip} }{\mathbf{U'}_{ip}} +  2 \frac{(\mathbf{F}^{-} \mathbf{U'} \mathbf{G}^{-} )_{ip} \mathbf{U}_{ip} }{\mathbf{U'}_{ip}} \nonumber \\
&-\quad 2 \sum_{j=1}^{n}\sum_{q=1}^{k} \mathbf{F}_{ij}^{-}\mathbf{G}_{qp}^{+}\frac{\mathbf{U'}_{ip}}{\mathbf{U}_{ip}} -2\sum_{j=1}^{n}\sum_{q=1}^{k} \mathbf{F}_{ij}^{+}\mathbf{G}_{qp}^{-}\frac{\mathbf{U'}_{ip}}{\mathbf{U}_{ip}} \nonumber \\
&+ \quad 2 \mathbf{H}^{-}_{ip} \frac{\mathbf{U}_{ip}}{\mathbf{U'}_{ip}} - 2 \mathbf{H}^{+}_{ip} \frac{\mathbf{U'}_{ip}}{\mathbf{U}_{ip}} \nonumber
\end{align}
and the following Hessian matrix of $\mathbf{\mathcal{J}}(\mathbf{U}, \mathbf{U'})$
\begin{align}
&\quad \frac{\partial^2 \mathbf{\mathcal{J}}(\mathbf{U}, \mathbf{U'})}{\partial \mathbf{U}_{ip} \partial \mathbf{U}_{jq}} \\
&= \quad  \delta_{ip}\delta_{jq}\left ( 2\frac{(\mathbf{F}^{+} \mathbf{U'} \mathbf{G}^{+} )_{ip}  }{\mathbf{U'}_{ip}} +  2 \frac{(\mathbf{F}^{-} \mathbf{U'} \mathbf{G}^{-} )_{ip}  }{\mathbf{U'}_{ip}} \right. \nonumber \\
&+\quad 2 \sum_{j=1}^{n}\sum_{q=1}^{k} \mathbf{F}_{ij}^{-}\mathbf{G}_{qp}^{+}\frac{\mathbf{U'}_{ip}}{\mathbf{U}_{ip}^2} +2\sum_{j=1}^{n}\sum_{q=1}^{k} \mathbf{F}_{ij}^{+}\mathbf{G}_{qp}^{-}\frac{\mathbf{U'}_{ip}}{\mathbf{U}_{ip}^2} \nonumber \\
&+ \quad 2  \left. \frac{\mathbf{H}^{-}_{ip}}{\mathbf{U'}_{ip}} + 2 \mathbf{H}^{+}_{ip} \frac{\mathbf{U'}_{ip}}{\mathbf{U}_{ip}^2}  \right) \nonumber
\end{align}
is a diagonal matrix with positive diagonal elements.

Thus $\mathbf{\mathcal{J}}(\mathbf{U}, \mathbf{U'})$ is a convex function of $\mathbf{U}$. Therefore, we can obtain the global minimum of $\mathbf{\mathcal{J}}(\mathbf{U}, \mathbf{U'})$ by setting $\frac{\partial \mathbf{\mathcal{J}}(\mathbf{U}, \mathbf{U'})}{\partial \mathbf{U}_{ip}} = 0$ and solving for $\mathbf{U}$, from which we can get Eq.~\eqref{eq:update_U}.
\end{proof}


\subsection{Proof of Theorem $\mathbf{III}$-$\mathbf{E6}$}\label{proof_v}
% NOTE(review): unlike Appendix~\ref{proof_u}, this argument is not wrapped in a proof environment; add \begin{proof} ... \end{proof} for consistency.
The objective function in Eq.~\eqref{opt_v2} can be rewritten as
\begin{align}\label{obj_v_3}
\mathbf{\mathcal{J}}(\mathbf{V}) &= \tr(\mathbf{V}^T \mathbf{B}^{+} \mathbf{V} \mathbf{P}^{+} ) - \tr(\mathbf{V}^T \mathbf{B}^{+} \mathbf{V} \mathbf{P}^{-} )  \nonumber\\
&-\tr(\mathbf{V}^T \mathbf{B}^{-} \mathbf{V} \mathbf{P}^{+} ) + \tr(\mathbf{V}^T \mathbf{B}^{-} \mathbf{V} \mathbf{P}^{-} ) \nonumber\\
&- 2\tr( \mathbf{V}^T\mathbf{M}^{+} ) + 2\tr( \mathbf{V}^T\mathbf{M}^{-} ) \nonumber \\
&+ \lambda_1  \tr(\mathbf{V}^T \mathbf{Z}^{+} \mathbf{V}) - \lambda_1  \tr(\mathbf{V}^T \mathbf{Z}^{-} \mathbf{V}) \nonumber\\
& + \lambda_{2} \tr(\mathbf{V}^T \mathbf{Q} \mathbf{V})
\end{align}
By applying Lemma \ref{lemma_tr_neq}, we have
\begin{align}\label{neq_v_01}
\tr(\mathbf{V}^T \mathbf{B}^{+} \mathbf{V} \mathbf{P}^{+} ) &\leq \sum_{i=1}^{d}\sum_{p=1}^{k} \frac{(\mathbf{B}^{+} \mathbf{V'} \mathbf{P}^{+} )_{ip} \mathbf{V}_{ip}^{2} }{\mathbf{V'}_{ip}} \\
\tr(\mathbf{V}^T \mathbf{B}^{-} \mathbf{V} \mathbf{P}^{-} ) &\leq \sum_{i=1}^{d}\sum_{p=1}^{k} \frac{(\mathbf{B}^{-} \mathbf{V'} \mathbf{P}^{-} )_{ip} \mathbf{V}_{ip}^{2} }{\mathbf{V'}_{ip}}\\
\tr(\mathbf{V}^T \mathbf{Z}^{+} \mathbf{V} ) &\leq \sum_{i=1}^{d}\sum_{p=1}^{k} \frac{(\mathbf{Z}^{+} \mathbf{V'} )_{ip} \mathbf{V}_{ip}^{2} }{\mathbf{V'}_{ip}} \\
\tr(\mathbf{V}^T \mathbf{Q} \mathbf{V} ) &\leq \sum_{i=1}^{d}\sum_{p=1}^{k} \frac{(\mathbf{Q} \mathbf{V'} )_{ip} \mathbf{V}_{ip}^{2} }{\mathbf{V'}_{ip}}
\end{align}
Moreover, by the inequality $a \leq \frac{a^2 + b^2}{2b}, \forall a,b>0$, we have the following inequality
\begin{align}\label{neq_v_02}
\tr( \mathbf{V}^T\mathbf{M}^{-} ) \leq \sum_{i=1}^{d}\sum_{p=1}^{k}\mathbf{M}^{-}_{ip} \frac{\mathbf{V}_{ip}^2 + \mathbf{V'}_{ip}^2}{2\mathbf{V'}_{ip}}
\end{align}

To obtain the lower bound for the remaining terms, we use the inequality that $z \geq 1 + \log z, \forall z > 0$, then
\begin{align}\label{neq_v_03}
\tr( \mathbf{V}^T\mathbf{M}^{+} ) &\geq \sum_{i=1}^{d}\sum_{p=1}^{k} \mathbf{M}^{+}_{ip}  \mathbf{V'}_{ip} (1 + \log \frac{\mathbf{V}_{ip}}{\mathbf{V'}_{ip}} )
\end{align}
\begin{align}\label{neq_v_04}
&\quad \tr(\mathbf{V}^T \mathbf{B}^{+} \mathbf{V} \mathbf{P}^{-} ) \nonumber \\ &=\sum_{i=1}^{d}\sum_{j=1}^{d}\sum_{p=1}^{k}\sum_{q=1}^{k} \mathbf{V}_{ip}\mathbf{B}_{ij}^{+}\mathbf{V}_{jq}\mathbf{P}_{qp}^{-} \\
&\geq \sum_{i=1}^{d}\sum_{j=1}^{d}\sum_{p=1}^{k}\sum_{q=1}^{k} \mathbf{B}_{ij}^{+}\mathbf{P}_{qp}^{-}\mathbf{V'}_{ip}\mathbf{V'}_{jq}(1 + \log \frac{\mathbf{V}_{ip}\mathbf{V}_{jq}}{\mathbf{V'}_{ip}\mathbf{V'}_{jq}} )\nonumber
\end{align}

\begin{align}\label{neq_v_05}
&\quad \tr(\mathbf{V}^T \mathbf{B}^{-} \mathbf{V} \mathbf{P}^{+} ) \nonumber \\ &=\sum_{i=1}^{d}\sum_{j=1}^{d}\sum_{p=1}^{k}\sum_{q=1}^{k} \mathbf{V}_{ip}\mathbf{B}_{ij}^{-}\mathbf{V}_{jq}\mathbf{P}_{qp}^{+} \\
&\geq \sum_{i=1}^{d}\sum_{j=1}^{d}\sum_{p=1}^{k}\sum_{q=1}^{k} \mathbf{B}_{ij}^{-}\mathbf{P}_{qp}^{+}\mathbf{V'}_{ip}\mathbf{V'}_{jq}(1 + \log \frac{\mathbf{V}_{ip}\mathbf{V}_{jq}}{\mathbf{V'}_{ip}\mathbf{V'}_{jq}} )\nonumber
\end{align}

\begin{align}\label{neq_v_06}
&\quad \tr(\mathbf{V}^T \mathbf{Z}^{-} \mathbf{V}  ) =\sum_{i=1}^{d}\sum_{j=1}^{d}\sum_{p=1}^{k} \mathbf{V}_{ip}\mathbf{Z}_{ij}^{-}\mathbf{V}_{jp} \\
&\geq \sum_{i=1}^{d}\sum_{j=1}^{d}\sum_{p=1}^{k} \mathbf{Z}_{ij}^{-}\mathbf{V'}_{ip}\mathbf{V'}_{jp}(1 + \log \frac{\mathbf{V}_{ip}\mathbf{V}_{jp}}{\mathbf{V'}_{ip}\mathbf{V'}_{jp}} )\nonumber
\end{align}


By summing over all the bounds in Eqs.~\eqref{neq_v_01}, \eqref{neq_v_02}, \eqref{neq_v_03}, \eqref{neq_v_04}, \eqref{neq_v_05} and \eqref{neq_v_06}, we can get the auxiliary function $\mathbf{\mathcal{J}}(\mathbf{V}, \mathbf{V'})$, which satisfies (1) $\mathbf{\mathcal{J}}(\mathbf{V}, \mathbf{V'}) \geq \mathbf{\mathcal{J}}(\mathbf{V})$; (2) $\mathbf{\mathcal{J}}(\mathbf{V}, \mathbf{V}) = \mathbf{\mathcal{J}}(\mathbf{V})$.


To find the minimum of $\mathbf{\mathcal{J}}(\mathbf{V}, \mathbf{V'})$, we take
\begin{align}
&\quad \frac{\partial \mathbf{\mathcal{J}}(\mathbf{V}, \mathbf{V'})}{\partial \mathbf{V}_{ip}} \\
&= \quad 2\frac{(\mathbf{B}^{+} \mathbf{V'} \mathbf{P}^{+} )_{ip} \mathbf{V}_{ip} }{\mathbf{V'}_{ip}} +  2 \frac{(\mathbf{B}^{-} \mathbf{V'} \mathbf{P}^{-} )_{ip} \mathbf{V}_{ip} }{\mathbf{V'}_{ip}} \nonumber \\
&-\quad 2 \sum_{j=1}^{d}\sum_{q=1}^{k} \mathbf{B}_{ij}^{-}\mathbf{P}_{qp}^{+}\frac{\mathbf{V'}_{ip}}{\mathbf{V}_{ip}} -2\sum_{j=1}^{d}\sum_{q=1}^{k} \mathbf{B}_{ij}^{+}\mathbf{P}_{qp}^{-}\frac{\mathbf{V'}_{ip}}{\mathbf{V}_{ip}} \nonumber \\
&+ \quad 2 \mathbf{M}^{-}_{ip} \frac{\mathbf{V}_{ip}}{\mathbf{V'}_{ip}} - 2 \mathbf{M}^{+}_{ip} \frac{\mathbf{V'}_{ip}}{\mathbf{V}_{ip}} \nonumber \\
&+\quad2\lambda_1\frac{(\mathbf{Z}^{+} \mathbf{V'} )_{ip} \mathbf{V}_{ip} }{\mathbf{V'}_{ip}} -2\lambda_1\sum_{j=1}^{d} \mathbf{Z}_{ij}^{-}\mathbf{V'}_{jp}\frac{\mathbf{V'}_{ip}}{\mathbf{V}_{ip}}\nonumber \\
&+\quad2\lambda_2\frac{(\mathbf{Q} \mathbf{V'} )_{ip} \mathbf{V}_{ip} }{\mathbf{V'}_{ip}} \nonumber
\end{align}
and the following Hessian matrix of $\mathbf{\mathcal{J}}(\mathbf{V}, \mathbf{V'})$
\begin{align}
&\quad \frac{\partial^2 \mathbf{\mathcal{J}}(\mathbf{V}, \mathbf{V'})}{\partial \mathbf{V}_{ip} \partial \mathbf{V}_{jq}} \\
&= \quad  \delta_{ip}\delta_{jq}\left ( 2\frac{(\mathbf{B}^{+} \mathbf{V'} \mathbf{P}^{+} )_{ip}  }{\mathbf{V'}_{ip}} +  2 \frac{(\mathbf{B}^{-} \mathbf{V'} \mathbf{P}^{-} )_{ip}  }{\mathbf{V'}_{ip}} \right. \nonumber \\
&+\quad 2 \sum_{j=1}^{d}\sum_{q=1}^{k} \mathbf{B}_{ij}^{-}\mathbf{P}_{qp}^{+}\frac{\mathbf{V'}_{ip}}{\mathbf{V}_{ip}^2} +2\sum_{j=1}^{d}\sum_{q=1}^{k} \mathbf{B}_{ij}^{+}\mathbf{P}_{qp}^{-}\frac{\mathbf{V'}_{ip}}{\mathbf{V}_{ip}^2} \nonumber \\
&+ \quad 2  \left. \frac{\mathbf{M}^{-}_{ip}}{\mathbf{V'}_{ip}} + 2 \mathbf{M}^{+}_{ip} \frac{\mathbf{V'}_{ip}}{\mathbf{V}_{ip}^2}  \right) \nonumber \\
&+\quad2\lambda_1\frac{(\mathbf{Z}^{+} \mathbf{V'} )_{ip}  }{\mathbf{V'}_{ip}} +2\lambda_1\sum_{j=1}^{d} \mathbf{Z}_{ij}^{-}\mathbf{V'}_{jp}\frac{\mathbf{V'}_{ip}}{\mathbf{V}_{ip}^2}\nonumber \\
&+\quad2\lambda_2\frac{(\mathbf{Q} \mathbf{V'} )_{ip}  }{\mathbf{V'}_{ip}} \nonumber
\end{align}
is a diagonal matrix with positive diagonal elements.

Thus $\mathbf{\mathcal{J}}(\mathbf{V}, \mathbf{V'})$ is a convex function of $\mathbf{V}$. By setting $\frac{\partial \mathbf{\mathcal{J}}(\mathbf{V}, \mathbf{V'})}{\partial \mathbf{V}_{ip}} = 0$, we can obtain the global minimum of $\mathbf{\mathcal{J}}(\mathbf{V}, \mathbf{V'})$ according to Eq.~\eqref{update_v}.





%\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{a3.png}}]{Third C. Author, Jr.} (M'87) received the B.S. degree in mechanical
%engineering from National Chung Cheng University, Chiayi, Taiwan, in 2004
%and the M.S. degree in mechanical engineering from National Tsing Hua

%\end{IEEEbiography}

\bibliographystyle{IEEEtran}
\bibliography{lsdcl-ufs}

\EOD

\end{document}
