\documentclass{ieeeaccess}
\usepackage{cite}
\usepackage{amsmath,amssymb,amsfonts}
\usepackage{algorithmic}
\usepackage{graphicx}
\usepackage{textcomp}


\usepackage[ruled]{algorithm2e}
\usepackage{xcolor}
\usepackage{bm}
\usepackage{url}
\usepackage{longtable,multirow,colortbl,booktabs}
\newcommand{\st}{\mathrm{s.t.}}
\newcommand{\diag}{\mathrm{diag}}
\newcommand{\tr}{\mathrm{tr}}
\renewcommand\arraystretch{1.5}
\DeclareMathOperator*{\argmin}{arg\,min}
\usepackage{graphicx,subfig}
%\usepackage{float}
\def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em
    T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}}
\begin{document}
\history{Date of publication xxxx 00, 0000, date of current version xxxx 00, 0000.}
\doi{10.1109/ACCESS.2017.DOI}

\title{Local Graph Reconstruction for Parameter Free Unsupervised Feature Selection}
\author{\uppercase{Liang Du}\authorrefmark{1,2,3}, \IEEEmembership{Member, IEEE},
    \uppercase{Chaohong Ren\authorrefmark{1}, Xiaolin Lv\authorrefmark{1}, \\
    Yan Chen\authorrefmark{1}, Peng Zhou\authorrefmark{4} and Zhiguo Hu\authorrefmark{1,2,3}
}}
\address[1]{School of Computer and Information Technology, Shanxi University, Taiyuan 030006, Shanxi Province, China}
\address[2]{Institute of Big Data Science and Industry, Shanxi University, Taiyuan 030006, Shanxi Province, China}
\address[3]{Key Laboratory of Computational Intelligence and Chinese Information Processing of Ministry of Education, Shanxi University, Taiyuan, 030006, China}
\address[4]{School of Computer Science and Technology, Anhui University, Hefei 230601, China}
\tfootnote{This work is supported in part by the National Natural Science Foundation of China grant 61502289, 61603230, 61806003, 61802238 and 61872226, and supported by the Natural Science Foundation of Shanxi Province, China grant No.201701D121052 and No.201801D221163.}

\markboth
{Author \headeretal: Preparation of Papers for IEEE TRANSACTIONS and JOURNALS}
{Author \headeretal: Preparation of Papers for IEEE TRANSACTIONS and JOURNALS}

\corresp{Corresponding author: Liang Du (e-mail: csliangdu@gmail.com) and Zhiguo Hu (646579354@qq.com).}
\begin{abstract}
    Faced with the absence of supervised information to guide the search for relevant features and the impracticality of grid search over models/hyper-parameters, it is preferable to develop parameter-free methods and avoid additional hyper-parameter tuning. In this paper, we propose a new, simple, and effective parameter-free unsupervised feature selection algorithm that minimizes the linear reconstruction error between the nearest neighbor graph constructed from all candidate features and those constructed from each single feature. The obtained globally optimal reconstruction weights actually select those features with the highest relevance and lowest redundancy simultaneously. Experimental results on many benchmark data sets demonstrate that the proposed method outperforms many state-of-the-art unsupervised feature selection methods.
\end{abstract}

\begin{keywords}
Local Graph Reconstruction, Parameter Free, Global Optimal, Redundancy Minimization
\end{keywords}

\titlepgskip=-15pt

\maketitle



%The task of feature selection is to find the most representative features from the original high-dimensional data. The unsupervised feature selection lack the supervised information to guide the search of relevant features. The grid search of unsupervised feature selection models or hyper-parameters is also unrealistic in practical application. 
\begin{IEEEkeywords}
Local Structure, Graph Structure Preserving, Filter-based 
\end{IEEEkeywords}

\section{Introduction}
Real world applications usually involve big data with high dimensionality, presenting great challenges such as the curse of dimensionality, huge computation and storage cost. To tackle these difficulties, feature selection techniques are developed to keep a few relevant and informative features from the original high-dimensional features for the subsequent tasks. Based on a small number of representative features, not only the learning process of the model could be accelerated, but also the generalization ability could be improved.

According to the availability of supervised information, feature selection methods can be categorized into supervised \cite{lasso,robnik2003theoretical,mifs,opti_fe,seqfs,rfs_l21}, semi-supervised \cite{zhao2007semi,xu2010discriminative,semigraph}, and unsupervised algorithms \cite{dy2004feature}, \cite{mcfs}. Compared to supervised or semi-supervised counterparts, unsupervised feature selection is generally more challenging due to the lack of supervised information to guide the search for relevant features.

Generally speaking, unsupervised feature selection methods can be further categorized into two types: filter and embedded~\cite{rfs_lei,du2015unsupervised,rsr,disr,gsrufs,SPNFSR,lgdfs,topkfs,lrsl,opsr,cgufs3,zhang2019unsupervised,zhang2018unsupervised,du2018exploiting,li2018generalized,lpp_app,chen2018local,rpca}. The filter-based feature selection algorithms rank the features in terms of a predefined criterion, which is completely independent of the learning methods. The embedded-based methods incorporate the feature evaluation criterion into the learning procedure. Since embedded-based methods take the learning model into consideration, they usually perform better than filter-based ones. However, these methods often involve more parameters for tuning, and it is hard to effectively perform unsupervised model selection due to the absence of supervised information. Besides, the embedded algorithms are also computationally expensive, thereby impeding their use in tasks where the dimensionality and the amount of data are large.


%The unsupervised feature selection lack the supervised information to guide the search of relevant features. The grid search of unsupervised feature selection models or hyper-parameters is also unrealistic in practical application. 
%In view of the above analysis, the filter-based methods seem to be more attractive and practical, especially when the volume of features is huge. 

In this paper, we are particularly interested in parameter-free (or at least with hyper-parameters that can be easily set to a certain constant value) feature selection methods, of which data variance, Laplacian score\cite{lapscore}, sparsity score\cite{liu2014sparsity}, Multi Cluster Feature Selection \cite{mcfs}, the Local and Global Discriminative algorithm\cite{lgd}, and LLE score\cite{llescore} are representatives. However, the quadratic score functions in \cite{lapscore}, \cite{lgd} have several drawbacks, as pointed out in \cite{llescore}. The LLE score \cite{llescore} also requires fine tuning of the regularization parameter, as suggested by the authors. Moreover, these aforementioned algorithms usually neglect the correlations of candidate features by evaluating the importance of features one by one, which leads to sub-optimal results.

Faced with such difficulties, it is preferable to develop parameter-free methods or methods without additional hyper-parameter tuning. In this paper, we propose a new, simple, and effective parameter-free unsupervised feature selection algorithm, where we adopt the simple nearest neighbor graph to characterize the local structure of data. The importance of each feature is evaluated by minimizing the reconstruction weight between the weight matrices of all features and each single feature. It can be further verified that our method actually selects highly relevant and less redundant features \cite{lgd, Nie2018A}.
Experimental results on many benchmark data sets demonstrate that the proposed method outperforms many state-of-the-art parameter-free unsupervised methods.

The rest of the paper is organized as follows. In Section 2, we review the most closely related filter-based unsupervised algorithms. The proposed algorithm is derived in Section 3. In Section 4, we evaluate the proposed method on many data sets. We make concluding remarks in Section 5.
\section{Related Work}
In this section, we will first review some related unsupervised feature selection methods.
\subsection{Laplacian Score}
Laplacian score is a filter-based unsupervised feature selection method. The main idea of Laplacian score is that the data points which locate nearby are probably related to the same class. Therefore, the local structure of the data is more important than the global structure. In this way, Laplacian score evaluates the feature by its ability of preserving the local structure of data.

It computes the neighborhood relationship according to
\begin{align}
w_{ij} = \left\{ \begin{array}{cl}
e^{-\frac{||\mathbf{x}_i - \mathbf{x}_j||^2}{t^2}} &  \textrm{if } \mathbf{x}_i \textrm{ and } \mathbf{x}_j \textrm{ are neighbors,} \\
0 &  \textrm{otherwise.}
\end{array}
\right.
\end{align}
Then, the score of each feature is computed as follows
\begin{align}
\label{lapscore}
\textrm{LapScore}_r = \frac{\sum_{i=1}^{n} \sum_{j=1}^{n} (f_{ri} - f_{rj})^2 w_{ij} }{\sum_{i=1}^{n} (f_{ri} - \mu_{r})^2 d_{ii}}
\end{align}
where $d_{ii} = \sum_{j=1}^{n} w_{ij}$ and $\mu_r$ is the average of the $r$-th feature.
\subsection{LGD Score}
The LGD score\cite{lgd} takes an unsupervised Local and Global Discriminative (LGD) feature selection criterion. The score of each feature is defined as the ratio between global variance and local variance:
\begin{align}
\label{lgd}
\textrm{LGD}_r = \frac{\sum_{i=1}^{n} (f_{ri} - \mu_r)^2}{\sum_j \sum_{f_{ri} \in o(f_{rj}  )} ( f_{ri} - \bar{f}_{rj})^2}
\end{align}
where $f_{ri}$ is the $r$-th feature of the $i$-th sample, $\mu_r$ is the mean of the $r$-th feature, $o(f_{rj})$ is the set of neighbor points of the $j$-th sample. $\bar{f}_{rj}$ is the mean computed from the set of neighbor points.

The LGD score prefers to select features with large global variance and small local variance. Such features are expected to be discriminative for classification/clustering tasks.


\subsection{LLE Score}
LLE score\cite{llescore} takes the Local Linear Embedding method \cite{zhou2015lle} to characterize the local structure of data. It evaluates the importance of each feature according to the difference of reconstruction weights computed from the single feature and all the features.

The optimal reconstruction weight $\mathbf{M} \in \mathcal{R}^{n \times n}$ with all the candidate features is computed by solving
\begin{align}
\min_{\mathbf{M}} \quad ||\mathbf{x}_i - \sum_{j \in \mathcal{N}_i } m_{ij} \mathbf{x}_j||^2, \quad \st \sum_{j \in \mathcal{N}_i } m_{ij}=1,
\end{align}
where $\mathcal{N}_i$ is the neighbor set of the $i$-th sample. The reconstruction weight captured by the $r$-th feature is also computed by
\begin{align}
\min_{\mathbf{M}^r} \quad& ||f_{ri} - \sum_{j \in \mathcal{N}_i^{r} } m_{ij}^{r} f_{rj}||^2 + \gamma \sum_{j \in \mathcal{N}_i^{r} } (m_{ij}^{r})^2, \\
\st \quad& \sum_{j \in \mathcal{N}_i^{r} } m_{ij}^r=1, \nonumber
\end{align}
where $\mathcal{N}_i^r$ is the neighbor set of the $i$-th sample under the $r$-th feature.

Given the reconstruction weight of all features $\mathbf{M}$ and the reconstruction weight of $r$-th feature $\mathbf{M}^{r}$, the LLE score is computed by
\begin{align}
\textrm{LLEScore}_r = ||\mathbf{M} - \mathbf{M}^r||^2.
\end{align}
For each feature, the above criterion of LLE score evaluates the ability to preserve the local linear structure.

\section{The Proposed Method}
The generic problem of unsupervised feature selection is to find the most informative features. Given a set of points $\mathbf{X} = \{\mathbf{x}_1 ,..., \mathbf{x}_n\} \in \mathcal{R}^{d \times n}$, the goal is to find a feature subset of size $m$ which contains the most informative features. In other words, the points $\{\mathbf{x}_1^{'}, \mathbf{x}_2^{'}, \ldots, \mathbf{x}_n^{'}\}$ represented in the $m$-dimensional space $\mathcal{R}^m$ can well preserve the intrinsic structure of the data represented in the original $d$-dimensional space.

It has been well recognized that the local structure of the data space is more important than the global structure for the task of clustering and classification \cite{chen2018local}. Several approaches have been developed for local structure characterization. In order to model the local geometric structure, we construct a simple and effective nearest neighbor graph $\mathbf{A}$, which is generated from all the candidate features without additional parameter except for the neighborhood size. The weight matrix can be obtained according to
\begin{align}
\label{compute_A}
\mathbf{A}_{ij} = \left\{ \begin{array}{cl}
\frac{1}{n_i} &  \textrm{if } \mathbf{x}_i \textrm{ and } \mathbf{x}_j \textrm{ are neighbors,} \\
0 &  \textrm{otherwise.}
\end{array}
\right.
\end{align}
where $n_i$ is the neighbor size of $i$-th sample.

Given the local structure captured by the affinity graph, most existing filter-based algorithms take the quadratic function to evaluate the importance of each feature such as in Eq.~\eqref{lapscore} and Eq.~\eqref{lgd}. However, it has been pointed out that such score function suffers from at least three drawbacks\cite{llescore}: 1) it fails when the elements of all the samples are equal; 2) it lacks the scaling invariant property; 3) it cannot well capture the change of the graph for each element. These weaknesses will greatly degrade its performance in feature selection.

Instead of using the quadratic function, we adopt similar idea as LLE score by constructing the local structure, i.e. $\mathbf{A}^r$, for each single feature. The weight matrix for $r$-th feature is computed by
\begin{align}
\label{compute_Ar}
\mathbf{A}_{ij}^{r} = \left\{ \begin{array}{cl}
\frac{1}{n_i^r} &  \textrm{if } f_i^r \textrm{ and } f_j^r \textrm{ are neighbors,} \\
0 &  \textrm{otherwise.}
\end{array}
\right.
\end{align}
where $n_i^r$ is the neighbor size of $i$-th sample with $r$-th feature.


Here, since both the weight matrices $\mathbf{A}$ and $\{\mathbf{A}^r\}_{r=1}^{m}$ capture the local structure of data, the importance of each feature then can be evaluated by their reconstruction weights to the consensus weight matrix $\mathbf{A}$. The corresponding problem can be formulated as 
\begin{align}
\label{score}
\min_{\mathbf{w}} \quad & ||\mathbf{A} - \sum_{i=1}^{d} w_i \mathbf{A}^i||^2 \\
\st \quad & \sum_{i=1}^{d} w_i = 1, \mathbf{w} \geq 0. \nonumber
\end{align}

It is obvious that the above formulation does not introduce additional hyper-parameter for certain regularization, such as sparse regularization, which is often adopted by other embedded unsupervised feature selection algorithms. Such merit of parameter free is especially helpful for the task of unsupervised feature selection without supervision.


We first introduce $\mathbf{H} \in \mathcal{R}^{d \times d}$ by denoting
\begin{align}
\mathbf{H}_{ij} =& \tr( (\mathbf{A}^i)^T \mathbf{A}^j), 
\end{align}
where $\mathbf{H}_{ij}$ is used to characterize the similarity between feature $i$ and $j$, and introduce $\mathbf{b} \in \mathcal{R}^{d \times 1}$ with
\begin{align}
b_i  =& \tr( \mathbf{A}^T \mathbf{A}^i)
\end{align}
where $b_i$ represents the similarity between feature $i$ and all the candidate features. Then, the problem in Eq.~\eqref{score} can be reformulated as
\begin{align}\label{score2}
\min_{\mathbf{w}} \quad & \mathbf{w}^T\mathbf{H} \mathbf{w} - 2 \mathbf{w}^T \mathbf{b}\\
\st \quad & \sum_{i=1}^{d} w_i = 1, \mathbf{w} \geq 0. \nonumber
\end{align}
Clearly, the above problem is a convex quadratic programming with linear constraints, which can be easily solved by off-the-shelf optimization toolbox. %For data set with high dimensionality, we also provide an efficient algorithm within the general augmented Lagrangian multiplier method in the Appendix.

Now, we can fully investigate why the proposed algorithm is suitable for the task of unsupervised feature selection. The first term in the objective function $\mathbf{w}^T \mathbf{H} \mathbf{w}$ can be rewritten as $\sum_{i,j=1}^d \mathbf{H}_{ij} w_i w_j$. When $\mathbf{H}_{ij}$ is large, it indicates that the features $i$ and $j$ have similar neighborhood structure. That is to say, they are redundant features. By minimizing $\sum_{i,j=1}^d \mathbf{H}_{ij} w_i w_j$, the values of $w_i$ and $w_j$ cannot be large simultaneously. In this case, we will get one large value and one small value for these two highly redundant features. Consequently, the ranking of one feature is kept and the ranking of the other feature decreases, when these two features are highly similar with each other. Minimizing the second term in the objective function is equivalent to maximizing $\sum_{i=1}^{d} w_i b_i$. It actually selects those features which have similar neighbor structure to the structure constructed from all the candidate features, i.e., $\mathbf{A}$. Given the optimal reconstruction weight of Eq.~\eqref{score} or Eq.~\eqref{score2}, we can use it to evaluate the ability of each feature to preserve the local structure. It can be seen that the features with higher scores are better at preserving the local structure. We list the details of the proposed method in Algorithm~\ref{alg}.

\begin{algorithm}%[h]
    \caption{The proposed filter-based unsupervised feature selection algorithm }
    \label{alg}
    \begin{algorithmic}
    \REQUIRE{Data matrix $\bm{X} \in \mathbb{R}^{d \times n}$, the number of selected features $m$}
        \begin{enumerate}
            \item Construct the $k$-nearest neighbor matrix $\mathbf{A}$ using all the candidate features according to Eq.~\eqref{compute_A}.
            \item Construct the $k$-nearest neighbor matrix $\mathbf{A}^r$ using the $r$-th feature according to Eq.~\eqref{compute_Ar}.
            \item Compute the score for each feature by solving Eq.~\eqref{score} or Eq.~\eqref{score2}.
            \item Ranking features $\mathbf{w}$ in descending order, select the top $m$ features.
        \end{enumerate}
        \ENSURE{top $m$ features}
    \end{algorithmic}
\end{algorithm}



Although the above algorithm is simple and easy to understand, it still brings several nice properties for the task of unsupervised feature selection. It is worthwhile to highlight several aspects of the proposed approach here:
\begin{itemize}
    \item It is parameter free. It can be seen that only one parameter, i.e., the neighborhood size, is involved. In practice, we set the neighborhood size to be 5 in our experiment. It is vitally important to point out that parameter or model selection is often unrealistic for the task of unsupervised feature selection due to the absence of supervised information. That is also one of the main advantages of our method compared to other filter-based methods and embedded approaches.
    
    Comparatively speaking, Laplacian score, LGD\cite{lgd}, MCFS\cite{mcfs} and LLE score\cite{llescore} all need the neighborhood size. Besides, they often require additional hyper-parameters, such as the kernel width of the Gaussian kernel function in Laplacian score, and the regularization parameter in MCFS and LLE score. 
    
    \item It evaluates the importance of each feature by considering both the relevance and redundancy simultaneously. As a result, the redundancy of the selected feature subset is largely alleviated and features with more relevant information can be further selected. 
    
    \item The global optima of the optimization problem in Eq.~\eqref{score} can be easily obtained. Thus, we can avoid the greedy search or the local optimal which are often encountered by other counterparts.
    
    \item It is scale invariant. For example, let $\mathbf{f}_1 = 2\mathbf{f}_2$; then $\mathbf{f}_1$ and $\mathbf{f}_2$ have the same graph structure and the scores for $\mathbf{f}_1$ and $\mathbf{f}_2$ are equal. It has been pointed out that Laplacian score gives different results for such features\cite{lapscore}. The LGD algorithm also has such a problem\cite{lgd}.
    
    \item It can distinguish features with equal values. The local structure of a feature with equal values does not contain any meaningful structure, and such a feature will get a smaller reconstruction weight. In contrast, both the Laplacian score and the LGD algorithm will give the best score to such features.
    
    \item It is less sensitive to the change of local weights, which could be largely influenced by heavy data corruption. The binary graph weighting used in Eq.~\eqref{compute_A} only captures the neighborhood relationship while ignoring the relatively sensitive closeness weights. It is believed that such a weighting scheme is also robust to data corruption to a certain extent. It captures the change of local structure efficiently: once the local neighborhood is changed for a certain feature, it will be penalized by Eq.~\eqref{score}.
    
\end{itemize}



Now, we analyze the time complexity of the proposed method. The cost of computing the Euclidean distances between the $i$-th sample and the other samples is $\mathbf{O}(nd)$, then finding its $k$-nearest neighbors costs $\mathbf{O}(nk)$. Thus, the total computational complexity of computing $\mathbf{A}$ is $\mathbf{O}(n^2d+n^2k)$. The computational complexity for $\mathbf{A}^r$ is $\mathbf{O}(n \log n)$. The selection of top features is $\mathbf{O}(d m)$, where $m$ is the number of selected features. In most cases, $d > k$; in this way, the computational complexity can be written as $\mathbf{O}(n^2d + d n \log n)$. The quadratic optimization problem in \eqref{score2} can be solved in $\mathbf{O}(d^3)$. The time complexity of the proposed method is thus comparable with other methods.


\section{Experiment}
In this section, extensive experiments are conducted on 6 real-world datasets to validate the effectiveness of the proposed method. Five state-of-the-art relevant unsupervised feature selection methods are adopted as competitors.

\subsection{Data Sets}
We collect a variety of data sets, including 5 image data sets and 1 biological data set, most of which have been frequently used to evaluate the performance of different feature selection algorithms. 
The statistics of these data sets, including the number of data samples, the dimension of each sample, the types and categories of each dataset and the number of selected features, are summarized in Table \ref{dataset}. We further present the details of these 6 data sets as follows.

\begin{table}[h]
    \centering
    \caption{\textbf{The details of data sets in our experiments.}}
    \label{dataset}    
    \setlength{\tabcolsep}{1pt}{
        \begin{tabular}{l c c c c}
            \toprule[1.2pt]
            \textrm{Dataset} & \textrm{\# instances}& \textrm{\# features}& \textrm{\# selected features} & \textrm{\# classes}\\\midrule[1.2pt]
            \textrm{COIL}&1440&1024&[5:5:50]&20\\\midrule
            \textrm{JAFFE}&213&676&[5:5:50]&10\\\midrule
            \textrm{UMIST}&575&644&[5:5:50]&20\\\midrule
            \textrm{YALEB}&2414&1024&[5:5:50]&38\\\midrule
            \textrm{PIE}&2856&1024&[5:5:50]&68\\\midrule            
            \textrm{LUNG}&203&3312&[5:5:50]&5\\\bottomrule[1.2pt]
    \end{tabular}}
\end{table}



\begin{table*}
    \caption{Clustering accuracy on different datasets with $k$=5 (mean $\pm$ std).}
    \centering 
    \label{res_acc}
    \begin{tabular}{| c  || c || c | c | c | c | c |  c |}
        \hline
        Data Sets   &   AllFea  & MaxVar  &  LapScore  &  MCFS  &  LGD  &  LLEScore  &  Our Method  \\ \hline
        \hline
        COIL      &  0.5917  &  0.4330  $\pm$  0.0479  &  0.4559  $\pm$  0.0612  &  0.5417  $\pm$  0.0654  &  0.5060  $\pm$  0.0286  &  0.5677  $\pm$  0.0405  &  \textbf{0.5806  $\pm$  0.0241}\\ \hline
        JAFFE  &   0.7157  &  0.4816  $\pm$  0.0620  &  0.6769  $\pm$  0.0853  &  0.6599  $\pm$  0.0341  &  0.5897  $\pm$  0.0334  &  0.6458  $\pm$  0.0401  &  \textbf{0.7135  $\pm$  0.0510}\\ \hline
        UMIST  &   0.4240  &  0.4191  $\pm$  0.0379  &  0.3677  $\pm$  0.0111  &  0.3994  $\pm$  0.0233  &  0.4133  $\pm$  0.0371  &  0.4132  $\pm$  0.0146  &  \textbf{0.4922  $\pm$  0.0073}\\ \hline
        YALEB  &   0.0962  &  0.0907  $\pm$  0.0013  &  0.0866  $\pm$  0.0008  &  0.1121  $\pm$  0.0103  &  0.0954  $\pm$  0.0021  &  0.1266  $\pm$  0.0046  &  \textbf{0.1550  $\pm$  0.0148}\\ \hline
        PIE & 0.1809  &  0.1678  $\pm$  0.0190  &  0.1752  $\pm$  0.0157  &  0.1669  $\pm$  0.0094  &  0.1547  $\pm$  0.0472  &  0.1371  $\pm$  0.0047  &  \textbf{0.2914  $\pm$  0.0332}\\ \hline
        LUNG      &  0.7246  &  0.5271  $\pm$  0.0638  &  0.5329  $\pm$  0.0349  &  0.4381  $\pm$  0.0552  &  0.4424  $\pm$  0.0545  &  0.5107  $\pm$  0.0146  &  \textbf{0.6660  $\pm$  0.0538}\\ \hline
        \hline
        Average & 0.4555  &  0.3532 & 0.3825 &  0.3863 &  0.3669 &  0.3997 &  \textbf{0.4831} \\ \hline
    \end{tabular}
\end{table*}


\begin{table*}
    \caption{Clustering NMI on different datasets with $k$=5 (mean $\pm$ std).}
    \centering 
    \label{res_nmi}
    \begin{tabular}{| c  || c || c | c | c | c | c |  c |}
        \hline
        Data Sets   &   AllFea  & MaxVar  &  LapScore  &  MCFS  &  LGD  &  LLEScore  &  Our Method  \\ \hline
        \hline
        COIL      &  0.7376  &  0.5627  $\pm$  0.0582  &  0.5816  $\pm$  0.0595  &  0.6461  $\pm$  0.0780  &  0.6381  $\pm$  0.0367  &  0.6686  $\pm$  0.0462  &  \textbf{0.6728  $\pm$  0.0414} \\ \hline
        JAFFE  &   0.7921  &  0.5099  $\pm$  0.0971  &  0.7474  $\pm$  0.0890  &  0.7096  $\pm$  0.0333  &  0.6317  $\pm$  0.0194  &  0.6987  $\pm$  0.0476  &  \textbf{0.7841  $\pm$  0.0466} \\ \hline
        UMIST  &   0.6336  &  0.5652  $\pm$  0.0488  &  0.5466  $\pm$  0.0237  &  0.5322  $\pm$  0.0380  &  0.5658  $\pm$  0.0496  &  0.5872  $\pm$  0.0244  &  \textbf{0.6503  $\pm$  0.0139} \\ \hline
        YALEB  &   0.1290  &  0.1333  $\pm$  0.0053  &  0.1358  $\pm$  0.0012  &  0.1875  $\pm$  0.0232  &  0.1396  $\pm$  0.0067  &  0.2017  $\pm$  0.0111  &  \textbf{0.2552  $\pm$  0.0199}\\ \hline
        PIE & 0.4081  &  0.3985  $\pm$  0.0330  &  0.4112  $\pm$  0.0226  &  0.3924  $\pm$  0.0154  &  0.3798  $\pm$  0.1176  &  0.3480  $\pm$  0.0082  &  \textbf{0.5545  $\pm$  0.0304} \\ \hline
        LUNG      &  0.5220  &  0.3645  $\pm$  0.0769  &  0.3912  $\pm$  0.0389  &  0.2469  $\pm$  0.0874  &  0.2561  $\pm$  0.0720  &  0.3275  $\pm$  0.0220  &  \textbf{0.4771  $\pm$  0.0556} \\ \hline
        \hline
        Average &  0.5371  &  0.4223 &  0.4690 &  0.4525 &  0.4352 &  0.4720&  \textbf{0.5657}   \\ \hline 
    \end{tabular}
\end{table*}

\begin{itemize}
    \item \textbf{COIL20} dataset from Columbia University Image Library
    contains 20 classes, and each class has 72 images. Each image
    is of $32 \times 32$ pixels with some rotation.
    \item \textbf{JAFFE}. This database contains 213 images of 7 facial     expressions (6 basic facial expressions + 1 neutral) posed by 10 Japanese female models.
    \item \textbf{UMIST}. The Face Database consists of 575 images of 20 people, each covering a range of poses from profile to frontal views. Subjects cover a range of race/sex/appearance.
    \item \textbf{YALEB}. The Extended Yale-B database contains 16128 face images of 38 human subjects under 9 poses and 64 illumination conditions. In our experiment, we choose the frontal pose and use all the images under different illuminations, thus we get 2414 images in total. They are resized to $32 \times 32$ pixels, with 256 gray levels per pixel.
    \item \textbf{PIE} is a gray-scale ($32 \times 32$ pixels) face image dataset.
    In addition, the dataset has 68 persons, and each person's images
    have different illuminations and poses.
    \item \textbf{LUNG}\cite{rfs_l21} data set contains in total 203 samples in five classes, which have 139, 21, 20, 6, and 17 samples, respectively. Each sample has 12600 genes. The genes with standard deviations smaller than 50 expression units were removed and we obtained a data set with 203 samples and 3312 genes. 
\end{itemize}



\subsection{Compared Algorithms}
To validate the effectiveness of our proposed method\footnote{For the purpose of reproducibility, we provide all the datasets and the code at \url{https://gitee.com/csliangdu/LGRUFS}}, we compare it with one baseline (i.e., AllFea) and the following state-of-the-art, almost parameter-free unsupervised feature selection methods:
\begin{itemize}
    \item Max Variance which selects those features of maximum variances in order to obtain the best expressive power.
    \item Laplacian Score\cite{lapscore} which selects the features most consistent with the Gaussian Laplacian matrix.
    \item MCFS\footnote{\url{ http://www.cad.zju.edu.cn/home/dengcai/Data/code/MCFS_p.m}} performs eigen-decomposition regarding the Laplacian matrix, and evaluates the importance of features via sparse spectral regression\cite{mcfs}. The neighbor size is set to 5.
    \item The local and global discriminative (LGD) feature selection criterion \cite{lgd}. The score of feature is determined by the ratio between global variance and local variance.
    \item LLEScore \cite{llescore}. It is a filter-based unsupervised feature selection method, which is based on LLE and the graph-preserving feature selection framework. The difference between structures of the graphs constructed by each feature and the original data was used to measure the importance of each feature.
\end{itemize}


\begin{table*}
    \caption{Clustering purity on different datasets with $k$=5 (mean $\pm$ std).}
    \centering 
    \label{res_purity}
    \begin{tabular}{| c  || c || c | c | c | c | c |  c |}
        \hline
        Data Sets   &   AllFea  & MaxVar  &  LapScore  &  MCFS  &  LGD  &  LLEScore  &  Our Method  \\ \hline
        \hline
        COIL      &  0.6432  &  0.4890  $\pm$  0.0529  &  0.5113  $\pm$  0.0640  &  0.5833  $\pm$  0.0709  &  0.5534  $\pm$  0.0291  &  0.6030  $\pm$  0.0397  &  \textbf{0.6140  $\pm$  0.0283}\\ \hline
        JAFFE  &   0.7526  &  0.5101  $\pm$  0.0657  &  0.7137  $\pm$  0.0876  &  0.6977  $\pm$  0.0359  &  0.6237  $\pm$  0.0298  &  0.6808 $\pm$  0.0402  &  \textbf{0.7510  $\pm$  0.0506}\\ \hline
        UMIST  &   0.4978  &  0.4720  $\pm$  0.0469  &  0.4287  $\pm$  0.0187  &  0.4503  $\pm$  0.0306  &  0.4635  $\pm$  0.0460  &  0.4857  $\pm$  0.0251  &  \textbf{0.5561  $\pm$  0.0123}\\ \hline
        YALEB  &   0.1046  &  0.1022  $\pm$  0.0024  &  0.0962  $\pm$  0.0014  &  0.1242  $\pm$  0.0142  &  0.1071  $\pm$  0.0040  &  0.1341  $\pm$  0.0052  &  \textbf{0.1803  $\pm$  0.0160} \\ \hline
        PIE & 0.2027  &  0.1916  $\pm$  0.0189  &  0.1956  $\pm$  0.0167  &  0.1946  $\pm$  0.0090  &  0.1755  $\pm$  0.0465  &  0.1656  $\pm$  0.0047  &  \textbf{0.3139  $\pm$  0.0324}\\ \hline
        LUNG      &  0.8532  &  0.7957  $\pm$  0.0414  &  0.8090  $\pm$  0.0273  &  0.7455  $\pm$  0.0382  &  0.7420  $\pm$  0.0328  &  0.7825  $\pm$  0.0032  &  \textbf{0.8440  $\pm$  0.0372}\\ \hline
        \hline
        Average & 0.5090  & 0.4268 &  0.4591 &  0.4654 &  0.4442 &  0.4753 &  \textbf{0.5432} \\ \hline
    \end{tabular}
\end{table*}







\begin{figure*}[t]
    \centering
    \subfloat[]{\label{fig:res_COIL20_1440n_1024d_20c_Score_Accuracy}\includegraphics[width=0.33\textwidth]{res_COIL20_1440n_1024d_20c_Score_Accuracy}}
    \subfloat[]{\label{fig:res_COIL20_1440n_1024d_20c_Score_NMI} \includegraphics[width=0.33\textwidth]{res_COIL20_1440n_1024d_20c_Score_NMI}}
    \subfloat[]{\label{fig:res_COIL20_1440n_1024d_20c_Score_Purity} \includegraphics[width=0.33\textwidth]{res_COIL20_1440n_1024d_20c_Score_Purity}}
    \caption{Clustering results w.r.t. different number of selected features on COIL.}
    \label{fig:res_COIL20_1440n_1024d_20c_LKRScore}    
\end{figure*}




\subsection{Evaluation Metrics}
To evaluate their performance, we compare the generated clusters with the ground truth by computing the following three performance measures.

\textbf{Clustering accuracy (ACC)}. The first performance measure is the clustering accuracy, which discovers the one-to-one relationship between clusters and classes. Given a point $\bm{x}_i$, let $p_i$ and $q_i$ be the clustering result and the ground truth label, respectively. The ACC is defined as follows:
\begin{equation}
\textrm{ACC} = \frac{1}{n}\sum_{i=1}^{n}\delta(q_i, \mathrm{map}(p_i)),
\end{equation}
where $n$ is the total number of samples and $\delta(x,y)$ is the delta function that equals 1 if $x=y$ and equals 0 otherwise, and $\mathrm{map}(\cdot)$ is the permutation mapping function that maps each cluster index to a true class label. The best mapping can be found by using the Kuhn-Munkres algorithm~\cite{map}. A greater clustering accuracy indicates a better clustering performance.


\begin{figure*}[t]
    \centering
    \subfloat[]{\label{fig:res_JAFFE_213n_676d_10c_Score_Accuracy} \includegraphics[width=0.33\textwidth]{res_JAFFE_213n_676d_10c_Score_Accuracy}}    
    \subfloat[]{\label{fig:res_JAFFE_213n_676d_10c_Score_NMI} \includegraphics[width=0.33\textwidth]{res_JAFFE_213n_676d_10c_Score_NMI}}
    \subfloat[]{\label{fig:res_JAFFE_213n_676d_10c_Score_Purity} \includegraphics[width=0.33\textwidth]{res_JAFFE_213n_676d_10c_Score_Purity}}
    %
    \caption{Clustering results w.r.t. different number of selected features on JAFFE.}
    \label{fig:res_JAFFE_213n_676d_10c_LKRScore}    
\end{figure*}



\begin{figure*}[t]
    \centering
    \subfloat[]{\label{fig:res_UMIST_575n_644d_20c_Score_Accuracy} \includegraphics[width=0.33\textwidth]{res_UMIST_575n_644d_20c_Score_Accuracy}}
    \subfloat[]{\label{fig:res_UMIST_575n_644d_20c_Score_NMI} \includegraphics[width=0.33\textwidth]{res_UMIST_575n_644d_20c_Score_NMI}}
    \subfloat[]{\label{fig:res_UMIST_575n_644d_20c_Score_Purity} \includegraphics[width=0.33\textwidth]{res_UMIST_575n_644d_20c_Score_Purity}}
    %   
    \caption{Clustering results w.r.t. different number of selected features on UMIST.}
    \label{fig:res_UMIST_575n_644d_20c_LKRScore}
\end{figure*}

\textbf{Normalized mutual information (NMI)}. Another evaluation metric that we adopt here is the normalized mutual information, which is widely used for determining the quality of clustering. Let $\mathcal{C}$ be the set of clusters from the ground truth and $\mathcal{C'}$ be the set of clusters obtained from a clustering algorithm. Their mutual information $MI(\mathcal{C}, \mathcal{C'})$ is defined as follows:
\begin{equation}
\textrm{MI}(\mathcal{C}, \mathcal{C'}) = \sum_{c_i \in \mathcal{C},c'_j \in \mathcal{C'}} p(c_i,c'_j) \log \frac{p(c_i,c'_j)}{p(c_i) p(c'_j)},
\end{equation}
where $p(c_i)$ and $p(c'_j)$ are the probabilities that a data point arbitrarily selected from the data set belongs to the cluster $c_i$ and $c'_j$, respectively, and $p(c_i,c'_j)$ is the joint probability that the arbitrarily selected data point belongs to the cluster $c_i$ as well as $c'_j$ at the same time. In our experiments, we use the normalized mutual information as follows:
\begin{equation}
\textrm{NMI}(\mathcal{C}, \mathcal{C'}) = \frac{ \textrm{MI}(\mathcal{C}, \mathcal{C'})}{\max(H(\mathcal{C}), H(\mathcal{C'}))},
\end{equation}
where $H(\mathcal{C})$ and  $H(\mathcal{C'})$ are the entropies of $\mathcal{C}$ and $\mathcal{C'}$, respectively. Again, a larger NMI indicates a better performance.

\textbf{Purity} measures the extent to which each cluster contains data
points from primarily one class. The purity of a clustering solution is obtained as a weighted sum of individual cluster purity values~\cite{du2013joint} and is given by
\begin{align}
\textrm{Purity} = \sum_{i=1}^{c}\frac{n_i}{n}P(S_i), \quad P(S_i) = \frac{1}{n_i} \max_j(n_i^j),
\end{align}
where $S_i$ is a particular cluster of size $n_i$, $n_i^j$ is the number of samples in the $i$-th cluster that belong to the $j$-th class,
$c$ is the number of clusters and $n$ is the total number of points. In general, the larger the value of purity, the better the clustering solution is.

In order to measure the \textbf{redundancy}~\cite{zhao2013similarity} among the selected features, the following formula is adopted:
\begin{align}
\textrm{Red}(\mathbf{S})=\frac{1}{m(m-1)}\sum_{\mathbf{f}_i, \mathbf{f}_j \in \mathbf{S}, i\neq j} \rho_{ij}
\end{align}
where $\rho_{ij}$ is the Pearson correlation between two features $\mathbf{f}_i$ and $\mathbf{f}_j$. The measurement assesses the averaged correlation among all feature pairs, and a large value indicates that many selected features are strongly redundant. In general, the smaller the values of redundancy or correlation, the better the selected features.

\begin{figure*}[t]
    \centering
    \subfloat[]{\label{fig:res_YaleB_2414n_1024d_38c_Score_Accuracy} \includegraphics[width=0.33\textwidth]{res_YaleB_2414n_1024d_38c_Score_Accuracy}}
    \subfloat[]{\label{fig:res_YaleB_2414n_1024d_38c_Score_NMI} \includegraphics[width=0.33\textwidth]{res_YaleB_2414n_1024d_38c_Score_NMI}}
    \subfloat[]{\label{fig:res_YaleB_2414n_1024d_38c_Score_Purity} \includegraphics[width=0.33\textwidth]{res_YaleB_2414n_1024d_38c_Score_Purity}}    
    %
    \caption{Clustering results w.r.t. different number of selected features on YALEB.}
    \label{fig:res_YaleB_2414n_1024d_38c_LKRScore}
\end{figure*}




\begin{figure*}[t]
    \centering    
    \subfloat[]{\label{fig:res_PIE_Pose27_2856n_1024d_68c_Score_Accuracy} \includegraphics[width=0.33\textwidth]{res_PIE_Pose27_2856n_1024d_68c_Score_Accuracy}}
    \subfloat[]{\label{fig:res_PIE_Pose27_2856n_1024d_68c_Score_NMI} \includegraphics[width=0.33\textwidth]{res_PIE_Pose27_2856n_1024d_68c_Score_NMI}}
    \subfloat[]{\label{fig:res_PIE_Pose27_2856n_1024d_68c_Score_Purity}\includegraphics[width=0.33\textwidth]{res_PIE_Pose27_2856n_1024d_68c_Score_Purity}}
    %
    \caption{Clustering results w.r.t. different number of selected features on PIE.}
    \label{res_PIE_Pose27_2856n_1024d_68c_LKRScore}
\end{figure*}

\begin{figure*}[t]
    \centering    
    \subfloat[]{\label{fig:res_Lung_203n_3312d_5c_Score_Accuracy} \includegraphics[width=0.33\textwidth]{res_Lung_203n_3312d_5c_Score_Accuracy}}   
    \subfloat[]{\label{fig:res_Lung_203n_3312d_5c_Score_NMI} \includegraphics[width=0.33\textwidth]{res_Lung_203n_3312d_5c_Score_NMI}}
    \subfloat[]{\label{fig:res_Lung_203n_3312d_5c_Score_Purity}\includegraphics[width=0.33\textwidth]{res_Lung_203n_3312d_5c_Score_Purity}}
    %
    \caption{Clustering results w.r.t. different number of selected features on LUNG.}
    \label{res_Lung_203n_3312d_5c_LKRScore}
\end{figure*}


\subsection{Parameters Settings}
There are some parameters to be set in advance. We set the size of neighborhoods  $k = 5$ on all the datasets for all these compared feature selection algorithms except MAXVAR. The weight of the $k$-nn graph for LapScore is based on the Gaussian kernel, where the kernel width is set to the mean distance between any two data examples as suggested in \cite{optsigma}. For LLEScore, the regularization parameter is set to $10^{-5}$ as in \cite{llescore}. Compared to most embedded unsupervised feature selection methods, which often require an unrealistic grid search over several parameters, the parameters of all these compared unsupervised feature selection methods can be relatively easily set in advance.



\subsection{Clustering with Selected Features}

With the selected features, we evaluate the performance in terms of $k$-means clustering by three widely used metrics, i.e., Accuracy (ACC), Normalized Mutual Information (NMI), and Purity. The results of $k$-means clustering depend on the initialization. For all the compared algorithms with different parameters and different numbers of selected features, we first repeat the clustering 20 times with random initialization and record the average results.

Since the optimal number of selected features is unknown in advance, to better evaluate the performance of unsupervised feature selection algorithms, we finally report the averaged results over different numbers of selected features (the range of selected features for each data set can be found in Table~\ref{dataset}) with standard deviation.

The clustering results in terms of ACC, NMI and Purity are reported in Table~\ref{res_acc}, Table~\ref{res_nmi} and Table~\ref{res_purity}, respectively. For different feature selection algorithms, the results in each cell of Table~\ref{res_acc}, Table~\ref{res_nmi} and Table~\ref{res_purity} are the mean $\pm$ standard deviation. The last row of Table~\ref{res_acc}, Table~\ref{res_nmi} and Table~\ref{res_purity} shows the averaged results of all the algorithms over the 6 datasets. The best results with the highest value among these unsupervised feature selection algorithms are highlighted in boldface.

Compared with clustering using all features, our method can achieve comparable results with fewer than 50 features. These results well demonstrate the effectiveness and efficiency of unsupervised feature selection algorithms. These unsupervised feature selection algorithms not only can largely reduce the number of features, facilitating the subsequent learning process, but can also often improve the clustering performance. It can also be observed that our method consistently produces better performance than the other unsupervised feature selection algorithms. In particular, our method achieves $20.87\%$, $19.85\%$ and $14.29\%$ improvement in terms of accuracy, NMI and purity, respectively, over the second best algorithm.

The details of the clustering results on each data set are shown in Figs.~\ref{fig:res_COIL20_1440n_1024d_20c_LKRScore}, \ref{fig:res_JAFFE_213n_676d_10c_LKRScore}, \ref{fig:res_UMIST_575n_644d_20c_LKRScore}, \ref{fig:res_YaleB_2414n_1024d_38c_LKRScore},  \ref{res_PIE_Pose27_2856n_1024d_68c_LKRScore} and \ref{res_Lung_203n_3312d_5c_LKRScore}. The black line with square markers denotes the proposed method; the other methods include AllFea, MaxVar, LapScore, MCFS, LGD and LLEScore. It can be observed that our method largely improves the clustering results under most of the feature numbers.

\subsection{Comparison of Redundancy}
To fully investigate the effectiveness of the proposed method, we take the redundancy metric to measure the redundancy of the selected subset of features. The results in terms of redundancy are reported in Table \ref{res_red}. For different feature selection algorithms, the results in each cell of Table \ref{res_red} are the mean $\pm$ standard deviation. The last row of Table \ref{res_red} shows the averaged results of all the algorithms over the 6 datasets. The best results with lowest value of these unsupervised feature selection algorithms are highlighted in boldface.

From Table~\ref{res_red}, we can see that our method achieves the lowest redundancy on the selected features. Compared with the second lowest algorithm, MCFS, we further obtain a $25.83\%$ improvement. The details of redundancy for different numbers of selected features are provided in Fig.~\ref{res_redundancy_01}. The redundancy of the features selected by the filter-based methods, i.e., MaxVar, LapScore and LLEScore, is higher than that of the other two methods. These results well demonstrate that the clustering results could be improved with a more compact subset of features by redundancy minimization.

\begin{table*}
    \caption{Redundancy on different datasets with $k$=5 (mean $\pm$ std).}
    \centering 
    \label{res_red}
    \begin{tabular}{| c  || c || c | c | c | c | c |  c |}
        \hline
        Data Sets   &   AllFea  & MaxVar  &  LapScore  &  MCFS  &  LGD  &  LLEScore  &  Our Method  \\ \hline
        \hline
        COIL	&  0.1512 	&  0.7916 	$\pm$  0.0615 	&  0.7002 	$\pm$  0.1366 	&  \textbf{0.1313 	$\pm$  0.0335} 	&  0.5655 	$\pm$  0.0849 	&  0.5880 	$\pm$  0.0968 	&  0.2842 	$\pm$  0.1159\\ \hline
        JAFFE	&  0.2245 	&  0.5791 	$\pm$  0.1855 	&  0.4692 	$\pm$  0.2164 	&  \textbf{0.2249 	$\pm$  0.0297} 	&  0.3477 	$\pm$  0.0436 	&  0.2412 	$\pm$  0.0417 	&  0.3297 	$\pm$  0.1168\\ \hline
        UMIST	&  0.1669 	&  0.4332 	$\pm$  0.1910 	&  0.5957 	$\pm$  0.1123 	&  0.4392 	$\pm$  0.1097 	&  0.2657 	$\pm$  0.0444 	&  \textbf{0.1979 	$\pm$  0.0530} 	&  0.3302 	$\pm$  0.0621\\ \hline
        YALEB	&  0.3133 	&  0.8639 	$\pm$  0.0786 	&  0.3239 	$\pm$  0.0668 	&  0.8086 	$\pm$  0.0314 	&  0.8590 	$\pm$  0.0978 	&  0.6004 	$\pm$  0.0347 	&  \textbf{0.2474 	$\pm$  0.0350} \\ \hline
        PIE	&  0.3836 	&  0.8846 	$\pm$  0.0352 	&  0.8923 	$\pm$  0.0109 	&  0.4144 	$\pm$  0.0485 	&  0.3948 	$\pm$  0.0702 	&  0.7247 	$\pm$  0.0597 	&  \textbf{0.3668 	$\pm$  0.0493 } \\ \hline
        LUNG	&  0.0359 	&  0.0659 	$\pm$  0.0234 	&  0.4168 	$\pm$  0.1471 	&  0.0870 	$\pm$  0.0236 	&  0.0476 	$\pm$  0.0244 	&  0.2816 	$\pm$  0.0737 	&  \textbf{0.0033 	$\pm$  0.0690} \\ \hline
        \hline
        Average	&  0.2126	&  0.6031 			&  0.5664 			&  0.3509 		&  0.4134 	 		&  0.4390 		&  \textbf{0.2603} 	\\ \hline        
    \end{tabular}
\end{table*}

\begin{figure*}[t]
    \centering
    \subfloat[]{\label{fig:res_COIL20_1440n_1024d_20c_LKRScore_Redundancy} \includegraphics[width=0.33\textwidth]{res_COIL20_1440n_1024d_20c_Score_Redundancy}}
    \subfloat[]{\label{fig:res_JAFFE_213n_676d_10c_Score_Redundancy} \includegraphics[width=0.33\textwidth]{res_JAFFE_213n_676d_10c_Score_Redundancy}}
    \subfloat[]{\label{fig:res_UMIST_575n_644d_20c_LKRScore_redundancy} \includegraphics[width=0.33\textwidth]{res_UMIST_575n_644d_20c_Score_Redundancy}}
    \\
    \subfloat[]{\label{fig:res_YaleB_2414n_1024d_38c_LKRScore_redundancy} \includegraphics[width=0.33\textwidth]{res_YaleB_2414n_1024d_38c_Score_Redundancy}}
    \subfloat[]{\label{fig:res_PIE_Pose27_2856n_1024d_68c_LKRScore_redundancy}\includegraphics[width=0.33\textwidth]{res_PIE_Pose27_2856n_1024d_68c_Score_Redundancy}}
    \subfloat[]{\label{fig:res_Lung_203n_3312d_5c_LKRScore_redundancy}\includegraphics[width=0.33\textwidth]{res_Lung_203n_3312d_5c_Score_Redundancy}}
    \caption{Redundancy of the selected features with $k$=5 on  COIL, JAFFE, UMIST, YALEB, PIE and LUNG, respectively.}
    \label{res_redundancy_01}
\end{figure*}


\section{Conclusion}
In this paper we construct simple $k$-nearest neighbor graphs using all candidate features and each single feature to characterize the local structure of data. We further propose to minimize the linear reconstruction error between the nearest neighbor graph constructed from all candidate features and those constructed from each single feature. Our method with the globally optimal weights actually selects those features with the highest relevance and lowest redundancy simultaneously, which makes it more suitable for the task of unsupervised feature selection. Experimental results on many benchmark data sets demonstrate that the proposed method outperforms many state-of-the-art unsupervised feature selection methods.
%
%\appendices

%\section{Algorithm for The Problem in (12)}
%In this section, the general augmented Lagrangian multiplier (ALM) method is employed to solve problem \eqref{score2}.
%
%By introducing the auxiliary variable $\mathbf{z}$, the problem in \eqref{score2} can be rewritten as 
%\begin{align}\label{score3}
%\min_{\mathbf{w}, \mathbf{z}} \quad & \mathbf{z}^T\mathbf{H} \mathbf{w} - 2 \mathbf{z}^T \mathbf{b}\\
%\st \quad & \sum_{i=1}^{d} w_i = 1, \mathbf{w} \geq 0, \mathbf{z} = \mathbf{w}. \nonumber
%\end{align}
%By using general augmented Lagrangian multiplier (ALM) method, the problem in \eqref{score3} can be further rewritten as 
%\begin{align}\label{score4}
%\min_{\mathbf{w}, \mathbf{z}} \quad & \mathbf{z}^T\mathbf{H} \mathbf{w} - 2 \mathbf{z}^T \mathbf{b} +\frac{\mu}{2} ||\mathbf{w} - \mathbf{z} + \frac{1}{\mu}\Sigma||_{2}^{2}\\
%\st \quad & \sum_{i=1}^{d} w_i = 1, \mathbf{w} \geq 0. \nonumber
%\end{align}
%where $\Sigma$ is the Lagrangian multiplier, and $\mu$ is a quadratic
%penalty parameter.
%
%Then, the above problem can be solved by the following alternative optimization method.
%
%\textbf{Optimizing w.r.t. $\mathbf{z}$ when $\mathbf{w}$ is fixed}
%
%By taking the derivative of problem \eqref{score4} with respect to $\mathbf{z}$ and setting it to zero, we have
%\begin{align}
%\mathbf{z} = \mathbf{w} + \frac{1}{\mu}(\Sigma + \mathbf{b} - \mathbf{H}\mathbf{w})
%\end{align}
%
%\textbf{Optimizing w.r.t. $\mathbf{w}$ when $\mathbf{z}$ is fixed}
%When $\mathbf{z}$ is fixed, the problem in \eqref{score4} is simplified as
%\begin{align}\label{score5}
%\min_{\mathbf{w}} \quad & \mathbf{z}^T\mathbf{H} \mathbf{w} + \frac{\mu}{2} ||\mathbf{w} - \mathbf{z} + \frac{1}{\mu}\Sigma||_{2}^{2}\\
%\st \quad & \sum_{i=1}^{d} w_i = 1, \mathbf{w} \geq 0. \nonumber
%\end{align}
%which can be rewritten as 
%\begin{align}\label{score6}
%\min_{\mathbf{w}} \quad & \frac{\mu}{2} ||\mathbf{w} - \mathbf{z} + \frac{1}{\mu}(\Sigma + \mathbf{H} \mathbf{z})||_{2}^{2}\\
%\st \quad & \sum_{i=1}^{d} w_i = 1, \mathbf{w} \geq 0. \nonumber
%\end{align}
%Let $\mathbf{s} = \mathbf{z} + \frac{1}{\mu}(\Sigma + \mathbf{H} \mathbf{z})$, the above problem reduces to 
%\begin{align}\label{score7}
%\min_{\mathbf{w}} \quad   ||\mathbf{w} - \mathbf{s}||_{2}^{2} \quad \st \quad  \sum_{i=1}^{d} w_i = 1, \mathbf{w} \geq 0. 
%\end{align}
%Clearly, the optimization problem in \eqref{score7} is a Euclidean projection on the simplex. The global optimal solution of \eqref{score7} can be find by the  piecewise root finding algorithm in linear time \cite{root2011}\cite{Nie2018A}.


\bibliographystyle{IEEEtran}
\bibliography{fsasl}


\EOD


\end{document}
