% !TEX root = ../ICDM2011.tex
% 
\section{Experiments}\label{sec:exp}
In this section, we empirically evaluate the effectiveness and efficiency of the algorithm (\ALG) proposed in Section~\ref{sec:appr}. We first describe the data sets, give the evaluation criteria and then discuss the performance of \ALG\ over different data sets and over varying parameter settings.

\subsection{Data Sets}
We generate auxiliary-target pairs from two benchmark text datasets---\textit{20 newsgroups\footnote{http://people.csail.mit.edu/jrennie/20Newsgroups/}} (20NG) and \textit{New York Times\footnote{
http://www.nytimes.com/}} (NYTimes).

Here we briefly summarize how to generate each dataset. For each dataset, we randomly select at least 2 categories as the target dataset and then select an equal number of categories as the auxiliary dataset.

Specifically, for 20NG, we generate 6 clustering tasks using the 20 categories, 5 of which are binary clustering problems and 1 of which is a multi-class clustering problem. The detail of the tasks is shown in Table~\ref{tb2}.

For NYTimes, we collect the news web pages from Jan. 2009 to Mar. 2011, with 109,157 stories in total. The web pages are divided into 27 categories; however, some of them include too few documents. We carefully select some ``large'' categories to conduct the experiments. Similarly, we create 6 clustering tasks consisting of 5 binary ones and 1 multi-class one. More information of the tasks can be obtained in Table~\ref{tb3}.
\begin{table}
\centering
\caption{The description of each task in NYTimes}\label{tb3}
\begin{tabular}{|c|l|l|}
\hline
Dataset & Auxiliary & Target\\
\hline
I & automobiles, realestate &  theater, weekinreview\\ \hline
II & magazine, science & books, education\\ \hline
III & dining, fashion & movies, technology\\ \hline
IV & garden, jobs & weekinreview, your-money\\ \hline
V & health, science & travel, washington\\ \hline
VI & arts, business, nyregion & opinion, sports, world\\ \hline
\end{tabular}
\end{table}
\begin{table*}
\centering
\caption{The description of each task in 20NG}\label{tb2}
\begin{tabular}{|c|l|l|}
\hline
Dataset & Auxiliary & Target\\
\hline
I & rec.autos, rec.motorcycles &  talk.politics.guns, talk.politics.misc\\ \hline
II & alt.atheism, misc.forsale & rec.sport.baseball, sci.med\\ \hline
III & rec.autos, talk.politics.mideast & comp.graphics, misc.forsale\\ \hline
IV & comp.windows.x, sci.space & comp.os.ms-windows.misc, sci.electronics\\ \hline
V & comp.sys.mac.hardware, sci.space & soc.religion.christian, talk.politics.guns\\ \hline
VI & alt.atheism, comp.sys.ibm.pc.hardware, misc.forsale & rec.sport.hockey, sci.crypt, talk.politics.mideast\\ \hline
\end{tabular}
\end{table*}

\subsection{Evaluation Criteria and Baselines}
 In these experiments, we use \textit{entropy} to measure the quality of clustering performance, which reveals the purity of clusters. More specifically, the \textit{entropy} of a cluster $\mathsf{y}$ is defined as,
 \begin{equation}\label{eq18}
 \mathcal{E}(\mathsf{y})=-\sum_{c\in\mathcal{C}}p(c|\mathsf{y})\log_2p(c|\mathsf{y}),
 \end{equation}
 where $c$ represents a class label in the evaluation and $p(c|\mathsf{y})$ is formulated as,
 \begin{equation}\label{eq19}
p(c|\mathsf{y})=\frac{|\{\mathsf{x}|t(\mathsf{x})=c\wedge h(\mathsf{x})=\mathsf{y}\}|}{|\mathsf{y}|},
 \end{equation}
 where $t(\mathsf{x})$ denotes the true label of $\mathsf{x}$ in the evaluation. The overall \textit{entropy} for the whole clustering is defined as the weighted sum of the entropy with respect to all the clusters; formally,
 \begin{equation}\label{eq20}
 \mathcal{E}(\mathcal{Y})=\sum_{\mathsf{y}\in\mathcal{Y}}\frac{|\mathsf{y}|}{N}\mathcal{E}(\mathsf{y}).
 \end{equation}
 The overall \textit{entropy} $\mathcal{E}(\mathcal{Y})$ is used to evaluate the results of clustering.
 
 In the following experiments, we choose one traditional clustering algorithm: K-means (KM)~\cite{hartigan1979algorithm} and three state-of-the-art transfer clustering methods: Self-taught Clustering (STC)~\cite{DBLP:conf/icml/DaiYXY08}, Cross-Guided Clustering (CGC)~\cite{DBLP:conf/icdm/BhattacharyaGJV09} and Multitask Bregman Clustering (MBC)~\cite{DBLP:conf/aaai/ZhangZ10} for the purpose of comparison.
 

\subsection{Comparison with Baselines}
Tables~\ref{tb4} and~\ref{tb5} present the clustering performance in terms of entropy according to each data set and each clustering method. KM\_S means using the k-means algorithm simply on the target data set for clustering, which does not take the auxiliary data set into account. An alternative is KM\_C, which combines the target data set and the auxiliary data set for clustering. However, we only evaluate the performance over the target data set (the auxiliary data are ignored during evaluation).

From Table~\ref{tb4}, we can find that KM\_C and CGC perform somewhat worse than KM\_S. We believe that the reason may be that the target data set and the auxiliary data set are not relevant enough. It is worth mentioning that both KM and CGC are sensitive to the initial centroids. More specifically, the main idea of CGC is to make use of the auxiliary data set to help build the initial centroids. Therefore, when using KM\_C or CGC, the auxiliary data may bias the centroids and lead to poor performance. As to STC and MBC, although they have comparatively good results compared to KM\_S and KM\_C, they are defeated by our algorithm \ALG. This is because the two algorithms consider the auxiliary data set and the target data set equivalently and give the auxiliary data a fixed weight no matter what the circumstances are. Generally speaking, our algorithm \ALG\ greatly outperforms the other five baseline methods. We believe this is the benefit of our ``adaptive'' idea, which aims at maximizing the utility of the auxiliary data set according to its similarity to the target data set.

From Table~\ref{tb5}, we can see that KM\_C, STC, CGC and MBC may lead to negative transfer and consequently perform worse than KM\_S. This can be explained since all these methods cannot solve the problems of ``whether to transfer'' and ``how much to transfer'' adequately. If the algorithm sets the weight or influence of the auxiliary data higher than it should be, it may lead to negative transfer. On the other hand, if the weight of the auxiliary data is lower than it could be, it may result in insufficient transfer, which means it does not maximize the utility of the auxiliary data. Again, our algorithm \ALG\ performs better than the other clustering methods.
\begin{table}
%\begin{minipage}[b]{0.45\textwidth}
\centering
\caption{Performance of entropy in each dataset of 20NG}\label{tb4}
\begin{tabular}{|c||c|c|c|c|c|c|}
\hline
Dataset & KM\_S & KM\_C & STC & CGC & MBC & \ALG\\
\hline
I & 0.990 & 0.990 & 0.986 & 0.995 & 0.881 & \textbf{0.674}\\ %\hline
II & 0.960 & 0.994 & 0.484 & 0.993 & 0.448 & \textbf{0.368}\\ %\hline
III & 0.998 & 0.999 & 0.793 & 0.988 & 0.444 & \textbf{0.375}\\ %\hline
IV & 0.992 & 0.997 & 0.660 & 0.957 & 0.464 & \textbf{0.435}\\ %\hline
V & 0.997 & 0.998 & 0.854 & 0.996 & 0.571 & \textbf{0.256}\\ %\hline
VI & 1.513 & 1.306 & 0.931 & 1.504 & 0.902 & \textbf{0.851} \\ \hline
average & 1.075 & 1.047 & 0.785 & 1.072 & 0.618 & \textbf{0.493}\\ \hline
\end{tabular}
\end{table}
%\vspace{-0.5in}
\begin{table}
%\begin{minipage}[b]{0.45\textwidth}
\centering
\caption{Performance of entropy in each dataset of NYTimes}\label{tb5}
\begin{tabular}{|c||c|c|c|c|c|c|}
\hline
Dataset & KM\_S & KM\_C & STC & CGC & MBC & \ALG\\
\hline
I & 0.868 & 0.525 & 0.534 & 0.662 & 0.525 & \textbf{0.409}\\ %\hline
II & 0.955 & 0.910 & 0.919 & 0.910 & 0.918 & \textbf{0.858}\\ %\hline
III & 0.973 & 0.973 & 0.893 & 0.937 & 0.966 & \textbf{0.805}\\ %\hline
IV & 0.840 & 0.530 & 0.861 & 0.872 & 0.588 & \textbf{0.490}\\ %\hline
V & 0.300 & 0.400 & 0.401 & 0.404 & 0.400 & \textbf{0.267}\\ %\hline
VI & 1.559 & 1.548 & 1.496 & 1.501 & 1.482 & \textbf{1.383} \\ \hline
average & 0.916 & 0.814 & 0.851 & 0.881 & 0.813 & \textbf{0.702}\\ \hline
\end{tabular}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\comment{
\begin{table*}
\centering
%\caption{Performance of  in 20NG}\label{tb4}
\begin{tabular}{|c|c|c|c|c|c|}
\hline 
1 & 2 & 3 & 4&5&6\\ \hline
\end{tabular}
\begin{tabular}{|c|c|c|c|c|c|}
\hline 
1 & 2 & 3 & 4 &5&6\\ \hline
\end{tabular}
\end{table*}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Impact of Auxiliary Dataset Selection}
In this subsection, we try to analyze the impact of auxiliary dataset selection on the performance of our algorithm (\ALG). For every target dataset in 20NG except the last one, we combine it with each of the first 5 auxiliary datasets to create new clustering tasks and perform our algorithm \ALG\ over them. The results are shown in Figure~\ref{fig4}. For instance, in Figure~\ref{fig4a}, the entropy of II is the result of combining I's target data and II's auxiliary data. Since the auxiliary data of II includes the same topic misc.forsale as the target data of III, we do not combine them for the experiment. As a result, point II is absent in the x-axis of Figure~\ref{fig4c}. From Figure~\ref{fig4}, we can see that the performance of our algorithm is not worse than KM\_S. More specifically, our algorithm greatly outperforms KM\_S in most of the comparisons. We believe that our algorithm (\ALG) can properly transfer the knowledge from the auxiliary data according to the relevance of the auxiliary data and the target data and avoid negative transfer. In addition, we can also observe that, if the two datasets are highly related, \ALG\ can transfer much knowledge from the auxiliary data and the entropy decreases dramatically. On the other hand, if the two datasets are not so relevant, \ALG\ transfers little information or even does not transfer, but will not do harm. \ALG\ realizes ``adaptive transfer'' to avoid negative transfer and maximize the utility of the auxiliary data set.
%\comment{
\begin{figure*}%
%\centering
\subfigure[Target data I]{\label{fig4a}\includegraphics[width=0.18\textwidth]{data1}}
%...figure code...
%\hspace{1in}
\subfigure[Target data II]{\label{fig4b}\includegraphics[width=0.18\textwidth]{figure/data2}}
\subfigure[Target data III]{\label{fig4c}\includegraphics[width=0.18\textwidth]{figure/data3}}
\subfigure[Target data IV]{\label{fig4d}\includegraphics[width=0.18\textwidth]{figure/data4}}
\subfigure[Target data V]{\label{fig4e}\includegraphics[width=0.18\textwidth]{figure/data5}}
\caption{Impact of Auxiliary Dataset Selection on 20NG 5 datasets (described in Table~\ref{tb2})}\label{fig4}

\end{figure*}%}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Choosing Number of Feature Clusters}
Since our algorithm (\ALG) requires the number of feature clusters $C_\mathcal{F}$ as the input, we should estimate the impact of $C_\mathcal{F}$ and choose a specific value for it in our experiments. In reality, this number should be carefully tuned. In this paper, we tune it according to the empirical results. Figure~\ref{fig5} represents the entropy curve with respect to different numbers of feature clusters. We set the number of feature clusters from 2 to 32, making it increase exponentially. We conduct the experiments on II of 20NG. From Figure~\ref{fig5}, we can see that the performance is sensitive to the value of $C_\mathcal{F}$ when $C_\mathcal{F}$ is less than 8, and the entropy decreases as $C_\mathcal{F}$ becomes larger. However, when $C_\mathcal{F}$ is larger than 8, the entropy becomes stable. As a result, we believe that a number of feature clusters no less than 16 will be sufficient to make our algorithm (\ALG) perform well. In the other experiments, we set the number of feature clusters to 16 to make our algorithm \ALG\ behave well.
\begin{figure}%
\centering
\includegraphics[width=0.4\textwidth]{figure/feature.pdf} 
\centering
\vspace{-0.2in}
\caption{The entropy curves with different numbers of  feature clusters}\label{fig5}
\vspace{-0.15in}
\end{figure}%}
\subsection{Setting Number of Iteration Times}
Since our algorithm \ALG\ is iterative, it is crucial to test its convergence quality. Here, we empirically show the convergence property of \ALG. Figure~\ref{fig6} shows the performance in terms of entropy according to different numbers of iterations on II of 20NG. From Figure~\ref{fig6}, we can see that our algorithm \ALG\ shows the convergence property after 8 iterations. Meanwhile, the entropy decreases after every iteration before it becomes stable and reaches the lowest point when \ALG\ converges. In addition, we can find that the entropy decreases very fast when the number of iterations is less than 8. It reveals the property that our algorithm \ALG\ makes the entropy drop dramatically without too many iterations before it achieves the stable state. In other experiments, we set the number of iterations to 15. We believe that 15 iterations are enough to make our algorithm \ALG\ converge.
\begin{figure}%
\centering
\includegraphics[width=0.4\textwidth]{figure/iteration.pdf} 
\centering
\vspace{-0.2in}
\caption{The entropy curves with different numbers of iteration times}\label{fig6}
\vspace{-0.15in}
\end{figure}%} 

\subsection{Running Time Analysis}
In this section, we empirically show the time property of our algorithm \ALG, though in Section~\ref{sec:appr} we have theoretically concluded that it runs in linear time with respect to the total number of non-zero feature values in $\mathcal{X}_{T}$ and $\mathcal{X}_{S}$. In Figure~\ref{fig7}, the x-axis represents the total number of non-zero feature values in both $\mathcal{X}_{T}$ and $\mathcal{X}_{S}$, where $N$ and $M$ denote the numbers in $\mathcal{X}_{T}$ and $\mathcal{X}_{S}$ respectively. The y-axis is set to be the time consumed by one iteration. We conduct this experiment on NYTimes: II. From Figure~\ref{fig7}, we can observe that the time consumed by \ALG\ increases roughly linearly along with the increase of the data size, which echoes our theoretical analysis. In addition, our algorithm \ALG\ uses less than 100 seconds when the number of non-zero feature values reaches about 1,141,680, which illustrates that \ALG\ is speedy. As a result, from Figure~\ref{fig7}, we believe that our algorithm \ALG\ has good scalability due to its linearity and promptness.
\begin{figure}%
\centering
\includegraphics[width=0.4\textwidth]{figure/time} 
\centering
\vspace{-0.15in}
\caption{Running time of different data size}\label{fig7}
\vspace{-0.15in}
\end{figure}%}  
 
\comment{ Our evaluation uses the Web pages crawled from the Open Directory Project(ODP)\footnote{http://www.dmoz.com/.} during May 2010, including categories of Arts, Computers, Games, Health, Home, News, Recreation, Reference, Science and Shopping. Each Web page in ODP was classified by human experts.
\comment{
\begin{table}
\centering
\caption{The description for all collected English and Chinese Web pages from ODP}\label{table:1}
\begin{tabular}{|l|r|r|}\hline
Category&Chinese Pages&English Pages\\ \hline
Arts&1,838&227,775\\
Business&8,485&230,048\\
Computers&2,274&109,836\\
Games&768&51,201\\
Health&1,397&57,636\\
Home&273&25,799\\
Kids and Keens&359&45,837\\
News&649&8,508\\
Recreation&722&94,714\\
Reference&2,868&55,433\\
Science&1,298&113,775\\
Shopping&769&88,161\\
Society&2,655&228,087\\
Sports&338&92,950\\ \hline
\end{tabular}
\vspace{-0.15in}
\end{table}

\begin{table}
\centering
\caption{The composition for each datasets}\label{table:2}
\begin{tabular}{|c|l|}\hline
Dataset &Categories\\ \hline
1 &Games, News\\ \hline
2& Arts, Computers\\ \hline 
3&Recreation, Science\\ \hline
4& News, Recreation\\ \hline
5&Health, Home\\ \hline
6&Recreation, Reference, Shopping\\ \hline
\end{tabular}
\vspace{-0.2in}
\end{table}
}
%\subsection{Data Preparation}
We preprocess the raw data as follows. First, all the Chinese Web pages are translated into English by Google Translator\footnote{http://www.google.com/language\_tools.}. Then, we transform all the letters to lowercase, and stem the words using the Porter's stemmer~\cite{Porter}. Afterwards, stop words are removed. \comment{In order to reduce the size of the feature space, cut down the useless features and speed up the classification, we use a simple filter \textit{RemoveUseless} in weka\cite{Hall:Weka}. Next, for the purpose of executing feature weighting, we selected  \textit{PKIDiscretize} \cite{Witten:Data} filter to discretize real attributes. Moreover, In order to reduce the noise and avoid enlarging the little deviation, we commit a discretization on each feature $A_i$ using PKIDiscretize\cite{Witten:Data}\comment{ with internals $V_i = \{ v_{i1},\cdots, v_{il}\}$($il$ is automatically determined by PKIDiscretize). }}

In order to evaluate our algorithm, we set up six cross-language classification tasks. Five of them are binary classification tasks, and the other one is for three-class classification. We randomly resample 50000 instances from English Web pages as the training set due to the computational issue.

%\subsection{Baselines \& Evaluation Metrics}
In the following experiments, we choose two traditional classifiers: Naive Bayes\comment{~\cite{NB}} and Support Vector Machines (SVM)~\cite{CC01a}, and four transfer learning approaches: Transductive SVM (TSVM)~\cite{DBLP:conf/icml/Joachims99}, Information Bottleneck (IB)~\cite{Ling:Can}, Transfer Component Analysis (TCA)~\cite{DBLP:conf/ijcai/PanTKY09} and domain adaptation with Extracting Discriminative Concepts (EDC)~\cite{DBLP:conf/kdd/ChenLTW09} for the purpose of comparisons. $\lambda_{W_F}$ and $\lambda_{W_I}$ are set to 0.05.

With the help of the knowledge from the source domain, transfer learning aims at predicting labels for instances in the target domain with classification performance as close as possible to the traditional classification scenarios, training a classifier with instances in the target domain and applying it to the instances in the same domain.\comment{ We believe that comparisons between the transfer learning algorithms and the algorithms under traditional classification scenarios could be a good idea to evaluate the transfer learning algorithms themselves. Although we address the issue of transfer learning in this paper, if we have enough training data in the target domain, we should have a classifier better than that from transfer learning, which can be viewed as an ``upper bound'' of the transfer learning algorithms.} In particular, for the purpose of comparisons, we implement an ``upper bound'' algorithm by committing 5-fold cross-validations with NB, SVM and TSVM individually over the data in the target domain (the Chinese text), which are called NB-CN, SVM-CN and TSVM-CN respectively. Note that these classifiers are virtual ``enemies'' against the transfer learning algorithms. If the gap between a specific transfer learning algorithm and the virtual ``enemies'' is narrow, the transfer learning algorithm is close to the ``limit''; otherwise, there is still space to improve. Precision, recall and $F_1$-measure are calculated in each experiment in this paper, which are widely used as evaluation metrics in text classification. \comment{Besides, we regard $F_1$-measure as the integrated criterion to evaluate an algorithm. We believe a method with a higher value of $F_1$-measure has better capability.}
\comment{
\begin{equation*}
P = \frac{1}{\vert\mathcal{Y}\vert}\sum_{y \in \mathcal{Y}}\frac{\vert\lbrace \x |\x \in \mathcal{D}^{T}_{T} \wedge f_P(\x) = f_T(\x) = y\rbrace\vert}{\vert\lbrace \x|\x \in \mathcal{D}^{T}_{T} \wedge f_P(\x) = y\rbrace\vert},
\end{equation*}
and 
\begin{equation*}
R = \frac{1}{\vert\mathcal{Y}\vert}\sum_{y \in \mathcal{Y}}\frac{\vert\lbrace \x |\x \in \mathcal{D}^{T}_{T} \wedge f_P(\x) = f_T(\x) = y\rbrace\vert}{\vert\lbrace \x|\x \in \mathcal{D}^{T}_{T} \wedge f_T(\x) = y\rbrace\vert}.
\end{equation*}
$F_1$-measure is a harmonic mean of precision and recall defined as $F_1 = {2PR}/(P+R).$}

%%\begin{equation}
%%%\vspace{-0.2in}
%%\end{equation}
\comment{
\begin{figure}[htbp]
\centering
%\vspace{-0.1in}
\includegraphics[width=0.47\textwidth]{Figure/size}
\caption{Impact of Number of Instances}\label{fig:size}
%\vspace{-0.2in}
\end{figure}
\subsection{Impact of Number of Instances}
Figure~\ref{fig:size} shows the impact of number of instances on \ALG. Instances are acquired by random sampling. For both two tasks, the $F_1$-measure increases with the number of instances becoming larger. In addition, when the number of instances becomes larger, the benefit of \ALG\ is shown significantly since the improvement of $F_1$-measure is apparant for a specific datasets pair. As shown in Figure~\ref{fig:size}, the effect of \ALG\ is apparant when the number of instances is 50000. So, we randomly resample the training data to select 50000 instances to conduct the following experiments due to the computing resources limit.
}
\subsection{Impact of Iteration Times}
Since our algorithm \ALG\ is an iterative algorithm, an important factor of \ALG\ is the number of iterations ($iterNum$) or the convergence speed. We run a few tests to observe the convergence speed. {Figure~\ref{fig:iter}} shows the impact of the number of iterations on \ALG\ in one of these tests. The $F_1$-measure at the zero point indicates the value of the algorithm without any feature and instance weighting process. \ALG\ usually converges at the second or third iteration. In the following experiments, we set $iterNum$ to be 3 to make sure that \ALG\ converges.
\begin{figure}[htb]
\centering
\includegraphics[width=0.45\textwidth]{Figure/iteration} 
\vspace{-0.1in}\caption{Impact of Iteration Times}\label{fig:iter}
\vspace{-0.1in}
\end{figure}
\comment{
\subsection{Impact of Threshold}
In our experiments, we set the prediction thresholds $\mathbf{t_c} = \{0.1,0.2,\dots,0.9\}$. More specifically, we use a classifier to output the probability distribution of $y_{T_j}$ given $\x_{T_i}$. Then, based on the predictive probability of an instance $\x$ and a prediction threshold, we can map $\x \in \mathcal{X}_{T}$ to a class label. {Figures~\ref{fig:thre1}-\ref{fig:thre3}} show the impact of $\mathbf{t_c}$ on three classification tasks. From {Figures~\ref{fig:thre1}-\ref{fig:thre3}}, we observe that \ALG\ always shows its advantages of improving the classification performance over the whole range of $\mathbf{t_c}$. That is, the evaluation is not sensitive to the selection of the prediction threshold.
\begin{figure}[htbp]
\centering
\includegraphics[width=0.45\textwidth]{Figure/thresholdGN} 
\caption{Impact of Prediction Threshold on the task of Games vs. News}\label{fig:thre1}
%\end{figure}
%
%\begin{figure}[t]
\centering
\includegraphics[width=0.475\textwidth]{Figure/thresholdRS} 
\caption{Impact of Prediction Threshold on the task of Recreation vs. Science}\label{fig:thre2}
%\end{figure}
%\begin{figure}[htbp]
\centering
\includegraphics[width=0.475\textwidth]{Figure/thresholdNR} 
\caption{Impact of Prediction Threshold on the task of News vs. Recreation}\label{fig:thre3}
\end{figure}
}
\begin{figure*}[htb]
\centering
%\subfigure{
\includegraphics[width=0.425\textwidth]{Figure/ON-GN} 
%\vspace{0.1in}
%}
%\subfigure{
\includegraphics[width=0.425\textwidth]{Figure/ON-RS} 
%}
\vspace{-0.09in}
\caption{Comparison with Instance Weighting Only}\label{fig:on}
%\vspace{-0.1in}
\end{figure*}
\begin{table*}[t]
\centering
\caption{Comparisons with other Transfer Learning methods 1:Games vs. News, 
2:Health vs. Home, 3:News vs. Recreation, 4:Arts vs. Computers, 5:Recreation vs. Science and 6:Recreation vs. Reference vs. Shopping.}\label{tbl:TCAEDC}
\begin{tabular}{|c|c c c|c c c|c c c|}\hline
\multirow{2}{*}{Data set}&\multicolumn{3}{|c|}{Precision}&\multicolumn{3}{|c|}{Recall}&\multicolumn{3}{|c|}{F1-measure}\\ \cline{2-10}
&TCA&EDC&\ALG &TCA&EDC&\ALG &TCA&EDC&\ALG \\ \hline
1&0.833&0.808&0.835&0.759&0.813&0.823&0.794&0.810&0.829\\
2&0.803&0.703&0.848&0.607&0.712&0.686&0.691&0.706&0.758\\
3&0.77&0.783&0.889&0.846&0.802&0.849&0.806&0.792&0.868\\
4&0.815&0.775&0.856&0.720&0.694&0.701&0.765&0.732&0.771\\
5&0.845&0.830&0.953&0.876&0.840&0.831&0.860&0.835&0.888\\
6&0.533&0.561&0.614&0.666&0.524&0.633&0.592&0.542&0.623\\
\hline
Average&0.767&{0.743}&\textbf{0.833}&{0.746}&0.731&\textbf{0.754}&0.751&0.736&\textbf{0.790}\\ \hline
\end{tabular}
\end{table*}

\subsection{Comparison with Instance Weighting Only}

Since a few instance weighting methods have been applied on transfer learning, in this subsection, we compare our algorithm \ALG\ with the methods using only instance weighting (Uni-Weighting) to see whether it is necessary to integrate instance weighting and feature weighting. We will compare \ALG\ with feature weighting (or extracting) only methods in the next subsection. {Figure~\ref{fig:on}} shows the classification performance of NB, Uni-NB and \ALG-NB on two cross-language classification tasks. From the figure, it is clear that Uni-NB outperforms NB but does worse than \ALG-NB. The results show the benefit of combining feature weighting and instance weighting.
%\pagebreak

\subsection{Comparison with Baselines}
In this subsection, we conduct experiments on all six datasets. Five of the tasks are binary classification while the other one is a 3-class classification. We use naive Bayes, LibSVM and IB as base classifiers in our \ALG\ algorithm, which are named \ALG-NB, \ALG-SVM and \ALG-IB respectively. Table~\ref{tbl:baselines} shows the experimental results of the comparisons with different baseline methods as well as the ``upper bound'' methods. From these tables, we see that the \ALG\ algorithms are consistently better than their base classifiers. Furthermore, the \ALG\ algorithms in some tasks perform as well as or even outperform the ``upper bound'' methods.

We compare the \ALG\ algorithm with TCA~\cite{DBLP:conf/ijcai/PanTKY09} and EDC~\cite{DBLP:conf/kdd/ChenLTW09} on smaller datasets (randomly selecting around 1000 instances in each task with about 10000 features) for the computational issue including the memory usage of EDC\footnote{EDC needs $O(m^2)$ of memory, where $m$ is the number of features.}. Table~\ref{tbl:TCAEDC} shows the result of the comparison. Apparently, \ALG\ outperforms TCA and EDC by $5.2\%$ and $7.3\%$ in the overall $F_1$-measure.


\comment{The evaluation metrics are macro-average precision, recall and $F_1$-measure, of which we have just given the definitions. In these experiments, we set $iterNum$ to 3, $t^F$ to 0.15$s(w^{F})$ and $t^I$ to 0.4$s(w^I)$. 0.15$s(w^{F})$ means that 15\% features at the end of sorted $w^{F}$ array which organized from large to small should be removed. Similarly, 0.4$s(w^I)$ means that 40\% instances at the end of sorted $w^I$ array which organized from large to small should be removed.

\subsection{Running Time Analysis}
We record the running time of \ALG\ in the experiments. \textbf{Figure ~\ref{fig:time}} indicates that time consumed by \ALG\ increases roughly linearly along with increasing of the size of the training datasets in the source domains. }

\begin{table*}[t]
\centering
\caption{The Precision, Recall and F1-measure on Six Datasets 1:Games vs. News, 
2:Health vs. Home, 3:News vs. Recreation, 4:Arts vs. Computers, 5:Recreation vs. Science and 6:Recreation vs. Reference vs. Shopping.}\label{tbl:baselines}
\begin{tabular}{|c|c c|c c|c c c|c c c|}\hline
\multirow{2}{*}{Data set}&\multicolumn{10}{|c|}{Precision}\\ \cline{2-11}
&NB&\ALG-NB&SVM&\ALG-SVM&TSVM&IB&\ALG-IB&NB-CN&SVM-CN&TSVM-CN\\ \hline
1&0.823&{0.829}&0.866&{0.882}&0.876&0.846&{0.893}&0.767&0.903&0.960\\
2&0.794&{0.836}&0.812&{0.841}&{0.976}&0.840&0.888&0.937&0.905&0.914\\
3&0.774&{0.793}&0.856&{0.890}&{0.864}&0.752&0.846&0.922&0.894&0.903\\
4&0.653&{0.717}&0.610&{0.661}&0.764&0.750&{0.774}&0.784&0.846&0.764\\
5&0.713&{0.804}&0.752&{0.889}&0.820&0.833&{0.842}&0.857&0.928&0.822\\
6&0.673&{0.725}&0.654&{0.711}&-&0.712&{0.785}&0.839&0.889&-\\ \hline
Average&0.738&{0.784}&0.758&{0.812}&-&0.789&{0.838}&0.851&0.894&-\\ \hline
\end{tabular}
%\end{table*}
%
%\begin{table*}[hbtp]
\centering
%\caption{The Recall on Six Datasets 1:Arts vs. Computers 2:Games vs. News
%3:Health vs. Home 4:News vs. Recreation 5:Recreation vs. Science 6:Recreation vs. Reference vs. Shopping}\label{fig:SVM}
\begin{tabular}{|c|c c|c c|c c c|c c c|}\hline
\multirow{2}{*}{Data set}&\multicolumn{10}{|c|}{Recall}\\ \cline{2-11}
&NB&\ALG-NB&SVM&\ALG-SVM&TSVM&IB&\ALG-IB&NB-CN&SVM-CN&TSVM-CN\\ \hline
1&0.815&{0.820}&0.871&{0.888}&0.788&0.862&{0.902}&0.946&0.956&0.912\\
2&0.803&{0.848}&0.835&{0.859}&0.719&0.842&{0.900}&0.808&0.904&0.811\\
3&0.799&{0.819}&0.875&{0.923}&0.669&0.813&{0.827}&0.793&0.989&0.816\\
4&0.662&{0.730}&0.583&{0.685}&0.569&0.722&{0.779}&0.773&0.852&0.877\\
5&0.727&{0.824}&0.768&{0.901}&0.749&0.773&{0.865}&0.854&0.916&0.932\\
6&0.659&{0.714}&0.662&{0.726}&-&0.736&{0.802}&0.83&0.891&-\\ \hline
Average&0.744&{0.793}&0.766&{0.830}&-&{0.791}&0.846&0.834&0.918&-\\ \hline
\end{tabular}
%\end{table*}
%\begin{table*}[hbtp]
\centering
%\caption{The F1-measure on Six Datasets 1:Arts vs. Computers 2:Games vs. News
%3:Health vs. Home 4:News vs. Recreation 5:Recreation vs. Science 6:Recreation vs. Reference vs. Shopping}\label{fig:IB}
\begin{tabular}{|c|c c|c c|c c c|c c c|}\hline
\multirow{2}{*}{Data set}&\multicolumn{10}{|c|}{F1-measure}\\ \cline{2-11}
&NB&\ALG-NB&SVM&\ALG-SVM&TSVM&IB&\ALG-IB&NB-CN&SVM-CN&TSVM-CN\\ \hline
1&0.819&{0.824}&0.869&{0.885}&0.830&0.854&{0.897}&0.847&0.929&0.935\\
2&0.798&{0.842}&0.823&{0.850}&0.828&0.841&{0.892}&0.868&0.904&0.859\\
3&0.786&{0.805}&0.865&{0.906}&0.754&0.782&{0.836}&0.853&0.939&0.857\\
4&0.657&{0.723}&0.596&{0.673}&0.652&0.736&{0.777}&0.778&0.846&0.817\\
5&0.720&{0.814}&0.760&{0.895}&0.783&0.802&{0.853}&0.855&0.922&0.874\\
6&0.666&{0.719}&0.658&{0.718}&-&0.724&{0.793}&0.834&0.890&-\\ \hline
Average&0.741&{0.788}&0.772&{0.821}&-&{0.790}&0.841&0.839&0.905&-\\ \hline
\end{tabular}
\end{table*}


\comment{
\subsection{Experiments on Iteration}
Since our algorithm \ALG\ is an iterative algorithm, an important factor for \ALG\ is the number of iterations($iterNum$). Now let us empirically show the effect of difference on $iterNum$. Figure 2 shows the $F_1$-measure for each $iterNum$ on two datasets, GamesVsNews and RecreationVsScience.}
\comment{
\subsection{Experiments on Impact of Number of Instances}
As in section 5.4 we do not use the whole datasets to conduct the experiments due to the  computing resources limit, now in this section we focus on the effect of size of datasets on the experiments results. Also,experiments are executed on two datasets, GamesVsNews and RecreationVsScience. Results are shown in Figure 3.}
\comment{
\subsection{Experiments on Threshold Difference}}
\comment{
\subsection{Experiments on Used Time}}
\comment{
\begin{figure}%[htbp]
\centering
\includegraphics[width=0.47\textwidth]{Figure/time} 
\caption{Runtime(minutes) of \ALG\ using NB as the classifier}\label{fig:time}
\end{figure}
}
\comment{
\begin{figure*}
\centering
\subfigure{
\includegraphics[width=0.3\textwidth]{Figure/nb-preci1} }
\subfigure{
\includegraphics[width=0.3\textwidth]{Figure/nb-recall1} }
\subfigure{
\includegraphics[width=0.3\textwidth]{Figure/nb-f1} }
\caption{Experiments on Used Time}
\end{figure*}}
}
%\pagebreak

%\begin{table}[tbp]
%\centering
%\caption{The Experiments Results on Six Datasets Based on Using TSVM as the Classifier}
%\begin{tabular}{|c|c|c|c|}\hline
%Data set&Precision&Recall&$F1$-measure\\ \hline
%ArtsVsComputers&0.876&0.788&0.830\\
%GamesVsNews&0.976&0.719&0.828\\
%HealthVsHome&0.864&0.669&0.754\\
%NewsVsRecreaction&0.764&0.569&0.652\\
%RecreationVsScience&0.820&0.749&0.783\\ \hline
%
%\end{tabular}
%\end{table}
