% !TEX root = ../ICDM2011.tex
%  
\section{Our Approach}\label{sec:appr}

In this section, we present a novel adaptive approach to tackle the transfer clustering problem by making use of auxiliary data appropriately according to the similarity between target data and auxiliary data. We also discuss the theoretical properties of our algorithm.

\subsection{Objective Function}
We build the model of our algorithm based on the information theoretic co-clustering~\cite{DBLP:conf/kdd/DhillonMM03}. In the information theoretic co-clustering, the objective function of co-clustering is formulated as minimizing the loss in mutual information between instances and features, before and after co-clustering. Taking the target item set $\mathcal{X}_{T}$ and the feature space $\mathcal{F}$ as an example, the objective function can be expressed as
\begin{equation}\label{eq1}
I(\mathcal{X}_{T};\mathcal{F})-I(\mathcal{Y}_{T};\mathcal{Y}_F),
\end{equation}
where $I(\cdot;\cdot)$ denotes the mutual information between two random variables~\cite{DBLP:books/daglib/0016881}, i.e., $I(\mathcal{X}_{T};\mathcal{F})=\sum_{\mathsf{x}_{t}\in\mathcal{X}_{T}}\sum_{f\in\mathcal{F}}p(\mathsf{x}_{t},f)
\log\frac{p(\mathsf{x}_{t},f)}{p(\mathsf{x}_{t})p(f)}$. Meanwhile, $I(\mathcal{Y}_{T};\mathcal{Y}_F)$ corresponds to the joint probability distribution $p(\mathcal{Y}_{T},\mathcal{Y}_F)$, which is defined as
\begin{equation}\label{eq2}
p(\mathsf{y}_{t},\mathsf{y}_f)=\sum_{h_T(\mathsf{x}_{t})=\mathsf{y}_t}\sum_{h_F(f)=\mathsf{y}_f}p(\mathsf{x}_{t},f).
\end{equation}

In this paper, we model our adaptive transfer clustering as performing co-clustering operations on target set $\mathcal{T}$ and auxiliary set $\mathcal{S}$ simultaneously with the shared feature space $\mathcal{F}$. Thus, the objective function can be formulated as
\begin{equation}\label{eq3}
\mathcal{J}=I(\mathcal{X}_{T};\mathcal{F})-I(\mathcal{Y}_{T};\mathcal{Y}_F)+
\lambda [I(\mathcal{X}_{S};\mathcal{F})-I(\mathcal{Y}_{S};\mathcal{Y}_F)].
\end{equation}
In Eq.~(\ref{eq3}), $I(\mathcal{X}_{T};\mathcal{F})-I(\mathcal{Y}_{T};\mathcal{Y}_F)$ represents the \textit{loss} on the target set $\mathcal{T}$ while $I(\mathcal{X}_{S};\mathcal{F})-I(\mathcal{Y}_{S};\mathcal{Y}_F)$ is the one on the auxiliary set $\mathcal{S}$. In order to realize the adaptive transfer, we use $\lambda$ to control the influence of the auxiliary set $\mathcal{S}$ on $\mathcal{J}$. In this study, $\lambda$ is automatically set according to the similarity between $\mathcal{T}$ and $\mathcal{S}$ to ensure a safe transfer. Also, from Eq.~(\ref{eq3}), we can see that, although the two parts of $\mathcal{J}$, $I(\mathcal{X}_{T};\mathcal{F})-I(\mathcal{Y}_{T};\mathcal{Y}_F)$ and
$I(\mathcal{X}_{S};\mathcal{F})-I(\mathcal{Y}_{S};\mathcal{Y}_F)$, are computed separately, they share the same feature space $\mathcal{F}$. This is the key to transferring the knowledge of $\mathcal{S}$.

Our following goal is to minimize the value of the objective function $\mathcal{J}$. We first rewrite the objective function $\mathcal{J}$ with the Kullback-Leibler (KL) divergence~\cite{DBLP:books/daglib/0016881}, and then minimize the reformulated objective function.

Based on Lemma 2.1 in~\cite{DBLP:conf/kdd/DhillonMM03}, we have $I(\mathcal{X}_{T};\mathcal{F})-I(\mathcal{Y}_{T};\mathcal{Y}_F)=D(p(\mathcal{X}_{T},\mathcal{F})\|\tilde p(\mathcal{X}_{T},\mathcal{F}))$. Similarly, $I(\mathcal{X}_{S};\mathcal{F})-I(\mathcal{Y}_{S};\mathcal{Y}_F)=D(q(\mathcal{X}_{S},\mathcal{F})\|\tilde q(\mathcal{X}_{S},\mathcal{F}))$, where $D(\cdot\|\cdot)$ denotes the KL divergence between two probability distributions~\cite{DBLP:books/daglib/0016881} with $D(p\|q)=\sum_{\mathsf{x}}p(\mathsf{x})\log\frac{p(\mathsf{x})}{q(\mathsf{x})}$. $\tilde p(\mathcal{X},\mathcal{F})$ is defined as follows.

\begin{mydef}\label{def2}
Let $\tilde p(\mathcal{X},\mathcal{F})$ be the joint probability distribution of $\mathcal{X}$ and $\mathcal{F}$ with respect to the co-clusters $\mathcal{Y}_{X}$ and $\mathcal{Y}_{F}$; then we have
\begin{equation}\label{eq4}
\tilde p(\mathsf{x},f)=p(\mathsf{y}_\mathsf{x},\mathsf{y}_f)\frac{p(\mathsf{x})p(f)}{p(\mathsf{y}_\mathsf{x})p(\mathsf{y}_f)},
\end{equation}
in which $h_X(\mathsf{x})=\mathsf{y}_\mathsf{x}$ and $h_F(f)=\mathsf{y}_f$, where
\begin{equation}
h_X=\begin{cases}
h_T\text{, if $\mathsf{x}\in\mathcal{X}_T$}\\
h_S \text{, otherwise}
\end{cases}.
\end{equation}
\end{mydef}

Hence, the objective function in Eq.~(\ref{eq3}) can be reformulated as
\begin{equation}\label{eq5}
\begin{split}
\mathcal{J}&=I(\mathcal{X}_{T};\mathcal{F})-I(\mathcal{Y}_{T};\mathcal{Y}_F)+
\lambda [I(\mathcal{X}_{S};\mathcal{F})-I(\mathcal{Y}_{S};\mathcal{Y}_F)]\\
&=D(p(\mathcal{X}_{T},\mathcal{F})\|\tilde p(\mathcal{X}_{T},\mathcal{F}))+\lambda D(q(\mathcal{X}_{S},\mathcal{F})\|\tilde q(\mathcal{X}_{S},\mathcal{F})).
\end{split}
\end{equation}

According to Lemma 2 in~\cite{DBLP:conf/icml/DaiYXY08},
\begin{equation}\label{eq6}
\begin{split}
D(p(\mathcal{X}_{T},\mathcal{F})\| & \tilde p(\mathcal{X}_{T},\mathcal{F}))\\
&=\sum_{\mathsf{y}_{t}\in\mathcal{Y}_{T}}\sum_{h_{T}(\mathsf{x}_{t})=\mathsf{y}_{t}}p(\mathsf{x}_{t})
D(p(\mathcal{F}|\mathsf{x}_{t})\|\tilde{p}(\mathcal{F}|\mathsf{y}_{t}))\\
&=\sum_{\mathsf{y}_{f}\in\mathcal{Y}_{F}}\sum_{h_{F}(f)=\mathsf{y}_{f}}p(f)
D(p(\mathcal{X}_{T}|f)\|\tilde{p}(\mathcal{X}_{T}|\mathsf{y}_{f}))
\end{split}
\end{equation}
and
\begin{equation}\label{eq7}
\begin{split}
D(q(\mathcal{X}_{S},\mathcal{F})\| & \tilde q(\mathcal{X}_{S},\mathcal{F}))\\
&=\sum_{\mathsf{y}_{s}\in\mathcal{Y}_{S}}\sum_{h_{S}(\mathsf{x}_{s})=\mathsf{y}_{s}}q(\mathsf{x}_{s})
D(q(\mathcal{F}|\mathsf{x}_{s})\|\tilde{q}(\mathcal{F}|\mathsf{y}_{s}))\\
&=\sum_{\mathsf{y}_{f}\in\mathcal{Y}_{F}}\sum_{h_{F}(f)=\mathsf{y}_{f}}q(f)
D(q(\mathcal{X}_{S}|f)\|\tilde{q}(\mathcal{X}_{S}|\mathsf{y}_{f})).
\end{split}
\end{equation}

Therefore, we obtain the following equation for the objective function $\mathcal{J}$:
\begin{subequations}
\begin{eqnarray}\label{eq8}
%\begin{split}
\mathcal{J} & =&\sum_{\mathsf{y}_{t}\in\mathcal{Y}_{T}}\sum_{h_{T}(\mathsf{x}_{t})=\mathsf{y}_{t}}p(\mathsf{x}_{t})
D(p(\mathcal{F}|\mathsf{x}_{t})\|\tilde{p}(\mathcal{F}|\mathsf{y}_{t})) \label{eq8a} \\
& +&\lambda 
[\sum_{\mathsf{y}_{s}\in\mathcal{Y}_{S}}\sum_{h_{S}(\mathsf{x}_{s})=\mathsf{y}_{s}}q(\mathsf{x}_{s})
D(q(\mathcal{F}|\mathsf{x}_{s})\|\tilde{q}(\mathcal{F}|\mathsf{y}_{s}))] \nonumber\\
& = &\sum_{\mathsf{y}_{f}\in\mathcal{Y}_{F}}\sum_{h_{F}(f)=\mathsf{y}_{f}}p(f)
D(p(\mathcal{X}_{T}|f)\|\tilde{p}(\mathcal{X}_{T}|\mathsf{y}_{f})) \label{eq8b} \\
&+&\lambda
[\sum_{\mathsf{y}_{f}\in\mathcal{Y}_{F}}\sum_{h_{F}(f)=\mathsf{y}_{f}}q(f)
D(q(\mathcal{X}_{S}|f)\|\tilde{q}(\mathcal{X}_{S}|\mathsf{y}_{f}))]. \nonumber
%\end{split}
\end{eqnarray}
\end{subequations}
\subsection{Optimizing for Clustering}
It is difficult to update $h_{T}$, $h_{S}$ and $h_{F}$ simultaneously. As a result, we update them one by one iteratively\footnote{In this paper, $C_T$, $C_S$ and $C_\mathcal{F}$ are fixed.}. From Eq.~(\ref{eq8a}), we can see that if $h_{F}$ is fixed, $h_{T}$ and $h_{S}$ can be updated separately through the following equations.
\begin{equation}\label{eq9}
h_T(\mathsf{x}_{t})=\arg\min_{\mathsf{y}_{t}\in\mathcal{Y}_{T}}D(p(\mathcal{F}|\mathsf{x}_{t})\|\tilde{p}(\mathcal{F}|\mathsf{y}_{t})) 
\end{equation}
and
\begin{equation}\label{eq10}
h_S(\mathsf{x}_{s})=\arg\min_{\mathsf{y}_{s}\in\mathcal{Y}_{S}}D(q(\mathcal{F}|\mathsf{x}_{s})\|\tilde{q}(\mathcal{F}|\mathsf{y}_{s})). 
\end{equation}

On the other hand, from Eq.~(\ref{eq8b}), if $h_{T}$ and $h_{S}$ are fixed, $h_{F}$ is determined by the equation below:
\begin{equation}\label{eq11}
\begin{split}
h_{F}(f)& =\arg\min_{\mathsf{y}_{f}\in\mathcal{Y}_{F}} p(f)
D(p(\mathcal{X}_{T}|f)\|\tilde{p}(\mathcal{X}_{T}|\mathsf{y}_{f}))\\
&+\lambda_f
[q(f)
D(q(\mathcal{X}_{S}|f)\|\tilde{q}(\mathcal{X}_{S}|\mathsf{y}_{f}))].
\end{split}
\end{equation}

Here for Eq.~(\ref{eq11}), we use $\lambda_f$ to control the influence of the auxiliary set $\mathcal{S}$ based on the similarity of the two sets. More specifically, we consider the similarity of each feature separately; therefore, each feature has its own weight $\lambda_{f}$. In order to get $\lambda_f$, we first define $\mathfrak{D}_f$, the difference of a feature $f$ between $\mathcal{X}_T$ and $\mathcal{X}_S$:

\comment{
\begin{equation}\label{eq12}
%\lambda_{f}=\frac{MaxS_{f}-S_{f}}{MaxS_{f}-MinS_{f}},
\lambda_f=\frac{\max_f S_f-S_f}{\max_f S_f-\min_f S_f},
\end{equation}
where
}
\begin{equation}\label{eq13}
\mathfrak{D}_f=D(q(\mathcal{X}_{S}|f)\|p(\mathcal{X}_{T}|f)).
\end{equation}
Naturally, $\lambda_f$ should be high if the distributions of $f$ between $\mathcal{X}_{T}$ and $\mathcal{X}_{S}$ are similar. Thus, $\lambda_f$ is the result of normalizing $\mathfrak{D}_f$ in a ``reverse'' order taking all features into consideration.
From the feature selection view, we try to select some pivot features which are more useful as signals to assist the clustering. Here we increase the weights of the features whose probability distributions differ more between clusters in a set to let them instruct the clustering. We set $D_W(\cdot\|\cdot)$ as the ``weighted'' KL divergence which takes the importance of each feature into consideration. Thus, Eq.~(\ref{eq9}) can be modified to:
\begin{equation}\label{eq14}
\begin{split}
h_T(\mathsf{x}_{t}) & =\arg\min_{\mathsf{y}_{t}\in\mathcal{Y}_{T}}D_W(p(\mathcal{F}|\mathsf{x}_{t})\|\tilde{p}(\mathcal{F}|\mathsf{y}_{t}))\\
& = \arg\min_{\mathsf{y}_{t}\in\mathcal{Y}_{T}}\sum_fw_{tf}\,p(f|\mathsf{x}_{t})\log\frac{p(f|\mathsf{x}_{t})}{\tilde{p}(f|\mathsf{y}_{t})},
\end{split}
\end{equation}
where $w_{tf}$ is shown as follows:
\begin{equation}\label{eq15}
w_{tf}=\frac{\sum_{k=1}^{C_T}\sum_{c=1,c\neq k}^{C_T}D(\tilde{p}(f|y_{tk})\|\tilde{p}(f|y_{tc}))}
{\max_f\sum_{k=1}^{C_T}\sum_{c=1,c\neq k}^{C_T}D(\tilde{p}(f|y_{tk})\|\tilde{p}(f|y_{tc}))}.
\end{equation}

Similarly, 
\begin{equation}\label{eq16}
\begin{split}
h_S(\mathsf{x}_{s}) & =\arg\min_{\mathsf{y}_{s}\in\mathcal{Y}_{S}}D_W(q(\mathcal{F}|\mathsf{x}_{s})\|\tilde{q}(\mathcal{F}|\mathsf{y}_{s}))\\
& = \arg\min_{\mathsf{y}_{s}\in\mathcal{Y}_{S}}\sum_fw_{sf}\,q(f|\mathsf{x}_{s})\log\frac{q(f|\mathsf{x}_{s})}{\tilde{q}(f|\mathsf{y}_{s})},
\end{split}
\end{equation}
where $w_{sf}$ is shown as follows:
\begin{equation}\label{eq17}
w_{sf}=\frac{\sum_{l=1}^{C_S}\sum_{c=1,c\neq l}^{C_S}D(\tilde{q}(f|y_{sl})\|\tilde{q}(f|y_{sc}))}
{\max_f\sum_{l=1}^{C_S}\sum_{c=1,c\neq l}^{C_S}D(\tilde{q}(f|y_{sl})\|\tilde{q}(f|y_{sc}))}.
\end{equation}

Our algorithm is given in \textbf{Algorithm~\ref{alg:1}} (\textbf{\ALG}) in detail.
\comment{However, minimizing $\mathcal{J}$ is not so easy since it is non-convex and there is no good solution currently to directly optimize  $\mathcal{J}$.}



\comment{
\begin{equation}\label{equa1}
\begin{array}{r}
\DIS(W_F,W_I) = \|D_B\|-\|D_I\|+\lambda_{W_F}\|\mathbf{1_F}-W_F\|^2 \\
                                          +\lambda_{W_I}\|\mathbf{1_I}-W_I\|^2s ,
\end{array}
\end{equation}
where $D_B,\text{ } D_I \in \mathbb{R}^{n_f}$ are column vectors. $D_B$ is used to estimate the distance between $\mathcal{D}_{S}$ and $\mathcal{D}_{T}$ (the smaller, the better). Besides, we let $D_I$ to estimate the difference of $A_i$ among different classes, which can be also considered as the inner distance of a domain. $\bold{1_F} \in \mathbb{R}^{n_f}$ and $\bold{1_I} \in \mathbb{R}^{n_S}$ are column vectors with all ones. $\|\bold{1_F}-W_F\|^2$ and $\|\bold{1_I}-W_I\|^2$ are to control the change of two domains. $\lambda_{W_F}$ and $\lambda_{W_I}$ are trade-off factors. Here, we use KL divergence\cite{KL} to calculate the distance of features. Thus Eq.(\ref{equa1}) can be written as:
\begin{equation}\label{equa2}
\begin{split}
\DIS(W_F,W_I) =& \sum W_{F_i}*D_{KL}(p(A_i)\|q(A_i))\\
& -\sum W_{F_i}*(\sum_{y_j,y_k\in \mathcal{Y}}D_{KL}(p_{j}(A_i)\|p_{k}(A_i))\\
& +\lambda_{W_F}\|\bold{1_F}-W_F\|^2+\lambda_{W_I}\|\bold{1_I}-W_I\|^2,
\end{split}
\end{equation}
 where $p(A_i)$ and $q(A_i)$ are probability distributions of feature $A_i$ in $\mathcal{D}_{S}$ and $\mathcal{D}_{T}$ respectively. $p_j(A_i)$ and $p_k(A_i)$ represent the distributions of $A_i$ on class $j$ and $k$ in $\mathcal{D}_{S}$ respectively. $D_I$ only consider the inner distance of $\mathcal{D}_{S}$ due to the absence of class labels in $\mathcal{D}_{T}$. $D_{KL}(p(x)\|q(x))$ is the KL-divergence defined as follows:
\begin{equation}\label{KL}
\begin{split}
D_{\mathrm{KL}}(p(x)\|q(x)) &= \int_{-\infty}^\infty p(x) \log \frac{p(x)}{q(x)}\\
                            &=\sum_x p(x) \log \frac{p(x)}{q(x)},
\end{split}
\end{equation}
Note that KL-divergence is always non-negative due to the Gibbls' inequality\cite{Non:nega}. In addition, $p(A_i)$,  $p_j(A_i)$ and $p_k(A_i)$ are the distributions of $A_i$ consisting of the weighted training instances. Each $A_i$ has the internals $V_i = \{ v_{i1},\cdots, v_{il}\}$. Eq.(\ref{equa2}) can be rewritten as:
\begin{equation}{\label{equa3}}
\begin{split}
\DIS(W_F,W_I) &= \sum_{i} W_{F_i}*D_{KL}(p(W_{I_m} \cdot \mathcal{D}_{S_{mi}})\|q(\mathcal{D}_{T_{ri}}))\\
& \quad-\sum_{i} W_{F_i}*(\sum_{y_j,y_k\in \mathcal{Y}}D_{KL}(p_{j}(W_{I_m} \cdot \mathcal{D}_{S_{mi}})\\
&\quad\|p_{k}(W_{I_m} \cdot \mathcal{D}_{S_{mi}})))\\
& \quad+\lambda_{W_F}\|\bold{1_F}-W_F\|^2+\lambda_{W_I}\|\bold{1_I}-W_I\|^2\\
%%%\end{split}
%%%\end{equation*}
%%%\begin{equation}\label{equa3}
%%%\begin{split}
&= \sum_{i} W_{F_i}*\sum_{u=1}^{il} (p(v_{u})\log \frac{p(v_{u})}{q(v_{u})})\\
&\quad-\sum_{i} W_{F_i}*(\sum_{y_j,y_k\in \mathcal{Y}}\sum_{u=1}^{il} (p_j(v_{u})\log \frac{p_j(v_{u})}{p_k(v_{u})})\\
&\quad+\lambda_{W_F}\|\bold{1_F}-W_F\|^2+\lambda_{W_I}\|\bold{1_I}-W_I\|^2,
\end{split}
\end{equation}
where $\mathcal{D}_{S_{mi}}$ is the $ith$ feature of instance $\x_{S_m} \in \mathcal{D}_{S}$. Accordingly,
$\mathcal{D}_{T_{ri}}$ is the $ith$ feature of instance $\x_{T_r} \in \mathcal{D}_{T}$. Our objective is to acquire $W_F$ and $W_I$ making $\DIS(W_F,W_I)$ as small as possible.

Now, we introduce how to iteratively update $W_F$ and $W_I$.
\comment{\subsection{Feature Weighting}}
In the first step, we fix $W_I$ and update $W_F$ to optimize $\DIS(W_F,W_I)$ by computing the derivate of $\DIS(W_F,W_I)$ with parameter $W_F$:
\begin{equation}\label{equa4}
\begin{split}
\frac{\partial \DIS(W_F,W_I)}{\partial W_{F_i}}&= \sum_{u=1}^{il} (p(v_{u})\log \frac{p(v_{u})}{q(v_{u})})\\
&\quad-(\sum_{y_j,y_k\in \mathcal{Y}}\sum_{u=1}^{il} (p_j(v_{u})\log \frac{p_j(v_{u})}{p_k(v_{u})})\\
& \quad-2*\lambda_{W_F}*(1-W_{F_i}).
\end{split}
\end{equation}
Given $W_I$ and solving \begin{equation}\label{eqdev1}\frac{\partial \DIS(W_F,W_I)}{\partial W_{F_i}} = 0,\end{equation} we can get the new value of $W_{F_i}$. %$W_{F_i}$ is normalized to range from 0 to 1.
\comment{\subsection{Instance Weighting}}
Next, we  update $W_I$ when $W_F$ is fixed. We calculate the derivate of $\DIS(W_F,W_I)$ with parameter $W_I$:
\begin{equation}\label{equa5}
\begin{split}
\frac{\partial \DIS(W_F,W_I)}{\partial W_{I_m}}=& \sum_{i} (W_{F_i}* \partial(p(v_{u})\log \frac{p(v_{u})}{q(v_{u})})\\
&-\sum_{i} W_{F_i}*\sum_{y_j,y_k\in \mathcal{Y}}\partial(p_j(v_{u})\log \frac{p_j(v_{u})}{p_k(v_{u})})\\ 
& -2*\lambda_{W_I}*(1-W_{I_i}),
\end{split}
\end{equation}
where $v_u$ equals to $\mathcal{D}_{S_{mi}}$. It is apparent that $p(v_u)$, $p_j(v_u)$, $p_k(v_u)$ are the first-order function on $W_{I_m}$'s reciprocal. Let \begin{equation}\label{eqdev2}\frac{\partial \DIS(W_F,W_I)}{\partial W_{I_m}} = 0\end{equation} and we obtain the new value of $W_{I_m}$. We can use feature weighting and instance weighting iteratively until convergence or for the specific times. We summarize the above process in \textbf{Algorithm 1} .
\input{tex/algorithm}
%\pagebreak


\comment{\begin{table}
\centering
%\caption{}
\begin{tabular}{l} \hline
\textbf{Algorithm 1} Iterative \textbf{F}eature and \textbf{I}nstance Weigh\textbf{T}ing  
\\Transductive Transfer Learning Algorithm (\textbf{FIT}) \\
 for Cross-Language Text Classification\\ \hline
\textbf{Input:} \\
$\mathcal{D}_{S}$: source domain; 
$\mathcal{D}_{T}$: target domain; 
$T$: a translator; \\
$iterNum$: The number of iteration;
$t^{F}$: The threshold for \\feature deletion;
$t^{I}$: The threshold for instance deletion.\\ \hline
\textbf{Output:} the final function $f_P: \mathcal{X}_{T} \rightarrow \mathcal{Y}_{T}$\\ \hline
\textbf{Steps:} \\
1: Translate the feature space of target domain $\mathcal{X}_{T}$ into \\the source domain: $\mathcal{X}_{T}^{T} = T(\mathcal{X}_{T})$\\
2: \textbf{for} each $x_{S_i} \in \mathcal{X}_{S}$ \textbf{do}\\
3: \quad $w^{I}_{i}$ = 1;\\
4: \textbf{end for}\\
5: \textbf{for} each $A_i$ \textbf{do}\\
6: \quad $w^{F}_{i}$ = 1;\\
7: \textbf{end for}\\
8: \textbf{for} t = 1 , $\dotsb$, $iterNum$ \textbf{do}\\
9: \: Apply the instance weighting method \\
\quad\quad in Section~\ref{sec:iw} (Eqs. 2-3)  to revise $w^{I}_{i}$\\
10: Apply the feature weighting method \\
\quad\quad in Section~\ref{sec:fw} (Eqs. 5-10) and $t^F$ to revise $w^{F}_{i}$\\
11: \textbf{end for}\\
12: Apply \textit{equation} (11) and $t^{I}$ to delete ``bad" instances \\  \quad\quad to get the revised training data $\mathcal{X}_{S}^{R}$\\
13: Build a classifier $f_P$ based on $\mathcal{X}_{S}^{R}$ \\
14: Return $f_P$\\
\hline\end{tabular}
\end{table}}
}

\comment{\subsection{Complexity Analysis}

Next, we analyze the running time of our algorithm. Recall ${n_s}$ is the number of instances in the source domain, ${n_t}$ is the number of instances in the target domain, ${n_f}$ is the number of feature and ${|\mathcal{Y}|}$ is the number of class label. According to Eq.~{\ref{equa4}}, we calculate the KL-divergence between any two different class label for each feature. Therefore the running time of updating ${W_F}$ is $O({n_f}{|\mathcal{Y}^2|}{n_s})$. Similarly, the running time complexity of updating ${W_I}$ is $O({n_f}({n_s}+{n_t}+{|\mathcal{Y}|^2}{n_s}))$ (base on Eq.~(\ref{equa5})). Assuming the \textbf{Algorithm 1} takes on-average ${n_d}$ rounds to stop, the total running time is $O({n_d}{n_f}({n_s}+{n_t}+{|\mathcal{Y}|^2}{n_s}))$.
}