
%%%%%%%%%%%%%%%%%%%%%%% file typeinst.tex %%%%%%%%%%%%%%%%%%%%%%%%%
%
% This is the LaTeX source for the instructions to authors using
% the LaTeX document class 'llncs.cls' for contributions to
% the Lecture Notes in Computer Sciences series.
% http://www.springer.com/lncs       Springer Heidelberg 2006/05/04
%
% It may be used as a template for your own input - copy it
% to a new file with a new name and use it as the basis
% for your article.
%
% NB: the document class 'llncs' has its own and detailed documentation, see
% ftp://ftp.springer.de/data/pubftp/pub/tex/latex/llncs/latex2e/llncsdoc.pdf
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


\documentclass[runningheads,a4paper]{llncs}
\usepackage{hyperref}
\usepackage{amssymb}
\setcounter{tocdepth}{3}
\usepackage{graphicx}

%\usepackage{url}
%\urldef{\mailsa}\path|{alfred.hofmann, ursula.barth, ingrid.haas, %frank.holzwarth,|
%\urldef{\mailsb}\path|anna.kramer, leonie.kunz, christine.reiss, nicole.sator,|
%\urldef{\mailsc}\path|erika.siebert-cole, peter.strasser, lncs}@springer.com|    
%\newcommand{\keywords}[1]{\par\addvspace\baselineskip
%\noindent\keywordname\enspace\ignorespaces#1}

\begin{document}

\mainmatter  % start of an individual contribution

% first the title is needed
\title{It takes two to tango: Learning Sparse Coupled Representations with applications to Cross-Lingual Information Retrieval}

% a short form should be given in case it is too long for the running head
%\titlerunning{Lecture Notes in Computer Science: Authors' Instructions}

% the name(s) of the author(s) follow(s) next
%
% NB: Chinese authors should write their first names(s) in front of
% their surnames. This ensures that the names appear correctly in
% the running heads and the author index.
%
\author{Anonymous Authors%
\thanks{Please note that the MATLAB code for the proposed algorithm would be made available on the project site after the reviewing period.}
%\and Ursula Barth\and Ingrid Haas\and Frank Holzwarth\and\\
%Anna Kramer\and Leonie Kunz\and Christine Rei\ss\and\\
%Nicole Sator\and Erika Siebert-Cole\and Peter Stra\ss er
}
%
%\authorrunning{Lecture Notes in Computer Science: Authors' Instructions}
% (feature abused for this document to repeat the title also on left hand pages)

% the affiliations are given next; don't give your e-mail address
% unless you accept that it will be published
\institute{%Springer-Verlag, Computer Science Editorial,\\ Tiergartenstr. 17, 69121 Heidelberg, Germany\\
Institute1
%\mailsa\\
%\mailsb\\
%\mailsc\\
%\url{http://www.springer.com/lncs}
}

%
% NB: a more complex sample for affiliations and the mapping to the
% corresponding authors can be found in the file "llncs.dem"
% (search for the string "\mainmatter" where a contribution starts).
% "llncs.dem" accompanies the document class "llncs.cls".
%

\toctitle{Lecture Notes in Computer Science}
\tocauthor{Authors' Instructions}
\maketitle


\begin{abstract}
A sparse dictionary based coupled representation learning approach is proposed to represent documents for the task of Cross-Language Document Retrieval. Under the proposed \emph{Coupled Dictionary Learning} (CDL) algorithm, a pair of dictionaries is learnt to represent paired documents from the language pair for the task of retrieving documents across languages. In addition, two different mapping functions are learnt to transfer document representations from one language to the other. Based on the concept of Self Taught Learning (\cite{selftaught}), in addition to the parallel corpora, we make use of readily available unlabelled independent documents in either/both of the languages to improve retrieval performance. We show that the proposed algorithm learns a coupled representation using parallel documents and outperforms the existing approaches on standard parallel Wikipedia corpus using standard metrics.

\keywords{Sparse Representation, Self-Taught Learning, Dictionary Learning, Cross-Lingual IR}

\end{abstract}


\section{Introduction}
\label{intro}

%\begin{itemize}
%	\item The development of multiliguality on the web these days \& its usefulness
%	\\
	As the linguistic diversity of textual resources 
increases, and the need for access to those resources 
grows, there is also greater demand for efficient 
information retrieval (IR) methods which are 
truly language-independent. The dynamic and diverging nature of human languages leads to many
varieties of natural language. These variations range from the individual level, to regional and social
dialects, and up to seemingly separate languages and language families. However, in recent years
there have been considerable achievements in data-driven approaches to computational linguistics,
which exploit the redundancy in the encoded information and the structures used. Most of these
approaches are not specific to a particular language and are capable of finding the commonalities
across languages.

%Few short comings of existing work...
%==I need bad things about existing work.==\\

In this paper, we posit the cross-lingual information retrieval problem in a Multi-View Learning setting and propose a novel coupled dictionary learning algorithm which simultaneously learns representations of documents from two different languages alongside learning mapping functions for transforming representations from one language to the other. By dictionary we refer to a set of basis atoms using which each document can be represented as a sparse linear combination of such basis atoms. We explore the use of the proposed algorithm on the task of cross-lingual document retrieval. 
\\
Focussing on the task of cross-language information retrieval, we wish to answer the following questions:
\begin{itemize}
	\item Can we simultaneously learn representations in both the languages in a coupled fashion which can well characterize the structural domains of both the languages?
	\item Can we learn mapping functions which can reveal the intrinsic relationship between the representations of both the languages?
	\item How well do the mapped representations in the other language match with the original representation?
	\item Can we benefit from enormous amount of unlabelled, non-aligned corpora available for the individual  languages? If yes, how do we incorporate learning from unlabelled data so that it helps improve the performance of the task at hand?
\end{itemize}

\hspace{-5mm}In answering these questions we make the following novel contributions,
\begin{itemize}
	\item We present a novel algorithm which simultaneously learns coupled representations and corresponding mapping functions.
	\item We make use of unlabelled data to improve the retrieval performance.
	\item We show the effectiveness of mapping functions for the task of cross-language document retrieval.
	\item We empirically establish the efficacy of the proposed algorithm by comparing it against established baselines on standard datasets.
\end{itemize}

\hspace{-5mm}In the following sections we first provide a brief overview of the challenges faced and existing approaches for Cross-Language IR in Section~\ref{clir}, following which we formulate the problem in a multi-view learning setting in Section~\ref{formulation}. We present prior work in Dictionary Learning in Section~\ref{prior} and describe in detail the Coupled Dictionary Learning algorithm in Section~\ref{algo}, followed by the optimization steps (Section~\ref{opti}) to efficiently solve the objective. Section~\ref{exp} describes the experimental datasets, followed by the results in Section~\ref{results}, while Section~\ref{conclude} concludes.
	
%	\item The importance of having/doing multilingual IR
%	\item The challenges present in multilingual IR
%	\item How the existing approaches fail, in very short
%	\item Posit this problem as a Multi View problem
%	\item Propose a solution in brief
%	\item Write in points what questions do we wish to answer
%	\item Write in points what contributions we make
%\end{itemize}

\section{Cross Lingual IR: Challenges \& Existing Approaches}
\label{clir}
Cross-language Information Retrieval (CLIR) can be described at an abstract level as the task of retrieving
documents across languages.
In some sense, the \textit{Cross-Lingual Information Retrieval} task represents one extreme case of the so called
\textit{vocabulary mismatch problem}, i.e. the problem that the vocabulary of a user query and the vocabulary of
relevant documents can differ substantially. The bag-of-words (BOW) model notoriously suffers from the
vocabulary mismatch problem as the different dimensions are inherently orthogonal, thus neglecting relations between different words in the same language as well as across languages. Therefore, the challenging
task of retrieving documents to queries in other languages requires models going beyond the traditional
bag-of-words model.\\\\
When tackling the task of retrieving documents across languages, there seem to be essentially two main paradigms:
\begin{enumerate}
\item Translation-based approaches which rely either on a translation of documents or queries. For the translation of queries, one typically relies on bilingual dictionaries (this sense of dictionary is different from that used in this work).

\item Mapping of queries and documents into a multilingual space in which similarity between queries and documents can be computed uniformly across languages.

\end{enumerate}

Both kinds of approaches have their own strengths and weaknesses. Dictionary based approaches treat source documents independently, i.e., each source language document is translated independently of other documents. Moreover, after translation, the relationship of a given source document with the rest of the source documents is ignored. On the other
hand, supervised approaches use all the source and target language documents to infer an interlingual representation, but their strong dependency on the training data prevents them from generalizing well
to test documents from a different domain. \\

To the best of our knowledge there is no prior work which learns two separate but coupled representations for representing documents from both the languages using parallel corpora alongside utilizing the abundantly available unlabelled, non-aligned documents in either or both the languages. In this paper, we propose a novel coupled dictionary learning algorithm which learns coupled representations for documents from both the languages alongside learning mapping functions which transform representations from one language to the other using which we compute similarity between documents from different languages. We describe the basic dictionary learning algorithm in the next section following which we next formulate the cross-language document retrieval problem in terms of a coupled dictionary learning problem and present an algorithm to solve the same.



%\begin{itemize}
%	\item Importance of CLIR
%	\item Key challenges faced
%	\item Existing approaches
%	\item What's missing \& how we come in to contribute
%\end{itemize}

%\section{Problem Formulation : MultiView Setting}
%\label{pf}

%\begin{itemize}
%	\item Briefly describe MVL
%	\item Formulate the problem
%	\item Brief overview of our approach
%\end{itemize}

\section{Coupled Dictionary Learning}
\label{cdl}

In this section we describe the Coupled Dictionary Learning algorithm in detail. We first cover the prior art of Dictionary Learning in Section~\ref{prior}, followed by the problem formulation and algorithmic description. We describe the Dictionary Initialization phase in detail in Section~\ref{dictinit}, wherein we highlight the advantage of using unlabelled (i.e., non-aligned) documents from both the languages. In Section~\ref{opti} we describe an efficient way to solve the objective presented using the framework of the K-SVD algorithm.


\subsection{Dictionary Learning : Prior Art}
\label{prior}
Recent years have witnessed a growing interest in the
search for sparse representations of signals. Using an overcomplete dictionary matrix $D \in\Re^{n \times K} $ that contains K prototype signal-atoms for columns $\{d_j\}_{j=1}^K$, a signal $y \in \Re^n$ can
be represented as a sparse linear combination of these atoms.
The representation of $y$ may either be exact $y=Dx$ or approximate $y\thickapprox Dx$, satisfying $\|y-Dx\|_p\leq \varepsilon$. The vector
$x\in \Re^K$ contains the representation coefficients of the signal $y$. In approximation methods, typical norms used for measuring the deviation are the $l^p$-norms for $p = 1, 2$ and $\infty$. In this paper, we shall concentrate on the case of $p=2$.\\

If $n < K$ and $D$ is a full-rank matrix, an infinite number
of solutions are available for the representation problem, hence
constraints on the solution must be set. The solution with the
fewest number of non-zero coefficients is certainly an appealing
representation. This sparsest representation is the solution of
either
\begin{equation}
	\min_x \|x\|_0 \hspace{4mm} \mbox{subject to} \hspace{4mm} y = Dx
\end{equation}
or
\begin{equation}
	\min_x \|x\|_0 \hspace{4mm} \mbox{subject to} \hspace{4mm} \|y - Dx\|_2 \leq \varepsilon
\end{equation}
where $\|\cdot\|_0$ is the $l_0$-norm which measures the number of non-zero entries of a vector.\\

%Applications that can benefit from the sparsity and over completeness concepts (together or separately) include compression, regularization in  inverse problems, feature extraction, andmore. Indeed, the success of the JPEG2000 coding standard can be attributed to the sparsity of the wavelet coefficients of natural images [1]. In denoising, wavelet methods and shift-invariant variations that exploit overcomplete representation are among the most effective known algorithms for this task [2]–[5]. Sparsity and overcompleteness have been successfully used for dynamic range compression in images [6], separation of texture and cartoon content in images [7], [8], inpainting [9], and more. Here we pose the problem of Dictionary Learning and tell how useful it is in various applications. Also discuss some previous work in this. %[references:http://intranet.daiict.ac.in/~ajit_r/IT530/KSVD_IEEETSP.pdf] \\

Dictionary learning falls into a general category of techniques
known as matrix factorization. In this paper, we additionally enforce non-negativity constraint on the factor matrices and such factorizations have been widely studied as non-negative matrix factorization (NMF) \cite{nmf}. 
Applications that can benefit from the sparsity and over completeness concepts (together or separately) include compression, regularization in  inverse problems, feature extraction, and more.% Indeed, the success of the JPEG2000 coding standard can be attributed to the sparsity of the wavelet coefficients of natural images. Sparsity and dictionary learning find vast applications in feature extraction, especially in various computer vision tasks.
Extraction of the sparsest representation is a hard problem that has been extensively investigated in the past few years. \cite{ksvd} presents the K-SVD algorithm which is an iterative method that alternates between sparse coding of the examples based on the current dictionary and a process of updating the dictionary atoms to better fit the data. The update of the dictionary columns is combined with an update of the sparse representations, thereby accelerating convergence. The K-SVD algorithm is flexible and can work with any pursuit method (e.g., basis pursuit, FOCUSS, or matching pursuit). We next formulate the cross-language information retrieval problem as a coupled dictionary learning algorithm and present an algorithm to efficiently solve the proposed objective.

\subsection{Problem Formulation}
\label{formulation}
We consider the task of cross-lingual document retrieval wherein given a query document in one language, the goal is to find the most similar
document from the corpus in another language. Using the conventional vector space model with TF-IDF (Term Frequency--Inverse Document Frequency) term-weighting, we represent the documents in the language pair $\langle l_1,l_2\rangle$ as an $n$-dimensional vector with the matrices $Y_{L_1} \in \Re^{n\times N}$ and $Y_{L_2} \in \Re^{n\times N}$ representing the entire training dataset available, with each column representing a document and the corresponding columns in the two matrices representing aligned documents in the parallel corpora.
\\\\
The cross-lingual document representation problem can be formulated as: given a parallel corpora of a language pair $\langle l_1,l_2\rangle$, can we learn document representations in each of the languages ($X_{L_1} \in \Re^{K\times N}$ and $X_{L_2} \in \Re^{K\times N}$) along with their corresponding mappings ($T_{Y_{L_1}\rightarrow Y_{L_2}}$ and $T_{Y_{L_2}\rightarrow Y_{L_1}}$) so as to perform well in the challenging task of retrieving documents for queries in other languages. Here $N$ is the number of training documents we have in each language, $n$ is the dimension of each input vector and $K$ is the number of dictionary atoms in each of the dictionaries; for simplicity we keep the number of dictionary atoms the same in both the dictionaries. 
\\\\
The proposed algorithm takes in the TF-IDF based document representations $Y_{L_1}$ and $Y_{L_2}$ as input and outputs two dictionaries $D_{L_1}$ and $D_{L_2}$ (one for each language), the learnt sparse representations of the documents $X_{L_1}$ and $X_{L_2}$ and the mappings ($T_{Y_{L_1}\rightarrow Y_{L_2}}$ and $T_{Y_{L_2}\rightarrow Y_{L_1}}$) to transform the representations between languages. We next describe the proposed Coupled Dictionary Learning in detail.

\subsection{Coupled Dictionary Learning}
\label{algo}
In this work, we propose a simple yet effective model to solve the complex task of learning document representations for cross-lingual information retrieval problems. Specifically, we learn a dictionary pair and mapping functions simultaneously. The pair of dictionaries aim to characterize the two languages and the mapping functions reveal the intrinsic relationship between representations of the two languages.
\\\\
Since each pair of documents in the parallel corpora refers to the same article, it is reasonable to assume that there exist transformations from one representation of the document to the other. Some coupled dictionary learning frameworks have been proposed earlier (\cite{coupled1}, \cite{coupled2}) for the task of image super-resolution and image-style transformation. These algorithms assume a single coupled subspace to find the representation coefficients of the image pair in addition to the fact that the representations learnt should be strictly equal. However, these assumptions are too strong to address the flexibility of image structures
in different styles or the language representations in the language pair. In this paper, we relax these assumptions and assume that there exists a dictionary pair over which the representations of the two languages have stable mappings. We build our algorithm on top of the framework proposed in \cite{scdl} and introduce a new objective function which learns two different mappings instead of a single one to transform representations from one language representation to the other.
\\\\
We denote by $Y_{L_1} \in R^{n\times N}$ and $Y_{L_2} \in R^{n\times N}$ the training datasets formed by documents in the parallel corpora of the two languages. The corresponding dictionaries are denoted as $D_{L_1} \in R^{n\times K}$ and $D_{L_2} \in R^{n\times K}$ with the mapping functions $T_{Y_{L_1}\rightarrow Y_{L_2}} \in R^{K\times K}$ and $T_{Y_{L_2}\rightarrow Y_{L_1}} \in R^{K\times K}$, with $K$ being the number of dictionary atoms. Our framework is based on the Semi-Coupled Dictionary Learning algorithm proposed in \cite{scdl}. 
\\\\
We propose to minimize the following dictionary learning objective:
%\begin{equation*}
\\\\
$ <D_{L_1},D_{L_2},X_{L_1},X_{L_2},T_{Y_{L_1}\rightarrow Y_{L_2}} T_{Y_{L_2}\rightarrow Y_{L_1}}> \hspace{2mm} = $

\vspace{-2mm}
\begin{center}
$min_{\{D_{L_1},D_{L_2},T_{Y_{L_1}\rightarrow Y_{L_2}},T_{Y_{L_2}\rightarrow Y_{L_1}}\}} \parallel Y_{L_1}-D_{L_1}X_{L_1}\parallel_2^2 + \parallel Y_{L_2}-D_{L_2}X_{L_2}\parallel_2^2 $ \\ $+\hspace{2mm} \alpha\parallel X_{L_2}-\hspace{1mm}T_{Y_{L_1}\rightarrow Y_{L_2}}X_{L_1}\parallel_2^2 \hspace{2mm} + \hspace{2mm} \beta\parallel X_{L_1}-\hspace{1mm} T_{Y_{L_2}\rightarrow Y_{L_1}}X_{L_2}\parallel_2^2$
\end{center}
\vspace{-0.5mm}
s.t. $\forall i, \|x_i\|_0 \leq T$ 
and $\|x_i^{'}\|_0 \leq T $
%
%\end{equation*}
where $X_{L_1}=\left[ x_1,x_2,\dots,x_N\right] \in R^{K \times N}$ 
are the sparse codes of the input data of language $l_1$ and $X_{L_2}=\left[ x_1^{'},x_2^{'},\dots,x_N^{'}\right]  \in R^{K\times N}$ are the sparse codes of the input data of language $l_2$ and $T$ is the sparsity constraint factor.\\\\
The term $\parallel Y_{L_i}-D_{L_i}X_{L_i}\parallel_2^2$ 
for 
$ i \in \{1,2\} $ represents the reconstruction error for documents of both the languages which intuitively implies how well the learnt representations represent the original documents. $\alpha$ and $\beta$ control the relative contribution between reconstructive and mapping regularizations. By $\parallel X_{L_2}-T_{Y_{L_1}\rightarrow Y_{L_2}}X_{L_1}\parallel_2^2$ we intend to minimize the mapping error between the transformed sparse codes of a document in language $l_1$ and the corresponding document representation in $l_2$, while by $\parallel X_{L_1}-T_{Y_{L_2}\rightarrow Y_{L_1}}X_{L_2}\parallel_2^2$ we intend to minimize the mapping error between the transformed sparse codes of a document in language $l_2$ and the corresponding document representation in $l_1$. This is the main contribution of our paper as we believe that if both the languages are resource scarce then ideally we should penalize errors in both the mapping functions $T_{Y_{L_1}\rightarrow Y_{L_2}}$ and $T_{Y_{L_2}\rightarrow Y_{L_1}}$. \\

\textbf{\hspace{-5mm}Motivation for separate transformations:\\}
When dealing with cross-lingual information retrieval task wherein only one of the languages is resource scarce we might not need the transformations from the resource-rich language to the resource scarce language. In this case only one transformation should suffice. On the other hand, for the case when both the languages are resource scarce, we might have a language in which unlabelled data is more readily available than in the corresponding language, in which case the initialized dictionary would be more capable of giving better results and hence we might want to transform the document from its original language to the other language to perform retrieval tasks. This is our main motivation to have two separate transformation functions instead of a single one. By having separate transformation functions we penalize the mapping errors in both the transformations and hence get optimized transformations for both the languages. Note that in the proposed model, the coding coefficients of $X_{L_1}$ and $X_{L_2}$ are related by the mapping functions $T_{Y_{L_1}\rightarrow Y_{L_2}}$ and $T_{Y_{L_2}\rightarrow Y_{L_1}}$ using which we could transform a document representation in language $l_1$ to its corresponding representation in $l_2$ and vice-versa.


\subsection{Dictionary Initialization}
\label{dictinit}
We need to initialize the parameters $D_{L_1}$, $D_{L_2}$, $T_{Y_{L_1}\rightarrow Y_{L_2}}$ and $T_{Y_{L_2}\rightarrow Y_{L_1}}$. For $D_{L_i}$, $i \in \{1,2\}$, we employ several iterations of K-SVD for each dictionary using unlabelled data from the corresponding language. This is in the spirit of the \textit{Self-Taught Learning} (\cite{selftaught}) framework wherein unlabelled data is used to learn an initial representation in an unsupervised manner. To the best of our knowledge, none of the existing approaches employed for Cross-Lingual information retrieval tasks makes use of unlabelled data to improve performance. The dictionary initialization step in our algorithm makes use of unlabelled data available in either (or both) the languages to learn an initial representation for those documents independently. By unlabelled we refer to non-aligned documents which might be readily available in any language. Given the initialized dictionaries, we perform the original K-SVD to compute the sparse codes $X_{L_i}$, $i \in \{1,2\}$ of the training data $Y_{L_i}$, $i \in \{1,2\}$ and use them to initialize the mapping parameters.

%\begin{itemize}
%	\item Re-introduce the imp of UFLDL/Self-Taught learning.	
%	\item Talk about the Dict Learning.
%	\item Give the equations.
%\end{itemize}

\subsection{Optimization}
\label{opti}
%Here all the procedures to solve it come. Discuss advantages over SCDL.
We use the efficient K-SVD algorithm to find the optimal solution for all parameters simultaneously. This is quite different from the original approach as adopted in \cite{scdl}. Since the objective is not jointly convex in all the parameters, the authors in \cite{scdl} use an iterative algorithm to alternately optimize the parameters. Instead of following that approach, we iterate between the representations of the two languages using a K-SVD based implementation to find the optimal solutions for all parameters. With the initialization of the dictionary pair $D_{L_1}$ and $D_{L_2}$, and the mapping functions $T_{Y_{L_1}\rightarrow Y_{L_2}}$ and $T_{Y_{L_2}\rightarrow Y_{L_1}}$, we iterate between the solutions of the following two equations:
\begin{center}

	$<D_{new}^1,X^1> = \arg\min_{\{D_{new}^1,X^1\}} \parallel Y_{new}^1 -D_{new}^1 X^1 \parallel_2^2$
	\hspace{4mm}\\
	
	and\\

	$<D_{new}^2,X^2> = \arg\min_{\{D_{new}^2,X^2\}} \parallel Y_{new}^2 - D_{new}^2 X^2 \parallel_2^2$

\end{center}
s.t. $\forall i, \|x_i^1\|_0 \leq T$ and $\|x_i^2\|_0 \leq T $
where:\\
%\begin{center}
	$Y_{new}^1 = $
	%$\begin{pmatrix}
	$\left(
	\begin{array}{c}
		Y_{L_1} \\ \sqrt{\alpha}\hspace{2mm} X_{L_2}
	\end{array}
	\right)$ $;	$
$	
	D_{new}^1 =	\left(
	\begin{array}{c}
		D_{L_1}\\\sqrt{\alpha}\hspace{2mm} T_{Y_{L_1}\rightarrow Y_{L_2}}
			\end{array}
	\right)
	$
	%\end{pmatrix}$
	
	$\hspace{-4mm}Y_{new}^2 = $ $\left(
	\begin{array}{c}
		Y_{L_2}\\\sqrt{\beta}\hspace{2mm} X_{L_1}
			\end{array}
	\right)
	$ $;$	$D_{new}^2 =$
$\left(
	\begin{array}{c}
		D_{L_2}\\\sqrt{\beta}\hspace{2mm} T_{Y_{L_2}\rightarrow Y_{L_1}}
		\end{array}
	\right)\\
	$

%\end{center}
The matrices $D_{new}^1$ and $D_{new}^2$ are $L_2$-normalized column-wise. The equations presented above are exactly the problem which K-SVD \cite{ksvd} solves. Our algorithm learns a pair of dictionaries alongside mapping functions using which we can represent documents in both the languages and can map representations from one language to another so as to solve cross-lingual information retrieval tasks. We next discuss the application of the proposed algorithm to the task of cross-lingual document retrieval and mate retrieval.


\section{Coupled Representation based CLIR}
\label{cdl4clir}
Thus far, we have presented our \textit{Coupled Dictionary Learning (CDL)} algorithm to learn a pair of dictionaries and the corresponding mapping functions. We evaluate our algorithm on the task of Cross-Lingual document retrieval on standard dataset and compare our results with standard baselines. Since the dictionaries are $l_2$-normalized, the output obtained cannot directly be used to represent documents in the two languages. We describe the method which transforms the dictionaries to finally yield the document representations, followed by a detailed description of the dataset, evaluation metrics and the corresponding results obtained.

\subsection{Cross Lingual Document Representation}
\label{representation}
We obtain $D_{L_i} = [d_1^i, d_2^i, ... , d_k^i]$, for $i \in \{1,2\}$, $T_{Y_{L_1}\rightarrow Y_{L_2}}$ = $[t_1^1, t_2^1, ... , t_k^1]$  and $T_{Y_{L_2}\rightarrow Y_{L_1}}$ = $[t_1^2, t_2^2, ... , t_k^2]$ by employing K-SVD algorithm in an iterative manner to the equations presented above. We cannot simply use these for testing since these are $L_2$-normalized in $D_{new}^i$ jointly in our algorithm. Hence, we compute the desired dictionaries and mapping transformations as follows:

\begin{center}
$D_{L_1} = 
\left\lbrace 
\frac{d_1^1}{\parallel d_1^1 \parallel_2}
,
 \frac{d_2^1}{\parallel d_2^1 \parallel_2}, ..., \frac{d_k^1}{\parallel d_k^1 \parallel_2} \right\rbrace  ; \hspace{2mm} D_{L_2} = \left\lbrace \frac{d_1^2}{\parallel d_1^2 \parallel_2} , \frac{d_2^2}{\parallel d_2^2 \parallel_2}, ..., \frac{d_k^2}{\parallel d_k^2 \parallel_2} \right\rbrace $
\\
$T_{Y_{L_1}\rightarrow Y_{L_2}} = \left\lbrace \frac{t_1^1}{\parallel t_1^1 \parallel_2} , \frac{t_2^1}{\parallel t_2^1 \parallel_2}, ..., \frac{t_k^1}{\parallel t_k^1 \parallel_2} \right\rbrace  ;\hspace{2mm} T_{Y_{L_2}\rightarrow Y_{L_1}} = \left\lbrace \frac{t_1^2}{\parallel t_1^2 \parallel_2} , \frac{t_2^2}{\parallel t_2^2 \parallel_2}, ..., \frac{t_k^2}{\parallel t_k^2 \parallel_2} \right\rbrace$

\end{center}

\hspace{-5mm} For a test document $y_i^j$ in language $l_j$ $(i,j \in \{1,2\})$ we compute its sparse representation $x_i^j$ by solving the optimization problem:
\begin{center}
	$x_i^j = arg min_{x_i^j} \{ \parallel y_i^j - D_j x_i^j \parallel_2^2 \} \hspace{2mm} s.t. \hspace{2mm} \|x_i^j\|_0 \leq T $
\end{center}

\hspace{-5mm}For the task of cross-lingual document retrieval, given a query document $y_i^1$ in language $l_1$ (say) we find its representation $x_i^1$ and then use the mapping $T_{Y_{L_1}\rightarrow Y_{L_2}}$ to transform this representation to get the corresponding representation in the target language domain where we compare it with all the documents using cosine based similarity score to find the most similar document from the corpus in the other language.


\subsection{Experimental Setting}
\label{exp}
We compare CDL with existing approaches on the task of cross-lingual document retrieval.  In this cross-lingual document retrieval task, given a query document in one language, the goal is to find the most similar
document from the corpus in another language. We followed the comparable document retrieval setting described in \cite{platt} and evaluated CDL on the Wikipedia dataset used in that paper. This data set consists of Wikipedia documents in two languages, English and Spanish. An article
in English is paired with a Spanish article if they are identified as comparable across languages by the Wikipedia community. We use the same term vectors as in the previous study. The numbers of document pairs in the training/development/testing sets are 10,000, 2,000 and 2,000 respectively. The dimensionality of the raw term vectors is 20,000. 
\\
The models are evaluated by using each English document as query against all documents in Spanish and vice versa; the results from the two directions are averaged. %Figure \ref{fig1} represents the complete flowchart of the proposed Coupled Dictionary Learning based cross-lingual document retrieval system.
We next describe the evaluation metrics used to measure the performance of our approach.

%\begin{figure*}[t!]
%\begin{center}
%\resizebox{12cm}{!} 
%{
%\includegraphics[width=500pt]{SCDL.png}
%}
%\end{center}

%\caption{ \footnotesize Comparison of MMR scores. The proposed algorithm is termed CDL.}
%\label{fig1}
%\end{figure*}


%\begin{itemize}
%	\item Dataset
%	\item Representations
%	\item Parameters
%	\item Give a figure which shows the entire process.
%\end{itemize}

\subsection{Evaluation Metrics}
We evaluate the performance of the proposed method using two evaluation metrics:
\begin{description}
\itemsep -2pt
\item[\textbf{Top-1 Rank}:] It tests whether the document with the highest similarity score is the true comparable document.
\item[\textbf{Mean Reciprocal Rank}:] It is a statistical measure for evaluating any process that produces a list of possible responses to a query, ordered by probability of correctness. The reciprocal rank of a query response is the multiplicative inverse of the rank of the first correct answer. The mean reciprocal rank is the average of the reciprocal ranks of results for a sample of queries $Q$:
\[
\mathrm{MRR} = \frac{1}{|Q|} \sum_{i=1}^{|Q|} \frac{1}{\mathrm{rank}_i}
\]
The reciprocal value of the mean reciprocal rank corresponds to the harmonic mean of the ranks.
\end{description}

\subsection{Results and Comparisons}
\label{results}
We compare our approach with most methods studied in~\cite{platt}: Oriented Principal Component Analysis (OPCA)~\cite{platt}, Canonical Correlation Analysis (CCA)~\cite{cca}, Coupled Probabilistic Latent Semantic Analysis (CPLSA)~\cite{platt}, Joint Probabilistic Latent Semantic Analysis (JPLSA)~\cite{platt} and Cross-Lingual Latent Semantic Indexing (CL-LSI)~\cite{cllsi}.

As a baseline we use random dictionary initialization without incorporating any unlabelled documents from either of the languages. We add unlabelled (un-aligned) documents in a step-by-step fashion to each language and compare the results obtained in terms of Top-1 accuracy and MRR scores. Figure~\ref{fig2} presents a comparison of the results obtained using Self-Taught Learning based dictionary initialization against random dictionary initialization. We chose different amounts of unlabelled data used for dictionary initialization for both languages. The results thus obtained highlight the advantage of dictionary initialization using unlabelled data.

\begin{figure*}[t!]
\vspace{-2mm}
\begin{center}
\includegraphics[width=10cm]{plotselftaught.pdf}
\end{center}
\vspace{-8mm}
\caption{ \footnotesize The advantage of Self-Taught Learning: incorporating unaligned documents for dictionary initialization. The different colored lines represent the amount of unlabelled documents used in each language. }
\label{fig2}
\end{figure*}

Figure~\ref{fig3} shows the MRR performance of all the methods on the Wikipedia dataset for varying dimensionality. We see that the proposed algorithm outperforms all the existing approaches for each of the dimensionalities tested. The number of dictionary atoms used represents the dimensionality of the document representations learnt using the CDL algorithm. These results include dictionary initialization from 5,000 unlabelled documents from each language instead of randomly initializing the dictionaries for each language.

\begin{figure*}[t!]
\vspace{-2mm}
\begin{center}
\includegraphics[width=10cm]{plotres1.pdf}
\end{center}
\vspace{-8mm}
\caption{ \footnotesize Comparison of MRR scores. The proposed algorithm is termed CDL.}
\label{fig3}
\end{figure*}

\begin{table}[!h]
%\setcounter{table}{9}
\centering
%\resizebox{!}{!} 
{
	\begin{tabular}{|l|c|c|c|}
	\hline
	\textbf{Algorithm}  & \textbf{Dimension} & \textbf{Accuracy} & \textbf{MRR}\\
	\hline
	OPCA & 1500 & 0.72 & 0.78\\
	\hline
	CCA & 1000 & 0.68 & 0.74\\
	\hline
	CPLSA & 1000 & 0.63 & 0.66\\
	\hline
	JPLSA & 1000 & 0.59 & 0.65\\
	\hline
	CL-LSI & 5000 & 0.52 & 0.60\\
	\hline
	CDL & 1000 & \textbf{0.74} & \textbf{0.79}\\
	\hline
	\end{tabular}
}
\vspace{4mm}
\caption{Results for cross-lingual document retrieval on the Wikipedia dataset.}\label{tbl1}
\end{table}

We present the averaged Top-1 accuracy and MRR scores for the different approaches compared in Table~\ref{tbl1}. These results incorporate 5,000 unlabelled documents in each language. The proposed CDL algorithm, along with Self-Taught Learning based dictionary initialization, outperforms all other methods in terms of both Top-1 accuracy and MRR scores.


%\begin{itemize}
%	\item Tell the methods we compare against
%	\item Table for each metric
%	\item Vary the dictionary size to get a graph
%	\item Get scatter plots as well to compare results with others
%\end{itemize}

\section{Contributions \& Conclusions}
\label{conclude}
In this work, we presented a novel algorithm for learning coupled representations for the task of cross-lingual information retrieval. The proposed Coupled Dictionary Learning algorithm learns a pair of dictionaries along with two separate transformations which transform document representations from one language to the other. To the best of our knowledge, this is the first work that shows the importance of utilizing unlabelled (i.e., non-aligned) corpora to improve cross-lingual retrieval performance and presents a novel method to do the same.

From the results presented in Figure~\ref{fig2} it is evident that dictionary initialization on unlabelled documents significantly improves the MRR scores as compared to randomly initialized dictionaries. The efficacy of the proposed CDL algorithm is demonstrated by its superior performance on the task of cross-lingual document retrieval when compared against existing approaches (Table~\ref{tbl1}). When compared with five existing approaches, our algorithm provides the best results in terms of Top-1 accuracy as well as Mean Reciprocal Rank.

With this work, we wish to introduce to the cross-lingual research community the field of sparse dictionary learning, which has been gaining popularity in recent years by performing well on various text processing tasks like domain adaptation~\cite{cikm1} and topic detection~\cite{cikm2}. Our work is quite different from many pioneering studies on Cross-Lingual Information Retrieval, as our proposed algorithm learns two separate representations for documents in each language and at the same time couples these representations and learns the transformation functions to convert one representation to the other. We believe that representations learnt in this manner would perform quite well on cross-lingual classification tasks as well. We intend to develop dictionary learning algorithms aimed at learning discriminative translingual representations for classification tasks as a future extension of this work.

%\begin{itemize}
%	\item Tell the imp of CDL
%	\item Mention what all contributions we make
%	\item Conclude
%	\item Discuss future applications
%\end{itemize}


\begin{thebibliography}{10}

\bibitem{nmf} D. Lee and H. Seung. Learning the Parts of Objects by
Non-negative Matrix Factorization. Nature, 1999.

\bibitem{ksvd} M. Aharon, M. Elad, and A. Bruckstein. The K-SVD: An Algorithm for Designing Overcomplete Dictionaries for Sparse Representation. IEEE TSP, 54(11):4311–4322, 2006.

\bibitem{coupled1} D. Lin and X. Tang. Coupled space learning of image style transformation. In ICCV. IEEE, 2005.

\bibitem{coupled2} J. Yang, J. Wright, T. Huang, and Y. Ma. Image super-resolution via sparse representation. IEEE Trans on IP, 19(11):2861–2873, 2010.

\bibitem{scdl} Wang, S. and Zhang, L. and Liang, Y. and Pan, Q. Semi-Coupled Dictionary Learning with Applications in Image Super-resolution and Photo-Sketch Synthesis. International Conference on Computer Vision and Pattern Recognition (CVPR), 2012.

\bibitem{selftaught} Rajat Raina, Alexis Battle, Honglak Lee, Benjamin Packer, Andrew Y. Ng. Self-taught Learning: Transfer Learning from Unlabeled Data. 24th International Conference on Machine Learning, Corvallis, OR, 2007.

\bibitem{platt} John C. Platt, Kristina Toutanova, and Wen-tau Yih.
2010. Translingual document representations from discriminative projections. In Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing, EMNLP ’10, pages 251–261, Stroudsburg, PA, USA. Association for Computational Linguistics.

\bibitem{cllsi} Susan T. Dumais, Todd A. Letsche, Michael L. Littman, and Thomas K. Landauer. 1997. Automatic cross-language retrieval using latent semantic indexing. In AAAI-97 Spring Symposium Series: Cross-Language Text and Speech Retrieval.

\bibitem{cca} Alexei Vinokourov, John Shawe-Taylor, and Nello Cristianini. 2003. Inferring a semantic representation of text via cross-language correlation analysis. In S. Thrun S. Becker and K. Obermayer, editors, Advances in Neural Information Processing Systems 15, pages 1473–1480, Cambridge, MA. MIT Press.

\bibitem{cikm1} Rishabh Mehrotra, Rushabh Agrawal, Syed Aqueel Haider. Dictionary based Sparse Representation for Domain Adaptation. In 21st ACM Conference on Information and Knowledge Management (CIKM), 2012.

\bibitem{cikm2} S. P. Kasiviswanathan, P. Melville, A. Banerjee, and
V. Sindhwani. Emerging Topic Detection using Dictionary
Learning. In CIKM, pages 745–754, 2011.

\end{thebibliography}


%\section*{Appendix: Springer-Author Discount}

\end{document}
