\documentclass[preprint]{elsarticle}

\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{subfigure}
\usepackage{float}
\usepackage{lineno}
\usepackage{hyperref}
\graphicspath{{fig/}}

\linenumbers
\journal{Pattern Recognition Letters}

\begin{document}

\begin{frontmatter}

\title{MM-PLSA}

\author[1]{A. Shabou}\ead{aymen.shabou@cea.fr}
\author[1]{A. Znaidia}\ead{amel.znaidia@cea.fr}
\author[1]{H. Le Borgne}\ead{herve.le-borgne@cea.fr}
\author[2]{C. Hudelo}\ead{...@...}


\address[1]{CEA}
\address[2]{CEntral}

\begin{abstract}
This paper addresses scene classification and object recognition problems within a multimodal probabilistic latent semantic analysis (pLSA) framework.
The contribution of the current work is three-fold.
(1) We propose a comprehensive study for designing multimodal indexing system relying on latent space based models, mainly the pLSA.
Unlike the existing approaches, we propose a more coherent and appropriate generative model for multimedia documents.
(2) We extend the recent advances related to Fisher vector based signature in scene classification to deal with the topic-based features.
An efficient  yet effective version of corresponding features is proposed.
(3) Extensive evaluation of multimodal pLSA approaches using visual and tag features on the two challenging benchmarks Pascal VOC 2007 and MIR Flickr is reported.
The proposed framework clearly outperforms state-of-the-art methods in scene classification while remaining computationally competitive and scalable to large-scale datasets. 
\end{abstract}

\begin{keyword}
....mm-plsa
\end{keyword}

\end{frontmatter}

%------------------------------------------------------------------------------------------------------------------------------------------
\section{Introduction}
\label{section.intro}
Scene classification consists in automatically assigning an image to one or several categories among a fixed number of scene categories.
This task becomes more and more challenging as the size of the dataset and class number grow.
Most of scene classification systems use either the surrounding text (tags, caption) or low-level visual features for document indexing.
Since most of the emerging datasets are multimodal, active research is devoted to combine (or fuse) different single modes in order to build more effective  indexing systems and to improve the classification accuracy~\cite{nagai.06.tencon,bosch.08.pami,guillaumin.10.cvpr,kawanabe.11.wacv}.
The same issues are addressed for image retrieval and automatic annotation tasks~\cite{zhang.05.iccv,lienhart.09.civr,chandrika.10.civr}.

In the current work, we only focus on the combination of visual and textual (tag-based) modes for scene categorization.
The proposed study might be extended to incorporate other collateral modes and deployed for other tasks.

To cope with large scale datasets while keeping the classification system efficient and robust, different issues have been investigated by the communities of document modeling and pattern recognition, mainly:
\begin{enumerate}
\item \textbf{Signature design:} the signature has to be compact for computational efficiency, while being enough class-discriminative. This point involves the local/global descriptor extraction step either from textual or visual modes of documents, as well as some linear/non-linear operations that have to be performed on these descriptors to bridge the semantic gap between the high-level concepts that human perceives and the final signature that describes an image;

\item  \textbf{Classification approach:} given a set of features, classification has to manage on one hand the size of the training set and on the other hand the total number of concepts to recognize.
For most of the well-known classifiers, although the training step is usually performed off-line, scaling with the size of datasets is still a challenging problem, and the trade-off between memory/time complexities and robustness toward overfitting or complex class-separability still remains a topical issue.
\end{enumerate}

The two issues stated above are closely related since providing suited features to a classifier in terms of size, sparsity and discriminative power might  improve the performances of the overall classification system.

Concerning the first issue, recent works focusing on the Bag-of-Words (BoW) feature propose various methods to improve its generative as well as discriminative power~\cite{lazebnik.06.cvpr,yang.09.cvpr,yu.09.nips,boureau.10.cvpr,wang.10.cvpr}.
However, the large size of the final feature is its most important limitation.
Probabilistic models with latent topic variables such as probabilistic Latent Semantic Analysis (pLSA)~\cite{hofmann.01.ml}, Latent Dirichlet Allocation (LDA)~\cite{blei.03.mlr} and their other extensions have emerged from large collection textual document modeling challenges to give rise to compact signatures.
The purpose of these generative models is to reduce the signature's size by mapping original BoW descriptions into a low dimensional and well-founded semantic space.
Especially for image and scene analysis, this mapping is able to disambiguate the standard BoW visual feature~\cite{monay.04.icmr,quelhas.05.iccv,fei.05.cvpr,bosch.08.pami}.
Indeed, visual codewords, that are elementary parts of image description, are  semantically ambiguous, and thus replaced by latent topics close to human perception.
Success of such modeling has been reported in several works addressing image retrieval~\cite{lienhart.09.civr,chandrika.10.civr}, image annotation~\cite{barnard03.mlr,monay.03.icmr,monay.04.icmr}, image classification~\cite{quelhas.05.iccv,nagai.06.tencon,bosch.08.pami}, etc.
Some of them show also that multimodality improves again the generative power of such models.
However, combining different modes, such as the visual and tag ones, in an effective way still remains a challenging problem.

Concerning the second issue, efforts in scene classification are devoted to accelerate classification processes and to manage large scale datasets.
Kernel based support vector machines have been extensively used and significantly improved, showing interesting performances and scalability to cope for instance with 1.2 million images belonging to 1000 classes as reported recently by~\cite{lin.11.cvpr}.
Non parametric classifiers such as the Naive-Bayes Nearest-Neighbor (NBNN) are also appropriate to handle large number of classes without the need of a learning/training step and thus avoiding parameter overfitting~\cite{boiman.08.cvpr}.

In this work we are rather concerned by the feature design step and the aim is (1) to incorporate multimodality in pLSA models within a more coherent framework than the state-of-the-art do and (2) to adapt features resulting from our framework to  kernel based classifiers.
For the first goal, our contribution consists in introducing what we call multimodal (or multimedia) words (MM-words) and topics (MM-topics) that are more appropriate to build multimodal pLSA models, while being simple and efficient for parameter learning.
Concerning the second objective, we propose to extend recent works addressing the visual Fisher vector~\cite{perronnin.07.cvpr,perronnin.10.cvpr} and its approximate variant~\cite{jegou.10.cvpr} to our multimodal framework, taking advantage of merging generative models with discriminative classifiers to put in place competitive recognition systems~\cite{jaakkola.99.nips}.

The rest of this paper is organized as follows.
In section~\ref{section.art}, we propose a comprehensive study of monomodal and mainly multimodal pLSA based indexing systems.
In section~\ref{section.mmplsa}, the proposed multimodal framework is introduced.
The new formulation of multimodal pLSA is detailed, then the Fisher vector based semantic feature generation technique is described.
Experiments to evaluate state-of-the-art methods and compare their performances against the proposed one are reported in section~\ref{section.expe}.
Finally we conclude the work and give some future directions in section~\ref{section.ccl}.

%------------------------------------------------------------------------------------------------------------------------------------------
\section{Related work}
\label{section.art}

Probabilistic latent semantic analysis provides a statistical model for image clustering into multiple object categories or topics in an unsupervised manner.
It can be seen as a probabilistic formulation of the Latent Semantic Analysis (LSA) originally proposed for textual indexing in~\cite{landauer.98.dp}.
The goal of LSA is to reduce the size of classic BoW features since the term-document matrix is always sparse and rank deficient.
Dimension reduction is performed using the Singular Value Decomposition (SVD) technique, where the $k$-top-largest eigenvalue terms are selected to form a reduced matrix defining the latent space.

A topic (or aspect) is formally a latent (or unobserved) variable that bridges the semantic gap between high-level concepts that human perceives and low-level features that usually describe images.
Furthermore, topic is useful to disambiguate the fact that different words can express a same concept (synonymy) and one word might have different meanings depending on the context (polysemy).
While the former is a particularly prominent phenomenon in presence of large dictionaries, the latter characterizes small ones.
This leads to the hard compromise between the discriminative power of a codebook and its size within the BoW feature design approach; which is less critical with topic modeling.
It is then assumed that a given document is indeed a mixture of topics that results in the occurrences of the words observed in a document.
%We shall note also that given such model, a word could come from more than one aspect.
Hence, pLSA is nothing more than a generative model for word occurrences in a document.

We denote by $\mathcal{D}=\{d_1, ..., d_N\}$ a set of $N$ documents that are described with the corresponding BoW feature vectors set $\mathcal{F}=\{f_1, ..., f_N\}$, each one of size $K$, that is the size of a codebook $\mathcal{W}=\{w_1, ..., w_K\}$, a set of words learned from a training set of local features. 
Consider $\mathcal{Z}=\{z_1, ..., z_T\}$ a set of hidden variables related to the topics, the pLSA generative model is expressed as follows:
\begin{equation}
P(d_i, w_k) = P(d_i)\sum_{j=1}^{T}P(z_j|d_i)P(w_k|z_j)\,,
\end{equation}  
with $P(d_i)$ the probability of a document $d_i$ to be picked in the dataset, $P(z_j|d_i)$ the probability of the topic $z_j$ given the current document $d_i$ and $P(w_k|z_j)$ the probability of a word $w_k$ given a topic $z_j$.
Contrary to the BoW based approaches, here the word occurrences in a document are conditionally independent from the document given the aspect.
Figure~\ref{subfigure.plsa} depicts the graphical representation of the above generative model.

\begin{figure*}
\vspace{2cm}
\centering
\subfigure[]{
    \includegraphics[width=5cm,height=4cm]{plsa}
  \label{subfigure.plsa}
}
\subfigure[]{
    \includegraphics[width=5cm,height=4cm]{mlplsa}
  \label{subfigure.mlplsa}
}
\subfigure[]{
    \includegraphics[width=5cm,height=4cm]{tplsa}
  \label{subfigure.tplsa}
}
 \caption{Graphical illustrations of various generative pLSA models (mono and multi modes).
\subref{subfigure.plsa} standard monomodal pLSA,
\subref{subfigure.mlplsa} multilayer monomodal pLSA of~\cite{lienhart.09.civr},
\subref{subfigure.tplsa} multimodal pLSA of~\cite{chandrika.10.civr}. $N$ (resp. $K$) is the number of observed documents $d$ (resp. words $w$), $z$ corresponds to topic and $v$ (resp. $t$) denotes the visual (resp. textual) mode. Gray color indicates an observed (non-latent) variable.} 
 \label{figure.plsamodels}
\end{figure*}

The topic mixture vector $[P(z_1|d_i), ..., P(z_T|d_i)]^T$ is a high-level vector representation of the document $d_i$.
Generally the number of topics $T$ is much smaller  than the number of words $K$, leading to a more compact semantic representation than the original BoW signature $f_i$.
%As a result, pLSA maps the high dimensional word distribution vector of a document to a lower dimensional topic vector.

In order to learn the model, a term-document matrix $\mathbf{F}$ of the training corpus, which is the concatenation of the feature vectors in $\mathcal{F}$, is required and the Expectation-Maximization (EM) algorithm is usually performed.
The former counts the occurrences of  words $w_k$ in documents $d_i$ and the latter is used to learn the  unobserved probability distributions (or parameters) $P(z_j|d_i)$ and $P(w_k|z_j)$  from the training data by maximizing the likelihood function.

This brief description of standard pLSA considers only one mode, either visual or textual for instance.
When combining multiple modes, different strategies have been proceeded in the literature.

Authors of~\cite{monay.04.icmr} adopt a sequential algorithm to fuse  textual and visual modes for image auto-annotation.
Assuming that textual features are semantically more consistent than visual ones, authors start by building a latent space while focusing only on textual features and then learn visual variation conditioned on that space.
Thus a first monomodal  pLSA is applied on the textual features, that are generated by computing the occurrences of tag-words from the training dataset image captions.
Then, a second pLSA model relying on the visual mode is learned while keeping the parameters $P(z_j|d_i)$ from above fixed and computing $p(w_k^v|z_j)$ ($w_k^v$ is a visual word).
The latter is used to derive the topic-feature from the visual words of a given image.
As a result, the automatic annotation of a test document $d_i$ is inferred using the posterior probability $P(w_k^t|d_i)$.
%\begin{equation}
%P(w_k^t|d_i) = \sum_{j=1}^T P(w_k^v|z_j) P(z_j|d_i)\,.
%\end{equation}
%with $w_k^v$  a tag word and $d_i$ a test document to annotate
%Authors of~\cite{zhang.05.iccv} propose ...

In~\cite{lienhart.09.civr} authors propose a multilayer multimodal pLSA image retrieval system that significantly improves classic unimodal systems. 
The idea behind this work is to derive high-level topics that generate both visual and tag topics, which subsequently generate visual and tag words respectively, as we can see in the corresponding graphical model depicted in figure~\ref{subfigure.mlplsa}.
However, this approach induces an explosion in the required training time  in order to accurately estimate the parameters as shown by the authors when deriving the log-likelihood function and using the EM algorithm for parameter estimation.
A solution was proposed to avoid such a heavy processing by performing individual pLSAs for each mode.
Two leaf-pLSAs  models are then built.
As a result, two corresponding separate sets of topics  are learned.
The obtained models are then considered as an initialization to train a top-level pLSA node merging the two leaf-pLSAs.
EM convergence is thus accelerated, leading to a top-level set of topics merging the multimodal knowledge.

Authors of~\cite{chandrika.10.civr}
% have replaced the standard co-occurrence matrix required by the classic pLSA or LSI with a $m$-order tensor, where $m-1$ is the total number of modes.
assume that the pair of random variables corresponding to the occurrences of  visual and tag words are conditionally independent given a document as it has been proposed earlier in~\cite{zhang.05.iccv}.
Then the joint probability of visual and tag words with a given document is expressed as follows:
\begin{eqnarray}
P(w_k^t,w_{k'}^v,d_i) = p(w_{k'}^v|d_i) p(w_k^t|d_i) p(d_i)\,,
%&=& P(w_l^v|w_k^t,d_i) P(w_k^t,d_i)\nonumber\\
%& = &p(w_l^v|w_k^t,d_i) p(w_k^t|d_i) p(d_i) \nonumber\\
%(\text{conditional independence})		
\end{eqnarray}
with $p(w_{k'}^v|d_i)$ (resp. $p(w_k^t|d_i)$) the probability of occurrence of visual (resp. tag) word $w_{k'}^v$ (resp. $w_k^t$) given a document $d_i$.
Their proposed generative model is expressed as the following:
\begin{equation}
P(w_k^t,w_{k'}^v,d_i) = P(d_i)\sum_{j=1}^T P(w_k^t|z_j) P(w_{k'}^v|z_j) P(z_j|d_i)\,,
\end{equation}
where $z_j$ is a higher level aspect that generates both visual and textual words.
The graphical illustration of this model is presented in figure~\ref{subfigure.tplsa}.
Following the maximum likelihood principle, the three parameters $p(w_k^t|z_j) $, $p(w_{k'}^v|z_j)$ and $p(z_j|d_i)$ are learned by maximizing the log-likelihood function using the EM algorithm.
%Finally, documents are mapped to a lower dimensional latent vector derived from both text and visual words.

The aforementioned strategies clearly differ in the way of fusing modes for pLSA model design.
The sequential algorithm of~\cite{monay.04.icmr} does not take advantage of the two modes jointly, which is in our opinion a limited fusion approach. 
When joint modes learning is performed, it is still not clear whether considering the same set of topics for generating both visual and tag words, as considered by~\cite{zhang.05.iccv,chandrika.10.civr}, is consistent with the heterogeneity of visual and tag words.
Even if the interesting work of~\cite{lienhart.09.civr} has addressed this issue by adding latent high-level topic that generates different tag and visual topics, each one of them subsequently generates the corresponding mode words, there is no guarantee to reach good optima of the model in a reasonable time.

In order to improve both the consistency of the multimodal pLSA modeling and the efficiency of the learning algorithm, we propose to investigate a new direction.
First, we define a new multimodal (or multimedia) word as the elementary part of a multimodal (multimedia) document.
Then we extend the pLSA formalism in order to be able to generate such multimodal words through multimodal topics.
The new framework gives rise to a more comprehensive work for studying the multimodal  behavior within the pLSA approach (or other topic-based models).
We further enrich again the obtained multimodal semantic feature relying on the generative Fisher vector approach.

%------------------------------------------------------------------------------------------------------------------------------------------
\section{MM-pLSA}
\label{section.mmplsa}

Authors of~\cite{monay.04.icmr} assume that features generated by different modes, a visual and tag ones for instance, should not have the same importance in defining the latent space.
On one hand, tag-words have higher semantic level than the visual-words.
On the other hand, the visual co-occurrence matrix across the training dataset does not necessarily imply a semantic relationship between images.
Even if the same hypothesis will be considered in the current work, the model we propose is completely different since the fusion process is not sequential and relies on  multimodal words as well as topics.

First, we introduce the multimodal word concept and an efficient algorithm to design it.
Then, the standard generative  pLSA model is applied for multimodal topic feature creation.
We call these features MM-pLSA. 
Finally, we enrich the latter using Fisher vectors, exploring higher order statistics of the generative model.
Final features are called FMM-pLSA.

\subsection{MM-words}
We define a MM-word (or multimodal word) as the elementary part of a multimedia document, similar to the usual definitions of visual and tag words as elementary parts of an image document and its corresponding caption respectively.

Common discrete methods for codebook design, like K-means,  proceed by clustering local descriptors to end up with codewords that quantize the descriptor space.
Similar approaches are proposed by mixtures of Gaussian estimation for continuous codebooks.
In this section we focus on an efficient discrete technique, while a continuous one is proposed in section~\ref{section.fmm}.

We denote by $\mathcal{T}_d$ the set of tags associated to a document $d$ and $\mathcal{T}$ the set of all non-redundant tags of the dataset.
A simple way to build the discrete multimodal dictionary $\mathcal{W}^{m}$ is to first express each tag of $\mathcal{T}$ over a discrete visual vocabulary $\mathcal{W}^v$.
On one hand, the obtained matrix of tag-codes over $\mathcal{W}^v$ might show us some relationship between visual and tag words.
On the other hand, the assumption made on the semantic superiority of tag words over visual ones is respected.
Then a clustering of the obtained tag-codes is performed in order to capture only the relevant codes expressing the main tag-visual relationships.

Let us denote by $\mathbf{M}$ the matrix of tag-codes of size $K^{v}\times K^{t}$, with $K^{v}$ (resp. $K^{t}$) the size of a visual (resp. tag) codebook.
 $\mathbf{M}$ is actually built as a co-occurrence matrix of visual words in tags associated to their corresponding documents, i.e.,
\begin{equation}
\mathbf{M}(k,j) = \sum_{d_i \in \mathcal{D}\,; t_j \in \mathcal{T}_{d_i}} \mathbf{F}(k,i)\,,
\end{equation}
with $d_i$ the $i^{th}$ document in the set $\mathcal{D}$, $t_j$ a tag in $\mathcal{T}_{d_i}$ and $\mathbf{F}$ the term-document matrix already defined in section~\ref{section.art}. 
This matrix is then $l_1$ column normalized, expressing the frequency of a given visual word relatively to a given tag within the whole training set.
Clustering these tag-codes (column vectors) leads to the set of cluster centers $\mathcal{W}^{m}=\{w^m_1,...,w^m_K\}$ that we call MM-words, each one is of length $K^v$.

\subsection{MM-topics}

Since the elementary part is well defined, multimodal topics could be easily estimated relying on the standard monomodal pLSA.
A multimodal term-document co-occurrences matrix $\mathbf{F}^m$ is first built by computing the MM-word occurrences in each document.
Then, we consider the following generative model of MM-words:
\begin{equation}
P(w_k^m,d_i) =P(d_i) \sum_{t=1}^{T}P(z_t^m|d_i)P(w_k^m|z_t^m)\,,
\end{equation}
that is simply related to the standard pLSA applied to design a generative model of such MM-words. 
This model, we call MM-pLSA,  can be explained as the following:
\begin{itemize}
\item select a document $d_i$ with a probability $P(d_i)$ which is proportional to the size of $d_i$;
\item pick a latent MM-topic $z_t^m$ with probability $P(z_t^m|d_i)$;
\item generate a MM-word $w_k^m$ with probability $P(w_k^m|z_t^m)$. 
\end{itemize}

The mixture parameters $P(z_t^m|d_i)$ and $P(w_k^m|z_t^m)$ can be learned from the training data by maximizing the likelihood function:
\begin{equation}
P(\mathcal{D},\mathcal{W}) = \prod_{d_i} \prod_{w_k^m} P(w_k^m,d_i)^{\mathbf{F}^m(k,i)}\,,
\end{equation}
using the EM algorithm or its tempered version (TEM) proposed by~\cite{hofmann.01.ml}.
%The log-likelihood could be expressed as the following:
%\begin{equation}
%...
%\end{equation}
By alternating an expectation step (E-step), where the posterior over the topics is computed, and a maximization step (M-step), where the parameters are updated, convergence of the algorithm is guaranteed since the likelihood increases monotonically over the iterations.

\paragraph{E-step}
%Intuitively, this step gives the best guess of the membership of each MM-word and document to a given topic, since they are considered as mixtures of topics.
Using the Bayes rule, the posterior probability over a MM-topic is given as follows:
\begin{equation}
P(z_t^m|d_i,w_k^m) = \frac{P(w_k^m|z_t^m) P(z_t^m|d_i)}{\sum_{r=1}^T P(w_k^m|z_r^m) P(z_r^m|d_i)}\,.
\end{equation}
Terms $P(w_k^m|z_t^m)$ and $P(z_t^m|d_i)$ for all the MM-topics are initially chosen randomly.
%\begin{equation}
%P(z_t^m|d_i,w_k^m) = \frac{P(w_k^m,d_i|z_t^m)P(z_t^m)}{\sum_{r=1}^T P(w_k^m,d_i|z_r^m)P(z_r^m)}\,. 
%\end{equation}
%The conditional independence of MM-words and document given a MM-topic, which is the core assumption of the pLSA model, leads to $P(w_k^m,d_i|z_t^m)=P(w_k^m|z_t^m) P(d_i|z_t^m)$. 
%Then
%\begin{equation}
%P(z_t^m|d_i,w_k^m) = \frac{P(w_k^m|z_t^m) P(d_i|z_t^m)P(z_t^m)}{\sum_{r=1}^T P(w_k^m|z_r^m) P(d_i|z_r^m)P(z_r^m)}\,.
%\end{equation}
%Terms $P(w_k^m|z_t^m)$, $P(z_t^m|d_i)$ and $P(z_t^m)$ for all the MM-topics are initially chosen randomly, than estimated in the previous M-step iteration as the following.
\paragraph{M-step}
Parameters update is performed as follows:
\begin{equation}
P(w_k^m|z_t^m) = \frac{\sum_{i=1}^N \mathbf{F}^m(k,i) P(z_t^m|d_i,w_k^m)}{\sum_{k'=1}^K\sum_{i=1}^N \mathbf{F}^m(k',i) P(z_t^m|d_i,w_{k'}^m)}\,,
\end{equation}

\begin{equation}
P(z_t^m|d_i) =  \frac{\sum_{k=1}^K  \mathbf{F}^m(k,i) P(z_t^m|d_i,w_k^m)}{n(d_i)}\,,
\end{equation}
with $n(d_i)$ the length of a document $d_i$.

%\begin{equation}
%P(z_t^m) = \frac{\sum_{k=1}^K \sum_{i=1}^N  \mathbf{F}(k,i) P(z_t^m|d_i,w_k^m)}{\sum_{k=1}^K \sum_{i=1}^N  \mathbf{F}(k,i)}\,.
%\end{equation}

%Intuitively, this step maximizes the expectation of the complete data log-likelihood function leading to the best parameters at the current iteration.


Once the model parameters are estimated, a document $d$ in the training set is indexed by the MM-topic feature vector $P(z^m|d^{train})$, while a test document is indexed by the vector $P(z^m|d^{test})$ computed using the fold-in heuristic~\cite{hofmann.01.ml}.
The latter consists in maximizing the likelihood of the document $d^{test}$ with a partial version of the EM algorithm described above, i.e., $P(z^m|d^{test})$ maximizes the
likelihood of the test document with respect to the previously learned $P(w_k^m|z_t^m)$ parameters from the training.


 
%As we clearly see, a  large number of parameters need to be estimated across  the training dataset.
%The problem of over-fitting could be induced by the classic EM algorithm and thus leading to poor generalization to unseen data.
%The tempered version proposed in~\cite{hofmann.01.ml} is always more appropriate.
%It combines the deterministic annealing~\cite{rose.90.prl} with the EM algorithm resulting in the so called Tempered Expectation Maximization (TEM).
%Once the $T$ conditional word distributions $P(w_k|z_t)$ has been learned, it will be fixed in order to infer the new topic feature $P(\mathbf{z}|d_i)=[P(z_1|d_i), P(z_2|d_i), ..., P(z_T|d_i)]^T$ knowing the word occurrence vector $P(\mathbf{w}|d_i)=[P(w_1|d_i), ...., P(w_K|d_i)]^T$ from the test image $d_i$, i.e., $P(\mathbf{z}|d_i) = P(\mathbf{w}|\mathbf{z}) ^{-1} P(\mathbf{w}|d_i)$.

\subsection{Spatial pyramids}
One of the key steps that has significantly improved the BoW based feature design for scene and object  recognition is incorporating the spatial context into features.
The spatial pyramids method introduced by~\cite{lazebnik.06.cvpr} consists in decomposing a given image into sub-regions, and then compute the features individually at the resulted different resolution levels. 
The final signature is just the concatenation of the obtained region-based BoW features.
The same concept has been extended to the topic based features in~\cite{ergul.10.icpr}.
Co-occurrence matrices corresponding to the sub-regions are considered to derive sub-region parameter models. 
Finally, as above, the whole topic feature is a concatenation of sub-region topic features.

\subsection{FMM-pLSA features}
\label{section.fmm}
%To deploy the Fisher vector generation framework of~\cite{perronnin.07.cvpr} within the pLSA model, continuous vocabularies might be considered to derive the required parameters.
%We shall refer to the interesting work~\cite{horster.08civr}, where authors proposed an interesting study of various continuous vocabularies within the pLSA models and a comparison of their performances for scene recognition.
%First of all, they show the superiority of continuous vocabularies based pLSA models over the classic discrete one (using the K-means for instance).
%Then they compared the following various continuous codebook in conjunction with the topic learning strategies: (1) shared Gaussian words between topics  (SGW-pLSA), (2) fixed shared Gaussian words (FSGW-pLSA) and (3)  Gaussian mixture of words  for each topic (GM-pLSA).
%They empirically observed that the second one (FSGW-pLSA), which is actually the fastest one for learning, outperforms the others.
%Hence, the continuous visual vocabulary is learned, before the topic learning step, on a set of local features from the training set. 
%Then, the following generative pLSA model is considered while keeping the mixture of Gaussian parameters fixed (i.e.,  P$(f_i|g_k)$) from above:
%\begin{equation}
%P(f_i,d_i) = P(d_i) \sum_{j=1}^T \sum_{k=1}^K P(f_i|g_k) P(g_k|z_j) P(z_j|d_i)\,,
%\end{equation}
%where $P(f_j|g_k) = \mathcal{N}(f_j|\mu_k,\Sigma_k)$ is a multivariate Gaussian distribution over the feature vector space modeling the Gaussian component $g_k$.
%Thus time needed for EM convergence is the same as the one required by a classic pLSA estimation algorithm.
%
%Based on the generative model above, the Fisher vector is derived thereafter.
 
Fisher vector generation framework of~\cite{perronnin.07.cvpr} is  a higher order generative model for local features in image.
Since pLSA models the word occurrence in documents, we deploy here the Fisher vector in order to enrich the generative pLSA modeling of word occurrences.
The main assumptions we make are the following:
\begin{enumerate}
\item  topics are approximated by GMM that are learned on the $P(\mathcal{W}^m|\mathcal{Z}^m)$ matrix. 
Each Gaussian $\mathcal{N}_j(\alpha_j, \mu_j, \Sigma_j)_{ j=1, ..., T}$ represents a topic with a weight $\alpha_j$, a mean vector $\mu_j$ and a covariance matrix $\Sigma_j$;
\item a word is a mixture of topics that are already learned;
\item words are independent;
\end{enumerate}
 
We denote by $\mathcal{L}(\mathcal{W}^m|\lambda)=\log P(\mathcal{W}^m|\lambda)$ the log-likelihood function, where $\lambda=\{\mu_j,\Sigma_j,\alpha_j\,; j=1, ..., T\}$ is the set of GMM parameters.
Under the third assumption, we have
\begin{equation}
\mathcal{L}(\mathcal{W}^m|\lambda) = \sum_{k=1}^K \log P(w_k^m|\lambda)\,.
\end{equation}
Under the second assumption, we have
\begin{equation}
P(w_k^m|\lambda) = \sum_{j=1}^T \alpha_j P_j(w_k^m|\lambda)\,,
\end{equation}
such that
\begin{equation}
\sum_{j=1}^T \alpha_j = 1\,,
\end{equation}
and
\begin{equation}
P_j(w_k^m|\lambda) = \frac{\exp\big(-\frac{1}{2} (...) \big)}{b}\,,
\end{equation}
%------------------------------------------------------------------------------------------------------------------------------------------
\section{Experiments}
\label{section.expe}

In the current work, we have focused on textual and visual representations only.
Nevertheless, the approach could be extended to other modes, such as audio for video based multimedia documents.

\subsection{Pipeline}

\begin{itemize}
\item{\textbf{Codebook learning:}}

\item{\textbf{Topic learning:}}

\item{\textbf{Classifier:}}
\end{itemize}

We are comparing our contribution to different state-of-the-art approaches, namely:
\begin{itemize}
\item{\textbf{Single mode pLSA (S-pLSA):}} referring to either textual only or visual only mode using standard pLSA;
\item{\textbf{Naive multimodal pLSA (N-pLSA):}} referring to a simple fusion of individual classifiers learned on single modes;
\item{\textbf{Multilayer-pLSA (ML-pLSA)~\cite{lienhart.09.civr}:}} 

\item{\textbf{Tensor-pLSA (T-pLSA)~\cite{chandrika.10.civr}:}} we note that the tensor representation is rather required by the direct LSI modeling.
Anyway, we kept this appellation for the pLSA model since estimating only one topic layer for visual and textual words generation is very close to the High order SVD step used in the corresponding LSI model.

\item{\textbf{Multimodal-BoW (M-BoW)~\cite{}:}}
\end{itemize}

We use the mean Average Precision (mAP) measure to evaluate the classification performances of the above indexing approaches.


\subsection{Pascal VOC 2007}

\begin{table}[ht]
\centering
\begin{small}
\begin{tabular}{|l||c|c|c|}
\hline
\textbf{T} (topics)$\mathbf{\rightarrow}$ & 50 & 100 & 150 \\
\textbf{Model}& & & \\
$\mathbf{\downarrow}$& & & \\
\hline\hline
\textbf{S-pLSA }(visual) 	 	& -	& -	& -\\
\hline
\textbf{S-pLSA }(tag)			& -	 &-	& -\\
\hline
\textbf{N-pLSA } 	 	& -	& -	& -\\
\hline
\textbf{ML-pLSA } 	 	& -	& -	& -\\
\hline
\textbf{T-pLSA } 	 	& -	& -	& -\\
\hline\hline
\textbf{MM-pLSA } 	 	& -	& -	& -\\
\hline
\textbf{FMM-pLSA } 	 	& -	& -	& -\\
\hline
\end{tabular}
\caption{Comparative results on Pascal VOC 2007.}
\end{small}
\label{tab.experiments}
\end{table}

\subsection{MIR Flickr~\cite{huiskes.08.mir}}

\begin{table}[ht]
\centering
\begin{small}
\begin{tabular}{|l||c|c|c|}
\hline
\textbf{T} (topics)$\mathbf{\rightarrow}$ & 50 & 100 & 150 \\
\textbf{Model}& & & \\
$\mathbf{\downarrow}$& & & \\
\hline\hline
\textbf{S-pLSA }(visual) 	 	& -	& -	& -\\
\hline
\textbf{S-pLSA }(tag)			& -	 &-	& -\\
\hline
\textbf{N-pLSA } 	 	& -	& -	& -\\
\hline
\textbf{ML-pLSA } 	 	& -	& -	& -\\
\hline
\textbf{T-pLSA } 	 	& -	& -	& -\\
\hline\hline
\textbf{MM-pLSA } 	 	& -	& -	& -\\
\hline
\textbf{FMM-pLSA } 	 	& -	& -	& -\\
\hline
\end{tabular}
\caption{Comparative results on MIR Flickr.}
\end{small}
\label{tab.mirflickr}
\end{table}


%------------------------------------------------------------------------------------------------------------------------------------------
\section{Conclusion}
\label{section.ccl}
Future work will address classification tasks related to video documents showing how the proposed model could  be easily extended to cope with various modes and studying the performance gain over the literature. 


%
% The following two commands are all you need in the
% initial runs of your .tex file to
% produce the bibliography for the citations in your paper.
\bibliographystyle{elsarticle-harv}
\bibliography{pr12}  % sigproc.bib is the name of the Bibliography in this case
% You must have a proper ".bib" file
%  and remember to run:
% latex bibtex latex latex
% to resolve all references
%
% ACM needs 'a single self-contained file'!
%
%APPENDICES are optional
%\balancecolumns

\end{document}
