\documentclass[10pt,twocolumn,letterpaper]{article}

\usepackage{cvpr}
\usepackage{times}
\usepackage{epsfig}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amssymb}


% Theorem-like environments; all share the `thm' counter so numbering is
% sequential across kinds (Theorem 1, Lemma 2, ...).
\newtheorem{thm}{Theorem}
\newtheorem{cor}[thm]{Corollary}
\newtheorem{lem}[thm]{Lemma}
% NOTE(review): a *numbered* Proof environment is unusual -- amsthm's
% unnumbered proof environment is the conventional choice; confirm intent.
\newtheorem{pf}[thm]{Proof}
\newtheorem{prop}[thm]{Proposition}
\newtheorem{defn}[thm]{Definition}
\newtheorem{rem}[thm]{Remark}
% Semantic shorthands: vectors and matrices are bold upright, sets are
% calligraphic.  Note this overrides LaTeX's default arrow-accent \vec.
\let\vec=\mathbf
\let\mat=\mathbf
\let\set=\mathcal
% Run-in bold lead-in for a paragraph (no sectioning counter involved).
\newcommand{\para}[1]{\noindent{\bf #1}}
%\newcommand{\para}[1]{\noindent{\bf #1}\hspace{1em}}
%\newcommand{\set}[1]{{\textstyle{\mathcal #1}}}

% Slightly tightened leading to save space (camera-ready squeeze).
\renewcommand{\baselinestretch}{0.98}

% Alternative heading built on the class's \paragraph (adds trailing period).
\newcommand{\mypara}[1]{\paragraph{#1.}}

\newcommand{\R}{\mathbb{R}}
\let\bs=\boldsymbol

% Semantic macros for recurring math notation.  Declared with \newcommand*
% (which errors on accidental redefinition) instead of the silent \def.
\newcommand*{\transpose}{\mathrm{T}}
\newcommand*{\score}{\mathit{score}}

\newcommand*{\Diag}{\mathrm{Diag}}
\newcommand*{\diag}{\mathrm{diag}}
\newcommand*{\objective}{\mathit{obj}}
\newcommand*{\freq}{\mathit{freq}}
\newcommand*{\domain}{\set{D}}
\newcommand*{\opt}{\set{opt}}
\newcommand*{\inconsistent}{\textup{Conflict}}
\newcommand*{\unary}{\mathit{unary}}
\newcommand*{\median}{\textup{median}}
\newcommand*{\consistency}{\textup{cons}}
\newcommand*{\exclusive}{\textup{exclu}}
% BUGFIX: the original \def\saliency{\textup{\saliency}} expanded to itself,
% causing an infinite expansion loop (TeX capacity exceeded) on first use.
% Abbreviated like the neighbouring \consistency/\exclusive macros.
\newcommand*{\saliency}{\textup{sal}}
\newcommand*{\flow}{\mathit{flow}}
\newcommand*{\adj}{\mathit{adj}}
\newcommand*{\lowrank}{\mathit{lowrank}}
% BUGFIX: do NOT redefine \path -- the url package (loaded via hyperref
% below) defines \path and would silently clobber this macro anyway.
% Use \pathfn for the math notation instead.
\newcommand*{\pathfn}{\mathit{path}}
\newcommand*{\map}{\mathit{map}}
% BUGFIX: do NOT redefine \label -- doing so breaks every \label{...}
% cross-reference in the paper (e.g. \label{sec:overview} below).
% Use \seglabel for the math notation instead.
\newcommand*{\seglabel}{\mathit{label}}
\newcommand*{\match}{\mathit{match}}
\newcommand*{\induced}{\mathit{indu}}
\newcommand*{\composition}{\mathit{compose}}
\newcommand*{\minimize}{\textup{minimize}}% trailing space in original removed
\newcommand*{\maximize}{\textup{maximize}}
\newcommand*{\subjectto}{\textup{subject to}}
\newcommand*{\propagation}{\textup{prop}}
\newcommand*{\pair}{\textup{pair}}

\makeatletter
% Run-in paragraph heading built on the LaTeX kernel's \@startsection:
% level 4 ("paragraph"), zero indent, ~1ex rubber space above; the NEGATIVE
% afterskip (-1em) is what makes the body text continue on the same line
% as the bold heading instead of starting a new paragraph.
\newcommand{\thinparagraph}{%
  \@startsection{paragraph}{4}%
  {\z@}{1.0ex \@plus 0.6ex \@minus .2ex}{-1em}%
  {\normalfont\normalsize\bfseries}%
}
\makeatother


% Include other packages here, before hyperref.

% If you comment hyperref and then uncomment it, you should delete
% egpaper.aux before re-running latex.  (Or just hit 'q' on the first latex
% run, let it finish, and you should be clear).
\usepackage[pagebackref=true,breaklinks=true,letterpaper=true,colorlinks,bookmarks=false]{hyperref}

% \cvprfinalcopy % *** Uncomment this line for the final submission

\def\cvprPaperID{212} % *** Enter the CVPR Paper ID here
\def\httilde{\mbox{\tt\raisebox{-.5ex}{\symbol{126}}}}

% Pages are numbered in submission mode, and unnumbered in camera-ready
\ifcvprfinal\pagestyle{empty}\fi
\begin{document}

%%%%%%%%% TITLE
\title{Unsupervised Multi-Class Joint Image Segmentation}

\author{First Author\\
Institution1\\
Institution1 address\\
{\tt\small firstauthor@i1.org}
% For a paper whose authors are all at the same institution,
% omit the following lines up until the closing ``}''.
% Additional authors and addresses can be added with ``\and'',
% just like the second author.
% To save space, use either the email address or home page, not both
\and
Second Author\\
Institution2\\
First line of institution2 address\\
{\tt\small secondauthor@i2.org}
}

\maketitle
%\thispagestyle{empty}

%%%%%%%%% ABSTRACT
\begin{abstract}
Joint segmentation of an image set is a challenging problem, especially when multiple object classes are shared among the image collection
and the existence of each object in each image is unknown.
In this paper, we aim to jointly segment a set of images with objects from multiple classes.
In a network that contains all images, the functional maps between connected image pairs are first extracted.
We define a shared subspace of all images which is invariant under the functional mapping.
In this subspace, a joint optimization framework is proposed to simultaneously decide which object exists in each image and generate the corresponding
segmentation functions.
This framework yields superior segmentation results, as shown in our experiments.



\end{abstract}

%%%%%%%%% BODY TEXT
\section{Introduction}

\input{introduction}

\subsection{Related Works}
\input{relatedworks}

\subsection{Notations}

Throughout this paper, we use the following convention for linear algebra notation. We use boldface capital characters (e.g., $\bs{A}, \bs{B}, \cdots$) to denote matrices, and boldface lowercase characters (e.g., $\bs{d}, \bs{s}, \cdots$) to denote vectors.  Given a matrix $\bs{A} = (\bs{a}_1, \cdots, \bs{a}_{m})$, we use $\textup{vec}(\bs{A}) = (\bs{a}_1^{T}, \cdots, \bs{a}_{m}^{T})^{T}$ to denote its vector form. With $\|\cdot\|_{\set{F}}$ we denote the matrix Frobenius norm, i.e., $\|\bs{A} = (a_{ij})\|_{\set{F}} = (\sum\limits_{i,j} a_{ij}^2)^{\frac{1}{2}}$. In contrast, we use $\|\cdot\|_{1}$ to denote the column-wise 1-norm, i.e., for a matrix $\bs{A} = (\bs{a}_1, \cdots, \bs{a}_{m})$, $\|\bs{A}\|_{1} = \sum\limits_{i=1}^{m}\|\bs{a}_i\|$.

\section{Problem Statement and Overview}
\label{sec:overview}

%\subsection{Problem statement}

The input of our algorithm is a collection of $N$ similar images $\set{I} = \{I_1, \cdots, I_{N}\}$.  We assume that there are $M$ classes of objects in total among all the input images, and each image contains one or multiple objects. Without loss of generality, we assume that each image $I_{i} = (\set{P}_i, \set{E}_i)$ is given by a graph of super-pixels, and we use $200$ superpixels for all the images in this paper. The output consists of (i) the classification result, i.e., a collection of $M$ image sets $\set{C}_{k} \subset \{1, \cdots, N\}, 1\leq k \leq M$, collecting the images that contain an object of each class, and (ii) the corresponding segmentation $S_{ik} \subset \set{P}_i$ for each image $I_i$ that belongs to class $\set{C}_{k}, 1\leq k \leq M$.



\subsection{Preliminaries and Terminology}

This section reviews the functional map framework for encoding and representing relations between images. % TODO(review): add the missing citation for the functional map framework

\para{Functional Space and Segmentation Functions.} We formulate image segmentation as computing indicator

\para{Probe Functions.}

\para{Functional Maps.}

\para{Image Network.}


\subsection{Approach overview}

\para{Initializing Functional Maps and Segmentation Functions.}

\para{Optimizing Functional Maps and Segmentation Functions.}

\para{Segmentation Propagation.}

\subsection{Functional Maps}

\para{Aligning Probe Functions.}
\begin{equation}
f_{ii'}^{\pair} = \sum\limits_{l=1}^{D}\|\bs{X}_{ii'}\bs{d}_{il} - \bs{d}_{i'l}\|.
\end{equation}

\para{Frequency Consistency.}

\begin{equation}
f_{ii'}^{\freq} = \sum\limits_{1\leq j, j'\leq m }(\lambda_{ij} - \lambda_{i'j'})^2 X_{ii'}(j,j')^2.
\end{equation}


\subsection{Segmentation Functions}

\section{Consistency $\Rightarrow$  Low-Rank}


Introduce the following map collection matrices that store the functional maps in blocks
\[
\bs{X} = \left[ \right] % TODO(review): fill in the block structure of the map collection matrix
\]

\begin{equation}
\bs{X} = \bs{Y}\bs{Z}^{T}.
\label{Eq:LowRank}
\end{equation}

\section{Consistent Functional Maps Among a Heterogeneous Image Collection}

In this section, we present an optimization framework for computing consistent functional maps among a collection of images that exhibit partial similarity between pairs of images, i.e., each pair of images shares a subset of objects. We first introduce the formulation in Section~\ref{Sec:Formu}. We then show how to effectively solve the optimization problem in Section~\ref{Sec:Opt}.

\input{consistentFmaps.tex}

\section{Optimizing Consistent Segmentations}

In this section, we describe how to compute the association between each image and each class, i.e., $\set{C}_{k}, 1\leq k \leq M$, and the segmentation functions $\bs{s}_{ik}, i \in \set{C}_{k}$ of the corresponding objects in each image. The procedure consists of an initialization stage, where we initialize the sparse sets of images of each class and the associated segmentations, and an alternating stage, which alternates between optimizing the segmentation functions and expanding the sets.

\input{optConsistentSeg.tex}

\section{Experiments}

\input{experiments.tex}

\section{Conclusion}
\input{conclusion}



{\small
\bibliographystyle{ieee}
\bibliography{bibs/segmentation,bibs/FunctionalMap,bibs/image_descriptors,bibs/imcorrespondence,bibs/jointSeg,bibs/diffusionMap}
}

\end{document}
