
%% bare_jrnl.tex
%% V1.3
%% 2007/01/11
%% by Michael Shell
%% see http://www.michaelshell.org/
%% for current contact information.
%%
%% This is a skeleton file demonstrating the use of IEEEtran.cls
%% (requires IEEEtran.cls version 1.7 or later) with an IEEE journal paper.
%%
%% Support sites:
%% http://www.michaelshell.org/tex/ieeetran/
%% http://www.ctan.org/tex-archive/macros/latex/contrib/IEEEtran/
%% and
%% http://www.ieee.org/



% *** Authors should verify (and, if needed, correct) their LaTeX system  ***
% *** with the testflow diagnostic prior to trusting their LaTeX platform ***
% *** with production work. IEEE's font choices can trigger bugs that do  ***
% *** not appear when using other class files.                            ***
% The testflow support page is at:
% http://www.michaelshell.org/tex/testflow/


%%*************************************************************************
%% Legal Notice:
%% This code is offered as-is without any warranty either expressed or
%% implied; without even the implied warranty of MERCHANTABILITY or
%% FITNESS FOR A PARTICULAR PURPOSE! 
%% User assumes all risk.
%% In no event shall IEEE or any contributor to this code be liable for
%% any damages or losses, including, but not limited to, incidental,
%% consequential, or any other damages, resulting from the use or misuse
%% of any information contained here.
%%
%% All comments are the opinions of their respective authors and are not
%% necessarily endorsed by the IEEE.
%%
%% This work is distributed under the LaTeX Project Public License (LPPL)
%% ( http://www.latex-project.org/ ) version 1.3, and may be freely used,
%% distributed and modified. A copy of the LPPL, version 1.3, is included
%% in the base LaTeX documentation of all distributions of LaTeX released
%% 2003/12/01 or later.
%% Retain all contribution notices and credits.
%% ** Modified files should be clearly indicated as such, including  **
%% ** renaming them and changing author support contact information. **
%%
%% File list of work: IEEEtran.cls, IEEEtran_HOWTO.pdf, bare_adv.tex,
%%                    bare_conf.tex, bare_jrnl.tex, bare_jrnl_compsoc.tex
%%*************************************************************************

% Note that the a4paper option is mainly intended so that authors in
% countries using A4 can easily print to A4 and see how their papers will
% look in print - the typesetting of the document will not typically be
% affected with changes in paper size (but the bottom and side margins will).
% Use the testflow package mentioned above to verify correct handling of
% both paper sizes by the user's LaTeX system.
%
% Also note that the "draftcls" or "draftclsnofoot", not "draft", option
% should be used if it is desired that the figures are to be displayed in
% draft mode.
%
\documentclass[journal]{IEEEtran}
\usepackage{graphicx}

% correct bad hyphenation here
\hyphenation{op-tical net-works semi-conduc-tor}


\begin{document}
%
% paper title
% can use linebreaks \\ within to get better formatting as desired
\title{Group Project - Preliminary Report\\
			Unsupervised 3D Object Reconstruction and Visualisation}



\author{Dong Kim
        and Christian Mostegel\\
				Group~\textbf{$\cdot$}}

% note the % following the last \IEEEmembership and also \thanks - 
% these prevent an unwanted space from occurring between the last author name
% and the end of the author line. i.e., if you had this:
% 
% \author{....lastname \thanks{...} \thanks{...} }
%                     ^------------^------------^----Do not want these spaces!
%
% a space would be appended to the last name and could cause every name on that
% line to be shifted left slightly. This is one of those "LaTeX things". For
% instance, "\textbf{A} \textbf{B}" will typeset as "A B" not "AB". To get
% "AB" then you have to do: "\textbf{A}\textbf{B}"
% \thanks is no different in this regard, so shield the last } of each \thanks
% that ends a line with a % and do not let a space in before the next \thanks.
% Spaces after \IEEEmembership other than the last one are OK (and needed) as
% you are supposed to have spaces between the names. For what it is worth,
% this is a minor point as most people would not even notice if the said evil
% space somehow managed to creep in.



% The paper headers
\markboth{COMP9517 - Computer Vision - University of New South Wales}%
{Kim and Mostegel: Unsupervised 3D Object Reconstruction and Visualisation}




% make the title area
\maketitle


\IEEEpeerreviewmaketitle



\section{Introduction}
\IEEEPARstart{T}{he} fields of image-based modeling (IBM) and image-based rendering (IBR) are very active and long-standing fields in computer vision. They lie at the centre of one of the two big schools in computer vision as they were described in one of the most important works in computer vision by David Marr in 1982~\cite{marr82}: the reconstruction and the recognition school. The aim is to understand the world which was projected onto the image plane as well as the relation between images through reconstruction. In recent years there has been a lot of work in this field in relation to large scale online image collections~\cite{snavely06,agarwal09,frahm10}, such as Google (www.google.com) or Flickr (www.flickr.com). The work of Snavely et al.~\cite{snavely06} mainly focuses on establishing the relation between pictures of a specific landmark and enabling the user to intuitively browse the image collection with a sparse 3D representation of the scene. On the other hand, the work of Frahm et al.~\cite{frahm10} aims to create a fully textured 3D model of the scene. Sinha et al.~\cite{sinha09} try to refine a found sparse 3D representation with a piecewise planar approximation of the scene.

All of the mentioned recent approaches in this field have a feature-based matching step and a sparse bundle adjustment step at the beginning of the reconstruction process and are, in this matter, closely related to the work of Brown and Lowe~\cite{brown05}. This common part is, depending on the literature, called structure and motion (SAM)~\cite{brown05} or structure from motion~\cite{snavely06,frahm10}. It copes with the challenge of estimating the 3D position of matched features and the camera parameters simultaneously using a given set of images of the same object. A SIFT detector/descriptor~\cite{lowe04} is used for feature matching. Then the fundamental matrix between two matched images is estimated with a RANSAC-based method~\cite{fischler87}. The fundamental matrix is either estimated with an 8-point algorithm as in~\cite{hartley00} or with a 5-point algorithm~\cite{nister04}. The bundle adjustment is then formulated as a minimization problem and solved with a Levenberg-Marquardt algorithm, such as~\cite{lour09} or~\cite{brown05}.

Our project aims to implement an IBM/IBR system which allows the user to browse large photo collections in a sparse 3D representation as in~\cite{snavely06}.

The remaining document is structured as follows:
Section~\ref{sec:goal} describes what the output of our project will be, seen from a high-level perspective. Section~\ref{sec:sa} gives details about the software architecture and which algorithms we plan to use/implement.
Section~\ref{sec:eval} describes which dataset we are planning to use for evaluation and how we will evaluate our implementation.
The last section gives detailed information about our project schedule and when we hope to achieve the different stages of the implementation.


\section{Project Goal}
\label{sec:goal}
In this project, a piece of software extracting 3D models from photo images will be implemented. The software consists of two main modules. One module is the sparse 3D object representation engine. The main role of the engine is analysing multiple images in order to produce a 3D model of the object and reconstruct the relative positions from which the source pictures were taken. The second module handles the rendering of the created model and the camera as well as the user interaction. The user will be able to navigate through the world freely using the keyboard, and it will be possible to select a view on the object by clicking on a camera in the 3D world. This will move the user's camera into the position from which the picture he/she selected was taken.


\section{Software Architecture}
\label{sec:sa}

\subsection{Overview}
\label{ssec:overview}



\begin{figure}[!t]
  \centering
  \includegraphics[width=\columnwidth]{overview.jpg}
  \caption{System model of our project.}
  \label{fig:overview}
\end{figure}

Figure~\ref{fig:overview} shows the overall architecture of the software. The Engine package is the one that analyses the input images and produces a point cloud and a list of cameras. Once the engine has produced these outcomes, the Visualiser can start rendering the result. The Visualiser also deals with user interactions such as navigation or selecting a different perspective.

\subsection{Model Creation}
\label{ssec:model}
The model creation will be implemented with two frameworks, namely OpenCV~\cite{opencv} and SBA~\cite{lour09}.
OpenCV provides the I/O functionality and implementations of relevant key point detectors/descriptors (SIFT~\cite{lowe04} and SURF~\cite{bay08}) as well as a RANSAC~\cite{fischler87} implementation to estimate the fundamental matrix for a given set of matched points.

In our implementation the user will be able to switch between SIFT and SURF detectors/descriptors. The matching will be done by a simple brute force matcher with a radius nearest neighbour match. For early outlier rejection we will use ``cross checking'', which has proven useful in the panograph task. ``Cross checking'' means that the key points will be matched from image $A$ to image $B$ and vice versa. Only the key points that are nearest neighbours in both directions will be considered.
The fundamental matrix will then be calculated with the RANSAC implementation provided by OpenCV.
The 3D points and the camera parameters will be optimized in a further step using a Levenberg-Marquardt implementation provided by Lourakis and Argyros~\cite{lour09}.

The final output of this engine will be a 3D point cloud and the camera parameters (translation $[t_1,t_2,t_3]$, rotation $[\theta_1,\theta_2,\theta_3]$, and the focal length $f$ as proposed in~\cite{brown05}).
Each 3D point will have a link to all images it is visible in to facilitate the visualisation and the user interaction.

\subsection{Visualisation and  User Interaction}
\label{ssec:gui}

In the visualisation package, graphical user interface (GUI) and user interaction functionalities are provided. The GUI feature includes visualising the point cloud and cameras in a three dimensional (3D) space. Users can also interact with the system so they can navigate the 3D space or simply select another camera view presented in the screen. Since the point cloud is constructed in 3D, this feature is necessary to improve readability and visibility of the result in 3D space. This package will use the frameworks OpenGL \cite{opengl} and Qt \cite{qt}.

\section{Evaluation}
\label{sec:eval}
For the evaluation we have chosen the data set of community photo collections~\cite{goesele07}. It contains a dataset of Notre-Dame which is very similar to the dataset used in~\cite{snavely06}. As this work provides an applet of their result, it will allow us to directly compare the quality of our implementation.

Furthermore, we will directly compare the SIFT and SURF detectors/descriptors in terms of computational load and the ratio of correct matches to false matches. This is done to evaluate whether it is justified that all recent approaches use the SIFT algorithm and none the SURF algorithm.

\section{Project Plan}
\label{sec:plan}
\begin{table}[h]
\begin{center}
	\begin{tabular}{|c|c|c|}
\hline
		Week & \multicolumn{2}{|c|}{Tasks}\\\hline\hline
		9 & \multicolumn{2}{|c|}{Background research } \\
	& \multicolumn{2}{|c|}{
Software architecture design}\\\hline
		10 & Image matching  & 3D rendering \\
				&and fundamental matrix & \\\hline
		11 & Sparse bundle adjustment & User interaction \\\hline
		12 & \multicolumn{2}{|c|}{Testing} \\\hline
		13 & \multicolumn{2}{|c|}{Final Demo} \\\hline

	\end{tabular}
\end{center}
\caption{Project Plan.}
	\label{tab:project_plan}
\end{table}

Table~\ref{tab:project_plan} shows the work schedule of our project and the sub-goals we want to achieve in the specified weeks. The first week will be used to refine our background research and design the software project.
Weeks 10 and 11 are reserved for the implementation. As we can neatly divide the project into two packages, we will implement both parts in parallel to speed up the development process. In week 12 we will test the combined implementation.
% and "HIS" in caps to complete the first word.



% Can use something like this to put references on a page
% by themselves when using endfloat and the captionsoff option.
\ifCLASSOPTIONcaptionsoff
  \newpage
\fi



\begin{thebibliography}{1}
\bibitem{opencv} OpenCV (Open Source Computer Vision), http://opencv.willowgarage.com.

\bibitem{opengl} OpenGL (Open Graphics Library), Silicon Graphics International Corp., http://www.opengl.org.

\bibitem{qt} Qt framework, Nokia Corporation,
http://qt.nokia.com.

\bibitem{marr82}
D. Marr, \emph{Vision. A Computational Investigation into the Human Representation and Processing of Visual Information}, W.H. Freeman and Company, 1982.

\bibitem{fischler87}
M. Fischler and R. Bolles. \emph{Random sample consensus:
a paradigm for model fitting with applications to image
analysis and automated cartography}, Readings in computer vision:
issues, problems, principles, and paradigms, 726-740, 1987.

\bibitem{hartley00}
R. Hartley and A. Zisserman, \emph{Multiple View Geometry
in Computer Vision}, Cambridge University Press, ISBN:
0521623049, 2000.

\bibitem{nister04}
D. Nist\'{e}r, \emph{An efficient solution to the five-point relative pose problem}, PAMI 26, 756-770, 2004.

\bibitem{lowe04}
D. Lowe, \emph{Distinctive image features from scale-invariant keypoints}, IJCV, 60, 91-110, 2004.

\bibitem{brown05}
M. Brown and D. Lowe, \emph{Unsupervised 3D object recognition and reconstruction in unordered datasets}, International Conference on 3-D Digital Imaging and Modeling (3DIM 2005), Ottawa, Canada, June 2005.

\bibitem{snavely06}
N. Snavely, S. Seitz, R. Szeliski, \emph{Photo tourism: Exploring photo collections in 3D}, ACM Transactions on Graphics (SIGGRAPH Proceedings), 25(3), 835-846, 2006.

\bibitem{goesele07}
M. Goesele, N. Snavely, B. Curless, H. Hoppe, S. Seitz, \emph{Multi-View Stereo for Community Photo Collections},
Proceedings of ICCV 2007, Rio de Janeiro, Brasil, October 14-20, 2007. 

\bibitem{bay08}
H. Bay, A. Ess, T. Tuytelaars, L. Van Gool, \emph{SURF: Speeded Up Robust Features}, Computer Vision and Image Understanding (CVIU), Vol. 110, No. 3, pp. 346-359, 2008.

\bibitem{agarwal09}
S. Agarwal, N. Snavely, I. Simon, S. Seitz and R. Szeliski, \emph{Building Rome in a Day},
International Conference on Computer Vision, 2009, Kyoto, Japan.

\bibitem{lour09}
M. Lourakis and A. Argyros, \emph{SBA: A Software Package for Generic Sparse Bundle Adjustment}, ACM Trans. Math. Software, New York, NY, USA, 36(1), 1-30, 2009.

\bibitem{sinha09}
S. Sinha, D. Steedly and R. Szeliski, \emph{Piecewise Planar Stereo for Image-based Rendering}, Twelfth IEEE International Conference on Computer Vision (ICCV 2009), 2009.

\bibitem{frahm10}
J.-M. Frahm, P. Georgel, D. Gallup, T. Johnson, R. Raguram, C. Wu, Y.-H. Jen, E. Dunn, B. Clipp, S. Lazebnik, and M. Pollefeys, \emph{Building Rome on a Cloudless Day.},
Proceedings of the European Conference on Computer Vision, 2010.





\end{thebibliography}








% that's all folks
\end{document}


