
%% bare_jrnl.tex
%% V1.3
%% 2007/01/11
%% by Michael Shell
%% see http://www.michaelshell.org/
%% for current contact information.
%%
%% This is a skeleton file demonstrating the use of IEEEtran.cls
%% (requires IEEEtran.cls version 1.7 or later) with an IEEE journal paper.
%%
%% Support sites:
%% http://www.michaelshell.org/tex/ieeetran/
%% http://www.ctan.org/tex-archive/macros/latex/contrib/IEEEtran/
%% and
%% http://www.ieee.org/



% *** Authors should verify (and, if needed, correct) their LaTeX system  ***
% *** with the testflow diagnostic prior to trusting their LaTeX platform ***
% *** with production work. IEEE's font choices can trigger bugs that do  ***
% *** not appear when using other class files.                            ***
% The testflow support page is at:
% http://www.michaelshell.org/tex/testflow/


%%*************************************************************************
%% Legal Notice:
%% This code is offered as-is without any warranty either expressed or
%% implied; without even the implied warranty of MERCHANTABILITY or
%% FITNESS FOR A PARTICULAR PURPOSE! 
%% User assumes all risk.
%% In no event shall IEEE or any contributor to this code be liable for
%% any damages or losses, including, but not limited to, incidental,
%% consequential, or any other damages, resulting from the use or misuse
%% of any information contained here.
%%
%% All comments are the opinions of their respective authors and are not
%% necessarily endorsed by the IEEE.
%%
%% This work is distributed under the LaTeX Project Public License (LPPL)
%% ( http://www.latex-project.org/ ) version 1.3, and may be freely used,
%% distributed and modified. A copy of the LPPL, version 1.3, is included
%% in the base LaTeX documentation of all distributions of LaTeX released
%% 2003/12/01 or later.
%% Retain all contribution notices and credits.
%% ** Modified files should be clearly indicated as such, including  **
%% ** renaming them and changing author support contact information. **
%%
%% File list of work: IEEEtran.cls, IEEEtran_HOWTO.pdf, bare_adv.tex,
%%                    bare_conf.tex, bare_jrnl.tex, bare_jrnl_compsoc.tex
%%*************************************************************************

% Note that the a4paper option is mainly intended so that authors in
% countries using A4 can easily print to A4 and see how their papers will
% look in print - the typesetting of the document will not typically be
% affected with changes in paper size (but the bottom and side margins will).
% Use the testflow package mentioned above to verify correct handling of
% both paper sizes by the user's LaTeX system.
%
% Also note that the "draftcls" or "draftclsnofoot", not "draft", option
% should be used if it is desired that the figures are to be displayed in
% draft mode.
%
\documentclass[journal]{IEEEtran}
\usepackage{amsmath}

\usepackage{graphicx}
\usepackage{algorithmic}

% correct bad hyphenation here
\hyphenation{op-tical net-works semi-conduc-tor}


\begin{document}
%
% paper title
% can use linebreaks \\ within to get better formatting as desired
\title{Group Project - Final Report\\
			Unsupervised 3D Object Reconstruction and Visualisation}

\author{Dong Back Kim (z3299777)
        and Christian Mostegel (z3387217)\\
				Group \textbf{$\cdot$}}


% note the % following the last \IEEEmembership and also \thanks - 
% these prevent an unwanted space from occurring between the last author name
% and the end of the author line. i.e., if you had this:
% 
% \author{....lastname \thanks{...} \thanks{...} }
%                     ^------------^------------^----Do not want these spaces!
%
% a space would be appended to the last name and could cause every name on that
% line to be shifted left slightly. This is one of those "LaTeX things". For
% instance, "\textbf{A} \textbf{B}" will typeset as "A B" not "AB". To get
% "AB" then you have to do: "\textbf{A}\textbf{B}"
% \thanks is no different in this regard, so shield the last } of each \thanks
% that ends a line with a % and do not let a space in before the next \thanks.
% Spaces after \IEEEmembership other than the last one are OK (and needed) as
% you are supposed to have spaces between the names. For what it is worth,
% this is a minor point as most people would not even notice if the said evil
% space somehow managed to creep in.



% The paper headers
\markboth{COMP9517 - Computer Vision - University of New South Wales}%
{Kim \MakeLowercase{\textit{et al.}}: Unsupervised 3D Object Reconstruction and Visualisation}




% make the title area
\maketitle

\begin{abstract}

In this project, an unsupervised 3D object reconstruction technique using various computer vision algorithms is implemented and visualised. For key point and feature detection, the Scale-Invariant Feature Transform (SIFT) algorithm and the Speeded-Up Robust Features (SURF) algorithm are benchmarked and used. The key points are matched using a cross-checking algorithm and a kd-tree based search algorithm. With these matched key points, the fundamental matrix can be calculated, and the point cloud and cameras can be extracted from a list of images. In order to improve the quality of the point cloud, various techniques such as outlier rejection and bundle adjustment are applied. The point cloud and cameras are then visualised using OpenGL and the Qt framework.

\end{abstract}

\IEEEpeerreviewmaketitle

\begin{figure}
\includegraphics[width=0.5\textwidth]{Main.png} 
  
	\caption{Test result of 3 images of UNSW Quadrangle Sundial}
\label{fig:test-main}
\end{figure}

\section{Introduction and Related Work}
\IEEEPARstart{T}{he} fields of image-based modelling (IBM) and image-based rendering (IBR) are very active and long standing fields in computer vision. They are based in the centre of one of the two big schools in computer vision as they were described in one of the most important works in computer vision by David Marr 1982 \cite{marr82}; the reconstruction and the recognition school. The aim is to understand the world which was projected onto the image plane as well as the relation between images through reconstruction. In the last years there has been a lot of work in this field in relation to large scale online image collections \cite{snavely06}\cite{agarwal09}\cite{frahm10}, such as Google (www.google.com) or Flickr (www.flickr.com). The work of Snavely et al.\cite{snavely06} mainly focuses on establishing the relation between pictures of a specific landmark and enabling the user to intuitively browse the image collection with a sparse 3D representation of the scene. On the other hand, the work of Frahm et al.\cite{frahm10} aims to create a fully textured 3D model of the scene. Sinha et al.\cite{sinha09} try to refine a found sparse 3D representation with a piecewise planar approximation of the scene. \\
All of the mentioned recent approaches in this field have a feature-based matching step and a sparse bundle adjustment step at the beginning of the reconstruction process and are, in this matter, closely related to the work of Brown and Lowe \cite{brown05}. This common part is, depending on the literature, called structure and motion (SAM)\cite{brown05} or structure from motion \cite{snavely06}\cite{frahm10}. It copes with the challenge of estimating the 3D position of matched features and the camera parameters simultaneously using a given set of images of the same object. The approaches \cite{snavely06}\cite{frahm10} use additional geo information to initialise the position and rotation of new cameras. As an initial guess for the intrinsic camera parameters both approaches use the EXIF header information, where available, to get an initial guess for the focal length. A SIFT detector/descriptor \cite{lowe04} is used for feature matching. Then the fundamental matrix between two matched images is estimated with a RANSAC based method \cite{fischler87}. The fundamental matrix is either estimated with an 8-point algorithm as in \cite{hartley00} or with a 5-point algorithm \cite{nister04}. The point correspondences can then be projected into the 3D world via estimation of the shared perpendicular of the back projected rays \cite{beardsley97}. The bundle adjustment is then formulated as a minimisation problem and solved with a Levenberg-Marquardt algorithm, such as \cite{lour09}, \cite{brown05} or \cite{zach11}.
\\
Our project aimed to implement an IBM/IBR system which allows the user to browse large photo collections in a sparse 3D representation as in \cite{snavely06}. We succeeded in implementing a software system that allows the user to create a 3D model based on a set of images, with some restrictions on the initial pair. The scalability of our approach, like the approach of Snavely et al., turned out to be limited by the quadratic run-time.

The remaining document is structured as follows. In section \ref{sec:goal}, the definition and scope of the project are described. In section \ref{sec:decomposition}, the decomposition of the problem is explained. In this section, the entire problem is separated into two main components - one for 3D model reconstruction and another for visualisation and interaction with the 3D model. In section \ref{sec:id}, details of the implementations of each component mentioned in section \ref{sec:decomposition} are provided. In this section, details of each algorithm are described and explained. In section \ref{sec:eval}, performance and quality of the different algorithms are compared and analysed. Moreover, the test results against a number of data sets are included in this section. In section \ref{sec:cns}, the conclusion from all the tests, experiments and implementation is provided. Moreover, suggestions and recommendations are also included for further research and development. In section \ref{sec:sc}, a table showing the contribution of each team member is presented.


\section{Project Goal}
\label{sec:goal}
In this project, a piece of software extracting 3D models from a collection of images is to be implemented. The software consists of two main modules. One module is the sparse 3D object representation engine. The main role of the engine is analysing multiple images in order to produce a 3D model of the object and reconstruct the relative positions from which the source pictures were taken. The second module handles the rendering of the created model and the camera as well as the user interaction. The user should be able to navigate through the world freely using the keyboard and mouse. Furthermore, all images that are integrated in the system are viewable in a separate part of the GUI.

\section{Problem Decomposition}
\label{sec:decomposition}
The project can be divided into two separate and independent parts; the 3D model creation and the user visualisation and interaction. As this is a computer vision class, significantly more effort was put into the first part. The rest of the section will briefly outline all the subtasks involved in the 3D model creation and the visualisation.

\subsection{3D Model Creation}
\label{ssec:3d_model_creation}
The goal is to retrieve a sparse 3D object representation via point cloud. To achieve this, key points have to be extracted and matched. For this initial matching we implemented two different algorithms; kd-Tree and Cross-check Matching. Those matches do not yet have a geometrical meaning and are solely based on local similarity without any constraints.
\\
Geometrical consistency can be achieved by robustly estimating the fundamental matrix between two images. The matches that are consistent with the epipolar geometry of the estimated fundamental matrix are very likely to be real world correspondences.\\
In a further step, one has to organise consistent matches across larger sets of images to find the relation between the images in the set. We organised these matches in tracks as it was proposed by Snavely et al. \cite{snavely06}. \\
Now the relation between the images is defined through the geometrically consistent tracks. The next problem is to convert this relation into a 3D model. As this is a large-scale non-linear problem with many degrees of freedom, a closed form solution cannot be directly obtained. If it were directly applied to an optimisation scheme like Levenberg-Marquardt, it would be very likely to get stuck at a bad local minimum\cite{snavely06}. Thus, recent approaches like \cite{snavely06}\cite{frahm10} initialise the 3D model with a single pair of images. To find this initial pair, \cite{snavely06,frahm10} use heuristics based on the number of matching feature points and the relative baseline. Both approaches use real world geometric information where available. Although not explicitly stated in the papers, the lack of specifics on the topic of the initial pair initialisation leads to the assumption that the pair was also chosen subject to whether additional geometric information was available or not.\\
With the resulting initial guess for the camera positions, it is now possible to estimate the 3D location of the point correspondences. As the projected correspondences are very unlikely to cross in the 3D space, we estimate the mid-point of the shared perpendicular as suggested by Beardsley et al. \cite{beardsley97}.\\
To improve this vague guess of the 3D object, we use the bundle adjustment package SSBA \cite{zach11}. It optimises the camera and 3D point locations at the same time in an iterative optimisation process which is widely known under the name Levenberg-Marquardt algorithm.
After the 3D model has been initialised, further cameras can be added more easily either by simple initialisation with the parameters of the nearest neighbour \cite{brown05} or with a direct linear transform \cite{snavely06}. Note that the direct linear transform is based on the idea of a dominant plane in the 3D model.


\subsection{Visualisation and User Interaction}


The main goal of this part is to provide an easy and intuitive user interaction system so that users can experience the 3D model reconstructed in this application. Moreover, having a navigation interface for the 3D space is also very useful and critical for testing the outcome of the algorithms. OpenGL \cite{opengl} provides basic functionalities for rendering objects in 3D space, including basic matrix-based Euclidean 3D space transformations such as rotation, translation and scaling. Since the outcome of the 3D reconstruction algorithms is a list of points in 3D, i.e. a point cloud, OpenGL is an easy way to visualise the points in 3D space and was therefore chosen as the base technology for visualisation.


The base application runtime framework is the Qt framework \cite{qt}. Since the Qt framework supports the standard C++ programming language, it interoperates well with OpenCV \cite{opencv} and OpenGL \cite{opengl}. Moreover, the Qt framework allows the application to run based on an event-driven approach, so any type of user interaction can be easily captured by the application. Mouse and keyboard interactions are used in this application. The mouse is used to change the particular view image or to zoom in or out of the screen. The keyboard is used to rotate the space. Arrow keys are used to perform these 3D rotations because they are intuitive and straightforward.


In order to adjust the viewing angle of the 3D model, the origin of the world has to be defined. In this application, a simple average calculation for points in 3D space is used to calculate the origin of the world based on the point cloud.

\begin{equation}
Origin(x,y,z)=\frac{\sum\limits_{i=1}^{n} P(x_i,y_i,z_i)}{n}
\label{eq:origin}
\end{equation}

Equation~\eqref{eq:origin} shows the function used to find the origin of the point cloud. It is possible that, if there are a few outlier points located extremely far away from the majority of the points in the point cloud, the origin found by this function will be incorrect. However, since those outliers are eliminated by the outlier rejection algorithm used in the 3D reconstruction module, the origin of the point cloud is generally within an acceptable range.

In this application, the user can rotate the point cloud along the XYZ axes. Since those rotation transformations can be easily done by OpenGL \cite{opengl}, a description of rotation transformations in 3D Euclidean space is omitted in this paper.



\section{Implementation Details}
\label{sec:id}

\subsection{Overview}
\label{ssec:overview}



\begin{figure}[h!]
  
\includegraphics[width=0.5\textwidth]{overview.jpg} 
  \caption{System model of our project.}
  \label{fig:overview}
\end{figure}

Figure \ref{fig:overview} shows the overall architecture of the software. The Engine package is the one that analyses input images and produces a point cloud and a list of cameras. Once the engine produces these outcomes, the Visualiser can start rendering the result. The Visualiser also deals with user interactions such as navigating or selecting a different perspective.

\subsection{3D Model Creation}
\label{ssec:model}
The model creation is implemented with two open source libraries, which are namely OpenCV \cite{opencv} and SSBA \cite{zach11}.
OpenCV provides the I/O functionality and implementations of relevant key point detectors/descriptors (SIFT \cite{lowe04} and SURF \cite{bay08}) as well as a RANSAC \cite{fischler87} implementation to estimate the fundamental matrix for a given set of matched points. The SSBA library is used for sparse bundle adjustment via Levenberg-Marquardt optimisation scheme.

\subsubsection{Feature Detection}
The feature detection is done with OpenCV.
Our implementation makes it very easy to switch between SIFT and SURF detectors/descriptors by simply setting the right flag. Those two key point detection and feature extraction systems were evaluated for the task of sparse 3D reconstruction.

\subsubsection{Feature Matching}
Matching is mostly done by approximate nearest neighbour approaches such as \cite{arya98} and the kd-Tree based approach\cite{beis97} to increase the speed of this stage.
For feature matching we implemented two different algorithms; Cross-check Matching and kd-Tree. 
kd-Tree is an algorithm that allows constructing a multidimensional binary search tree \cite{bentley75}\cite{CLRS}, and Cross-check Matching is a simple brute-force based matching algorithm. Both algorithms were newly implemented during the software development phase of this project. kd-Tree was implemented in order to improve the overall software performance, because the Cross-check Matching algorithm is very expensive in terms of computing power. The kd-Tree algorithm's performance for construction and searching, like that of other binary search tree algorithms, is $O(n \log{n})$, whereas the Cross-check Matching algorithm is $O(n^2)$. However, since the kd-tree based matching algorithm showed a very poor matching result, the Cross-check Matching algorithm is the only one used for the further processing steps. Before performing the Cross-check Matching algorithm, it is assumed that the descriptors of the two images have already been prepared by using either the SIFT or the SURF algorithm. The following shows the pseudo code of the Cross-check Matching algorithm.

% \caption is only valid inside a float environment; use a plain heading instead
\noindent\textbf{Algorithm: Cross-Check Matching}
\label{CCM}
\begin{algorithmic}

\STATE resultA = match(image1.descriptors, image2.descriptors)
\STATE resultB = match(image2.descriptors, image1.descriptors)

\FOR{each match m1 in resultA}

\IF{$m1.size() \geq 1$} 
	\STATE filterA.append(m1)
\ENDIF

\ENDFOR

\FOR{each match m2 in resultB}

\IF{$m2.size() \geq 1$} 
	\STATE filterB.append(m2)
\ENDIF

\ENDFOR

\FOR{each match ma in filterA}
\FOR{each match mb in filterB}
	\IF{ma.queryIdx $==$ mb.trainIdx \\AND ma.trainIdx $==$ mb.queryIdx}
		\STATE commonMatch.append(ma)
	\ENDIF
\ENDFOR
\ENDFOR

\end{algorithmic}

Cross-check matching performs a cross validation of the matched descriptors once more to filter out unwanted matches. For example, the result of matching images ``A and B'' can differ from the result of ``B and A''. If there is any difference in the matching, the affected points are not reliable match points for further processing. It has been observed during testing that these points are the ones that have a high potential to cause confusion in RANSAC. The cross checking eliminates those mismatched points from the list and enforces a strict bidirectional one-to-one relation between the key points.
\subsubsection{Fundamental matrix estimation and track creation}
\label{sssec:f}
In our approach we use the fundamental matrix estimation as a means to impose geometric constraints on the matches found in the previous step. This is done with the RANSAC based implementation that is available in OpenCV\cite{opencv}. In the process of estimating the fundamental matrix the RANSAC algorithm finds matches that are consistent with the epipolar constraints of the fundamental matrix.\\
The fundamental matrix defines the relation between a pair of images, but as we are interested in integrating a multitude of images into the model, we have to find point correspondences across a larger set of images. Therefore, we organise our key points in tracks as proposed by Snavely et al. \cite{snavely06}. Following their approach we also reject apparently inconsistent tracks that have more than one key point in one image. A track can be seen as a representation of a real world 3D point and its projections in the images of the cameras which observe this point.
\subsubsection{Initial pair selection}
Large scale photo-collection reconstruction approaches use geographical information such as GPS-systems and electronic compasses \cite{snavely06} or geo-tag information \cite{frahm10} to get an initial guess on the camera positions. The initial pair is then selected such that there is a large number of matches between this image pair. As images taken from the exact same location tend to have the highest number of correspondences further heuristics have to be applied; otherwise the matching pair would not hold any disparity and therefore no depth information. Snavely et al. \cite{snavely06} select the initial image pair such that it has additionally a wide baseline and the 3D object cannot be well modelled by a single homography. As we unfortunately did not have any geo information available for our images, we simply decided to let the user select the initial pair.
\subsubsection{Initialisation of the first camera pair}
Although no paper addresses it explicitly, this problem is a very ill-posed task if no prior knowledge is available. The finding of the initial camera parameters as well as the 3D point correspondences is a non-linear problem that has too many degrees of freedom to be initialised unambiguously. To reduce the complexity of this problem, many recent approaches \cite{snavely06}\cite{frahm10}\cite{li08}\cite{roberts11} use the EXIF header information of images if available, to initialise the intrinsic parameters and \cite{snavely06}\cite{frahm10} also use geo information where available.
As we discovered, it is crucial to have a reasonable initial guess for the object shape in order for the bundle adjustment optimisation not to get stuck on a very bad minimum. Frahm et al. \cite{frahm10} use real world geo-location tags of image collections to initialise the position of the cameras. As the complexity of the automated initial pair selection without any prior geometric knowledge would fit a proper project on its own, we settled for letting the user  select the initial pair. We assume that a focal length guess is given. At the current implementation we simply set it hard-coded to a reasonable value ($f=1800$). We planned to read the EXIF header for this information, but unfortunately ran out of time for this. The only assumption that we have for the initial pair is that the images have a very low relative rotation to each other, are only translated along the x-axis and have a similar focal length. This might seem as a rather drastic restriction, but experiments have shown that bundle adjustment can recover the relative pose, even when the constraints are significantly violated.
\subsubsection{Initialisation of 3D points}
With the initialisation of the first camera pair it is possible to estimate the 3D location of the point correspondences. Every point in the image plane defines a 3D line through the point and the camera centre. In theory it would be enough to intersect the two image rays in the 3D world. In practice the point correspondences cannot be determined with sufficient accuracy to intersect the lines in 3D. Due to noise and quantisation the back projected image rays will be skewed and not meet in the 3D world. Therefore we estimate the mid-point of the shared perpendicular as proposed by Beardsley et al. \cite{beardsley97}. The approach works as follows:\\
Let $K_i$, the intrinsic camera matrix of camera $i$, be defined as:
\begin{equation}
	K_i =  \begin{bmatrix}
       f_i & 0 & u_i  \\[0.3em]
       0 & f_i & v_i \\[0.3em]
       0 & 0   & 1
     \end{bmatrix}
\end{equation}
where $f_i$ is the focal length of the camera $i$ and $[u_i,v_i]^T$ is the principal point offset, which we assume in the image centre.\\
Let the projection matrix $P_i$ be defined as:
\begin{equation}
	P_i = K_i \cdot [R_i | t_i']
\end{equation}
where $R_i$ is the $3$x$3$ rotation matrix with 3 degrees of freedom and let $t_i'$ be called the projective translation vector.\\
The projection $x_i^j$ of a 3D point $X_j$  into image $i$ is then defined as:
\begin{equation}
	x_i^j = P_i \cdot X_j
\end{equation}

\paragraph{step 1}
Normalise all key points $x_i$ in image $i$ with the estimated camera matrix $K_i^*$:\\
\begin{equation}
	\tilde{x_i} = (K_i^*)^{-1} \cdot x_i
\end{equation}
Now let the remaining projection matrix be denoted as $\tilde{P_i}$. The projection on the normalised key points $\tilde{x_i^j}$ can then be written as:
\begin{equation}
	\tilde{x_i^j} = (K_i^*)^{-1} \cdot P_i \cdot X_j = \tilde{P_i} \cdot X_j
\end{equation}
The remaining projection  matrix can then be decomposed into:
\begin{equation}
	\tilde{P_i} = [R_i|-R_i\cdot t_i]
\end{equation}
where $R_i$ is the rotation matrix and $t_i$ is the camera centre in the world coordinates of the 3D model.
\paragraph{step 2}
Define the 3D line through the camera centre and the intersection of the back projected point with the plane at infinity:
\begin{equation}
	X_i^\infty = \left(
    \begin{array}{c}
      R_i^{-1}\cdot \tilde{x_i}\\
				0
    \end{array}
  \right)
 = 
\left(
    \begin{array}{c}
 				D_i\\
				0
    \end{array}
  \right)
\end{equation}

Then the unknown 3D point $X$ lies on the back projected image rays and can be written as:
\begin{equation}
	X = \left(
    \begin{array}{c}
      t_i\\
				1
    \end{array}
  \right)
+ \alpha_i\cdot
\left(
    \begin{array}{c}
 				X_i^\infty\\
				0
    \end{array}
  \right)
\label{eq:ray}
\end{equation}
where $\alpha_i$ is an unknown parameter encoding the depth. Note that Equation \ref{eq:ray} is valid iff the point is visible from camera $i$.
\paragraph{step 3}
Calculate the mid point of the shared perpendicular:
\begin{equation}
	X_M = \Bigg( \sum_{i = 1}^n \big( I - D_i\cdot D_i^T \big) \Bigg)^{-1}    \cdot 
\Bigg(\sum_{i = 1}^n t_i -    \sum_{i = 1}^n  (t_i^T\cdot D_i)\cdot D_i  \Bigg)
\label{eq:project}
\end{equation}
where $I$ is the identity matrix and $n$ is the number of cameras that observe point $X$. For our implementation $n$ is always 2, because we used it only to initialise new 3D points. Only in the evaluation of SIFT and SURF (Subsection \ref{sift_vs_surf}) $n$ corresponds truly to the number of cameras that observe a point.

\subsubsection{Outlier rejection}

\begin{figure}[h!]
  
\includegraphics[width=0.5\textwidth]{matches_small.jpg} 
  \caption{This figure illustrates that not all matches that are consistent with the fundamental matrix have to be valid matches. At the margins of the images matches are found, although the images do not overlap in this area. }
  \label{fig:matches}
\end{figure}

The step in Subsubsection \ref{sssec:f} should enforce geometrical consistency. However, the fundamental matrix only defines epipolar lines in the corresponding image. This is illustrated in Figure \ref{fig:matches}. If an image contains repetitive patterns, the system may not be able to distinguish right from wrong matches, as they all might be consistent with the fundamental matrix. As buildings often contain such repetitive structures and iconic scene reconstruction mainly focuses on buildings, we decided to use this knowledge to remove obviously wrong matches. We estimate the major plane in the image by calculating the homography between the image pair. The key points that are consistent with the RANSAC based homography estimation of OpenCV are interpreted to be part of the plane of interest in the image. In a next step we calculate the distance from the centre of this plane to the camera that is farthest away from this centre.
All 3D points that are farther away from the centre of the plane than the farthest camera are treated as outliers and removed from the model. This step is needed in the example in Figure \ref{fig:matches}, because otherwise the bundle adjustment would not converge to a satisfying result.
\subsubsection{Bundle adjustment}
Bundle adjustment is a widely used procedure \cite{brown05}\cite{sinha09}\cite{li08}\cite{frahm10} in 3D reconstruction to refine the 3D point estimation as well as the projection matrices at the same time. The Levenberg-Marquardt algorithm turns out to be well suited for the task, and so most algorithms and implementations are based on it \cite{brown05}\cite{lour09}\cite{zach11}. To increase the speed of this iterative optimisation procedure the mentioned approaches make use of the sparsity of the problem. In our implementation we used the SSBA library\cite{zach11}. The cameras in our work have 7 degrees of freedom: 1 for the focal length, 3 for the rotation and 3 for the translation. The principal point and the lens distortion parameters are not optimised. The 3D points are optimised in all three coordinates.
\subsubsection{Integration of further cameras}
To integrate new cameras we follow the approach of Brown and Lowe \cite{brown05}. We initialise a new camera with the intrinsic and extrinsic parameters of its nearest neighbour. The nearest neighbour  in our case is defined as the image that has the highest number of shared tracks. Then bundle adjustment is applied to find the real location of the new camera. When this refinement is finished we can project new 3D points and remove the outliers as described above.

\subsection{Visualisation and  User Interaction}
\label{ssec:gui}

In the visualisation package, graphical user interface (GUI) and user interaction functionalities are provided. The GUI feature includes visualising the point cloud and cameras in a three dimensional (3D) space. Users can also interact with the system so they can navigate the 3D space or simply select another camera view presented in the screen. Since the point cloud is constructed in 3D, this feature is necessary to improve readability and visibility of the result in 3D space. This package uses the frameworks OpenGL \cite{opengl} and Qt \cite{qt}.
\\

There are a number of techniques used in this implementation in order to improve the user experience. First of all, the unit scale for rendering the point cloud is automatically adjusted. Without this feature, it can be very difficult to understand the size or distribution of the points in the point cloud, because it can be too large or too small, in which case the user may fail to see a point. Second, the world coordinate system is moved to the centre of the point cloud. In order to examine how the 3D model is constructed from different angles, the world coordinate system must be translated to the centre of the 3D model instead of the absolute origin. This allows the user to rotate the 3D model along any of the XYZ axes and yet stay focused on the 3D model. Other than the basic 3D rendering functions provided by OpenGL, all other transformation and perspective adjustment functionalities based on images and the point cloud were entirely implemented by the team during the development period, without any involvement of third-party libraries or rendering engines.


\section{Evaluation}
\label{sec:eval}

\begin{figure}[!htbp]
  \centering
  \includegraphics[width=0.5\textwidth]{time.pdf}
  \caption{Comparison of SIFT and SURF in matters of key point detection time and feature extraction time.}
  \label{fig:time}
\end{figure}

\begin{figure}[!htbp]
  \centering
  \includegraphics[width=0.5\textwidth]{ransac-ratio.pdf}
  \caption{Ratio between matches that are consistent with the RANSAC estimation of the fundamental matrix and the total number of cross-checked feature matches.}
  \label{fig:ransac-ratio}
\end{figure}

\begin{figure}[!htbp]
  \centering
  \includegraphics[width=0.5\textwidth]{ratio.pdf}
  \caption{Comparison of the ratio $outliers/inliers$ of SIFT and SURF.}
  \label{fig:ratio}
\end{figure}

\begin{figure}[!htbp]
  \centering
  \includegraphics[width=0.5\textwidth]{tracksize.pdf}
  \caption{Average track size for different numbers of images for outliers and inliers.}
  \label{fig:tracksize}
\end{figure}

Our evaluation can be divided into two parts. The first part is to evaluate the quality of our approach on several small datasets. The datasets were chosen rather small because the 3D reconstruction is computationally expensive and has a quadratic run time, as does the approach of Snavely et al.~\cite{snavely06}. The second part does not evaluate our approach, but is a direct comparison of the performance and computing time of the SIFT and SURF systems. This part was evaluated because we could not find any comparison of the feature detection systems in the domain of object reconstruction, yet all works we found used SIFT rather than SURF.

\subsection{Testing and Results}

\begin{figure}
  \centering
  \includegraphics[width=0.5\textwidth]{emblem.png}
  \caption{Test result of 5 images of the UNSW emblem.}
  \label{fig:test-emblem}
\end{figure}

\begin{figure}
  \centering
  \includegraphics[width=0.5\textwidth]{norte_dame.png}
  \caption{Test result of 10 images from the Notre Dame data set.}
  \label{fig:test-norte-dame}
\end{figure}

\begin{figure}
  \centering
  \includegraphics[width=0.5\textwidth]{pilars.png}
  \caption{Test result of 3 images from the temple data set.}
  \label{fig:test-pilars}
\end{figure}

For the evaluation we chose a mix of already existing and self-acquired datasets. We have chosen the TempleRing dataset~\cite{seitz06} as a very simple dataset without background clutter. The property that the ground truth projection matrices and a bounding box were given helped us to find errors in our implementation. It also showed us that the assumptions we made in the camera initialisation can be violated to a certain extent without the system collapsing. We assume that the initial pair of images is not rotated relative to each other and that the cameras have only an x-axis translation relative to each other. Furthermore, we assume that the initial focal length guess is not too far from the real focal length. This dataset violates all of these restrictions. All cameras are ordered in a circle and face the centre of the object. They have no x-axis translation but mainly y-axis translation. As they all face the centre, there is significant rotation between each pair of cameras. Further, the focal length was initialised with $f = 1800$ whereas the ground truth focal length is $f=1500$. Although all the restrictions were violated, our approach still managed to recover the 3D model successfully if we initialised it with a pair of cameras that were next to each other. If we initialised it with a pair that was not directly adjacent, our approach failed. We discovered that the initialisation is crucial for the approach to work, no matter which dataset. If the initialisation is bad, bundle adjustment gets stuck in a bad minimum. \\
We also tested our approach on a subset of 10 images of the Notre Dame dataset~\cite{snavely06}. As we had no geometric prior knowledge about the cameras, we initialised the first camera pair with the camera matrices that were provided by the Microsoft research group. We did not use any of the other provided information, which means that all parameters of the other 8 cameras as well as the 3D points were recovered by our algorithm. The output is reasonably good considering that we only used ten images, which contain 2 images with significant occlusion and view point changes. We used the images with the indices (116,297| 21,22,75,81,260,306,414,517) for the evaluation. The first two were used for the initialisation.\\

We acquired 2 datasets on our own, namely the UNSW emblem (Figure~\ref{fig:test-emblem}) and the UNSW quadrangle sundial (Figure~\ref{fig:test-main}). The second dataset consisted of 3 images without significant rotation along the y-axis, whereas the first dataset, which consists of 5 images, contains 2 strongly rotated images. Both objects could be successfully reconstructed.


\subsection{SIFT vs. SURF}
\label{sift_vs_surf}

All recent approaches seem to prefer SIFT~\cite{lowe04} over SURF~\cite{bay08}; those approaches include \cite{snavely06,frahm10,li08,roberts11}. None of the mentioned approaches justifies this preference. This motivated us to investigate which of the two key point detection/description systems performs better in the task of 3D reconstruction.\\
We used the famous ``Temple of the Dioskouroi'' dataset~\cite{seitz06} for this evaluation for two reasons. For one, the dataset provides a tight bounding box for the object as well as the exact intrinsic and extrinsic camera parameters. These properties allow a controlled comparison of the two systems. Secondly, although it is a miniature of the real temple, the viewing angles for the circular subset, called TempleRing, very closely approximate the possible viewing angles on the real temple.\\
The experiment was set up as follows:\\
For different-sized subsets of the TempleRing dataset, we recorded the computing time as well as several indicators of the detection and matching quality. The subsets were chosen in the sequential order in which they are presented in the dataset. This was done to ensure that the subsets really show an overlap in the experiment. The matching was done by a brute-force cross-checking algorithm as described in Subsection~\ref{ssec:3d_model_creation}. No radius match of any kind was applied, to ensure that neither feature extraction system is preferred.
To estimate the 3D points we used the ground truth projection matrices of the images. We estimated the 3D points with Equation~\ref{eq:project}, using all measurements available. This means that if a track consisted of key points from 4 images, we used all 4 key points for the 3D point estimation.
\paragraph{} In Figure~\ref{fig:time} one can see the computing time of the two steps involved in both processes: the key point detection step and the feature extraction step. In the first stage, the SURF algorithm performed two times faster than the SIFT algorithm, whereas in the second stage the SIFT algorithm needed even four times more time than the SURF algorithm.\\
But as the following results show, this speed up of 2 and 4 in the stages is a trade-off between time and accuracy.
\paragraph{} Figure~\ref{fig:ransac-ratio} shows the ratio between the matches that are consistent with the RANSAC estimation of the fundamental matrix and the total number of features that were matched with cross-check matching. Image pairs that showed fewer than 20 matches after applying the RANSAC estimation were discarded as obvious outliers, as suggested by Brown and Lowe~\cite{brown05}. Note that on average SIFT produced approximately 1.25 times more key points than the SURF algorithm. Even in this stage the SURF algorithm shows approximately 0.8 times inferior results. In relation to the increasing number of images, which in this case also means an increasing scattering of the camera positions, both algorithms show the same behaviour. The increasing number of images causes more geometrically inconsistent matches. This is not surprising, as image patches on the temple model look very similar independent of the view point.
\paragraph{} In Figure \ref{fig:ratio} one can see the ratio between outliers and inliers. An outlier was detected when the 3D point estimation would fall outside of the tight bounding box of the object. A soft outlier means that the 3D point is within a bounding box which has a 10 percent buffer on each side, but outside of the tight bounding box. A hard outlier, on the other hand, is detected when it is even outside of the buffered bounding box.
The results show that the SURF algorithm has an approximately 2 times higher $outlier/inlier$ ratio than the SIFT algorithm. This is true for both hard and soft outliers. Both algorithms show a slowly rising behaviour in the number of soft outliers and a more unpredictable behaviour concerning the hard outliers. The number of hard outliers also shows an upward trend.
\paragraph{} Figure~\ref{fig:tracksize} shows the analysis of the average track size of outliers and inliers. We suspected that the outliers might mainly be caused by tracks with a low number of key points. The experiment showed, however, that there seems to be no significant relation between the track size and whether or not a point is an outlier.

\section{Conclusion and Suggestions}
\label{sec:cns}

We have successfully implemented a piece of software that enables the user to create 3D models in a simple way. We have shown that our approach can successfully recover the 3D model as well as the camera parameters even when the restrictions we made for the initial pair were violated. The restrictions we made cause the system to fail if the violation is too severe. Our approach cannot recover from a bad initialisation. Adding a new camera with the nearest-neighbour approach works fine. The bundle adjustment is a vital part of our system and lends it a certain degree of robustness. In future work we hope to remove the restrictions for the initialisation, and base it on the relation between the cameras which is defined through the fundamental matrix. The problem is that the fundamental matrix does not fully define the relative relation between two cameras: 5 degrees of freedom remain even if the intrinsic parameters of both cameras are known. We tried to initialise the cameras within a quasi-Euclidean frame, which restricts the degrees of freedom further with a vague guess of the camera matrix~\cite{beardsley97}. Unfortunately, we could not get this feature working before the deadline. In further work, we hope to resolve these problems and find a way to initialise the camera position without the usage of prior knowledge. As a more sophisticated way for the camera initialisation, we came up with the idea to use the information of the epipole of the new image in the already initialised images. The epipole in two images to the third image can be interpreted as the 3D world correspondence. Thus, one could estimate the position of the new camera by finding the mid-point of the shared perpendicular of the lines through the epipoles and the image centres. This would give the translation of the camera; then the 3D point correspondences can be used to estimate the rotation and the focal length of the new camera.

\section{Student Contribution}
\label{sec:sc}

While Christian Mostegel mainly focused on the implementation of object reconstruction in 3D space, including tasks such as fundamental matrix estimation and point cloud generation, Dong Kim contributed to the high-level software architecture design and user interface design, including Qt user interaction and OpenGL visualisation of the outcomes of Christian's work. In addition, Christian also put a lot of effort into benchmarking different algorithms and testing various sample data sets to evaluate the overall performance of the application implemented in this project. Preparations for the presentations and documentation work for the reports were completed by both members. The following table shows the contributions of the team members.

\begin{table}
\begin{center}
\tablehead{}
\begin{tabular}{|c|c|c|}
\hline
 Task &
 Dong &
\arraybslash Christian\\\hline
 Key point and feature detection &
 Y &
\arraybslash Y\\\hline
 Cross Checking Matching Algorithm &
 Y &
\arraybslash Y\\\hline
 Kd-Tree Matching Algorithm &
 Y &
\arraybslash \\\hline
 Fundamental Matrix Estimation &
~
 &
\arraybslash Y\\\hline
 Track Creation &
~
 &
\arraybslash Y\\\hline
 Camera Initialisation &
~
 &
\arraybslash Y\\\hline
 Point Cloud Initialisation &
~
 &
\arraybslash Y\\\hline
 Outlier Rejection &
~
 &
\arraybslash Y\\\hline
 Bundle Adjustment &
~
 &
\arraybslash Y\\\hline
 Camera Integration &
~
 &
\arraybslash Y\\\hline
 General 2D GUI Programming &
 Y &
~
\\\hline
 Point Cloud Visualisation &
Y
 &
~
\\\hline
 Camera Visualisation &
 Y &
~
\\\hline
 3D Navigation &
 Y &
~
\\\hline
 Benchmark: SIFT and SURF &
~
 &
\arraybslash Y\\\hline
 Benchmark: Cross-Checking and kd-Tree &
~
 &
\arraybslash Y\\\hline
 {\textquotedblleft}Notre Dame{\textquotedblright} Testing &
~
 &
\arraybslash Y\\\hline
 {\textquotedblleft}Quadrangle Sundial{\textquotedblright}
Testing &
~
 &
\arraybslash Y\\\hline
 {\textquotedblleft}UNSW Emblem{\textquotedblright} Dataset
Testing &
~
 &
\arraybslash Y\\\hline

\end{tabular}

\end{center}
\caption{Task distributions and contributions}
\end{table}

% and "HIS" in caps to complete the first word.



% Can use something like this to put references on a page
% by themselves when using endfloat and the captionsoff option.
\ifCLASSOPTIONcaptionsoff
  \newpage
\fi



\begin{thebibliography}{1}
\bibitem{opencv} OpenCV (Open Source Computer Vision), http://opencv.willowgarage.com.

\bibitem{opengl} OpenGL (Open Graphics Library) , Silicon Graphics International Corp., http://www.opengl.org.

\bibitem{qt} Qt framework, Nokia Corporation,
http://qt.nokia.com.

\bibitem{marr82}
D. Marr, \emph{Vision. A Computational Investigation into the Human Representation and Processing of Visual Information}, W.H. Freeman and Company,1982.

\bibitem{fischler87}
M. Fischler and R. Bolles. \emph{Random sample consensus:
a paradigm for model fitting with applications to image
analysis and automated cartography}, Readings in computer vision:
issues, problems, principles, and paradigms, 726-740, 1987.



\bibitem{beardsley97}
P. Beardsley, A. Zisserman, D. Murray, \emph{Sequential Update of Projective and Affine Structure from Motion},
International Journal of Computer Vision, Volume 23, Number 3, 235-259, 1997.

\bibitem{beis97}
J. Beis, D. Lowe, \emph{Shape Indexing using Approximate
Nearest-Neighbor Search in High-Dimensional Spaces}, In
Proceedings of the Interational Conference on Computer Vision
and Pattern Recognition, pages 1000–1006, 1997.

\bibitem{arya98}
S. Arya, D. Mount, N. Netanyahu, R. Silverman,
A. Wu, \emph{An optimal algorithm for approximate
nearest neighbor searching fixed dimensions}, J. of the ACM 45,
6, 891-923,1998.

\bibitem{hartley00}
R. Hartley and A. Zisserman, \emph{Multiple View Geometry
in Computer Vision}, Cambridge University Press, ISBN:
0521623049, 2000.

\bibitem{nister04}
D. Nist\'{e}r \emph{An efficient solution to the five-point relative pose problem}, PAMI 26, 756-770, 2004.

\bibitem{lowe04}
D. Lowe, \emph{Distinctive image features from scale-invariant keypoints}, IJCV, 60, 91-110, 2004

\bibitem{brown05}
M. Brown and D. Lowe, \emph{Unsupervised 3D object recognition and reconstruction in unordered datasets}, International Conference on 3-D Digital Imaging and Modeling (3DIM 2005), Ottawa, Canada, June 2005.

\bibitem{seitz06}
S.Seitz, B. Curless, J. Diebel, D. Scharstein, R. Szeliski,
\emph{A Comparison and Evaluation of Multi-View Stereo Reconstruction Algorithms},
CVPR, vol. 1, 519-526, 2006. 

\bibitem{snavely06}
N. Snavely, S. Seitz, R. Szeliski, \emph{Photo tourism: Exploring photo collections in 3D}, ACM Transactions on Graphics (SIGGRAPH Proceedings), 25(3), 835-846, 2006.

\bibitem{goesele07}
M. Goesele, N.Snavely, B. Curless, H. Hoppe, S. Seitz, \emph{Multi-View Stereo for Community Photo Collections},
Proceedings of ICCV 2007, Rio de Janeiro, Brasil, October 14-20, 2007. 

\bibitem{bay08}
H. Bay, A. Ess, T. Tuytelaars, L. Van Gool, \emph{SURF: Speeded Up Robust Features}, Computer Vision and Image Understanding (CVIU), Vol. 110, No. 3, pp. 346-359, 2008.

\bibitem{li08}
X. Li, C. Wu, C. Zach, S. Lazebnik,J.-M. Frahm, \emph{Modeling and Recognition of Landmark Image Collections Using Iconic Scene Graphs}, ECCV, 2008.

\bibitem{agarwal09}
S. Agarwal, N. Snavely, I. Simon, S. Seitz and R. Szeliski,\emph{Building Rome in a Day},
International Conference on Computer Vision, 2009, Kyoto, Japan.

\bibitem{lour09}
M. Lourakis and A. Argyros, \emph{SBA: A Software Package for Generic Sparse Bundle Adjustment}, ACM Trans. Math. Software, New York, NY, USA, 36(1), 1-30, 2009.

\bibitem{sinha09}
S. Sinha, D. Steedly and R. Szeliski, \emph{Piecewise Planar Stereo for Image-based Rendering},Twelfth IEEE International Conference on Computer Vision (ICCV 2009), 2009.

\bibitem{frahm10}
J.-M. Frahm, P. Georgel, D. Gallup, T. Johnson, R. Raguram, C. Wu, Y.-H. Jen, E. Dunn, B. Clipp, S. Lazebnik, and M. Pollefeys, \emph{Building Rome on a Cloudless Day.},
Proceedings of the European Conference on Computer Vision, 2010.


\bibitem{roberts11}
R. Roberts,S. Sinha, R.Szeliski,D. Steedly, \emph{Structure from motion for scenes with large duplicate structures}, CVPR, 2011.

\bibitem{zach11}
C. Zach, \emph{Simple Sparse Bundle Adjustment (SSBA)}, http://www.inf.ethz.ch/personal/chzach/opensource.html, version 2.0, June 10, 2011.

\bibitem{bentley75}
J. L. Bentley, \emph{Multidimensional Binary Search Trees Used for Associative Searching}, Communications of the ACM, 18(9):509-517, 1975.

\bibitem{CLRS}
T.Cormen, C. Leiserson, R. Rivest, C. Stein, \emph{Introduction to Algorithms}, Chapter 10., MIT Press and McGraw-Hill, 2010.

\end{thebibliography}



% that's all folks
\end{document}


