%\documentclass[journal,10pt,draftclsnofoot,onecolumn]{IEEEtran}
\documentclass[journal]{IEEEtran}
\usepackage{algorithmicx}
\usepackage{algorithm}
\usepackage{algpseudocode}
\usepackage{siunitx}
\usepackage{hyperref}
\usepackage{mathrsfs}
\usepackage{amsfonts,amssymb}
\usepackage{color}
\usepackage{framed}
\usepackage{graphicx}
\usepackage{cite}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{lipsum}
\usepackage{CJKutf8}
\usepackage{leftidx}
\usepackage{booktabs}
\graphicspath{{./image/}}
\usepackage[justification=centering]{caption}
\usepackage{subcaption}
\captionsetup{font={scriptsize}}
\captionsetup[figure]{name=Fig., labelsep=space}
\captionsetup[table]{labelsep=quad}
\theoremstyle{plain}
\newtheorem{thm}{Theorem}[]
\newtheorem{lem}[thm]{Lemma}
\newtheorem{prop}[thm]{Proposition}
\newtheorem*{cor}{Corollary}
\theoremstyle{definition}
\newtheorem{defn}{Definition}[]
\newtheorem{conj}{Conjecture}[]
\newtheorem{exmp}{Example}[]
\theoremstyle{remark}

\begin{document}
\title{Visual Mapping and Relocalization\\
	{\large Learning and Work at Horizon Robotics}
}

\author{Ding Wendong
\thanks{This work was done as an internship student with the Autonomous Driving team, Horizon Robotics, Beijing 100190 (Corresponding email: wendong.ding@hobot.cc)}
}

\maketitle


\begin{abstract}
SFMLearner achieves an unsupervised way of training on video data to output both a depth map and a pose estimate, so we want to construct a learning-based system for relocalization. By training a Fully Convolutional Network (FCN), we obtained a network that uses stereo images to produce a depth map. Then, together with the trajectory, we can build a dense map (point cloud); during relocalization, we obtain more points to estimate the current pose. Unfortunately, this work is not yet ready to give a good output, but we still learned a lot from the implementation.
\end{abstract}

% Note that keywords are not normally used for peerreview papers.
\begin{IEEEkeywords}
unmanned vehicle, mapping, relocalization, local BA.
\end{IEEEkeywords}

\section{Introduction}
%SLAM its structure and parameters are highly depending on specification of sensors and application secne.
At present, learning-based methods cannot achieve results as good as feature-based methods, but they remain a promising way to accomplish visual localization. In our learning-based method, we try to use depth estimation to improve the performance of relocalization.

\section{Learning based Relocalization Exploring}
Learning-based methods have demonstrated good performance in depth prediction; the dense depth map is useful for VO/SLAM methods\cite{tateno2017cnn}, where the mapping system can perform dense reconstruction. To make it convenient to train a network to predict depth, an unsupervised training method is necessary to obtain enough data. Following the structure of the MonoDepth network\cite{godard2016unsupervised}, we can utilize stereo data to perform depth prediction.

\subsection{Network}
\subsubsection{Network Architecture}
A Fully Convolutional Network (FCN) is used to perform the depth prediction\cite{zhou2017unsupervised}. It is based on the VGG-16 network\cite{simonyan2014very}; in the upconvolutional layers, upsampling and convolutional layers are used. We use the conv and upconv nets to reconstruct the right image from the left image, and to reconstruct the left image from the right image\cite{godard2016unsupervised}. The loss consists of the photometric reconstruction residual, called the appearance matching loss. In addition, a left--right disparity consistency loss and a disparity smoothness loss are also included. The disparity smoothness term encourages disparities to be locally smooth via an L1 penalty on the disparity gradients. The network makes use of stereo image data and learns depth in an unsupervised manner without large-scale labelled depth data. The structure of the network is shown in Fig.~\ref{fig:net-structure}.
\begin{figure}
\centering
\includegraphics[width=0.7\linewidth]{../image/net-structure}
\caption{The structure of FCN depth estimation net}
\label{fig:net-structure}
\end{figure}
\subsubsection{Training data}
The training data only needs stereo camera images to train the net, avoiding the depth-labelled data which is hard to obtain.
\subsection{Visual Odometry Front-End}
\subsubsection{Tracking}

\subsubsection{Frame Selection}
\begin{itemize}
\item Traced points less than threshold.
\item ss
\end{itemize}
\subsection{Mapping}
After we obtain the trajectory and the depth of the image data, we can triangulate the image points and construct the point cloud.
\subsection{Geometry}
\subsubsection{Lie Algebra}
In pose estimation, we want to optimize the pose iteratively; the best way of perturbing the pose is to use the Lie algebra and the Lie group to represent poses and transforms.

\begin{CJK}{UTF8}{gbsn}
在位姿估计时, 使用迭代的方式可以获取更加精确的估计结果, 因此需要求解投影误差(光度值误差)对位姿的微分以及位姿的增量表示, 位姿矩阵满足李群$\mathbf{T}\in$SE(3). 位姿对应的李代数$\boldsymbol{\xi}\in\mathfrak{se}(3)$. 李群到李代数之间为对数映射, 经过对数映射, 李群转换为李代数; 经过指数映射, 李代数转换为李群. 假设$\boldsymbol{\xi}=[\boldsymbol{\rho},\boldsymbol{\phi}]$, $\mathbf{T}=\exp(\boldsymbol{\xi}^\wedge)=\begin{bmatrix}
\mathbf{R}& \mathbf{t}\\\mathbf{0}&1
\end{bmatrix}$,
可以得到
\begin{equation}
\exp(\boldsymbol{\xi}^\wedge) = 
\begin{bmatrix}
\exp{\boldsymbol{\phi}^\wedge}&\mathbf{J}\boldsymbol{\rho}\\\mathbf{0}&1
\end{bmatrix}
\end{equation}
%\begin{equation}
%\begin{split}
%\mathbf{R} &= \exp{\boldsymbol{\phi^\wedge}}\\ 
%\mathbf{t} &= \mathbf{J}\boldsymbol{\rho}
%\end{split}
%\end{equation}
令$\boldsymbol{\phi}=\theta\mathbf{a}$, 可得
\begin{equation}
\exp{\boldsymbol{\phi^\wedge}}=\cos\theta\mathbf{I} + (1-\cos\theta)\mathbf{a}\mathbf{a}^T+\sin\theta\mathbf{a}^\wedge
\end{equation}

\begin{equation}
\mathbf{J} =\frac{\sin\theta}{\theta}\mathbf{I}+\left(1-\frac{\sin\theta}{\theta}\right)\mathbf{a}\mathbf{a}^T+\frac{1-
\cos\theta}{\theta}\mathbf{a}^\wedge
\end{equation}
在对数映射中, 求解$\theta$有一种更加方便的方式
\begin{equation}
\theta=\arccos\frac{\text{tr}(\mathbf{R})-1}{2}
\end{equation}
注意在$\theta\rightarrow0$的时候, $\frac{\sin\theta}{\theta}\rightarrow1$, 因此$\mathbf{J}=\mathbf{I}$.
\end{CJK}

\subsubsection{Local BA}
\begin{CJK}{UTF8}{gbsn}
在后端中, 待优化的对象为窗口中的关键帧及其观测到的地图点. 优化问题可以转化为一个非线性最小二乘问题. 可以使用scipy.optimize中的least\_squares函数完成, 该函数可以使用三种迭代方法: LM, TRF, Dogbox. 通过jac参数指定雅可比矩阵, 通过jac\_sparsity参数指定该优化问题的稀疏性. 

如何实现该代码呢?
1. 将Map Point和Frame的XYZ和SE3堆在一起, 设置indices.
2. jac\_sparsity对应一个基于行的链表矩阵(lil matrix), 该结构用于增量式地构建稀疏矩阵. 
\end{CJK}

In the local BA module, we stack the observations into the Hessian matrix; every observation corresponds to a camera pose and a map point. Using the projection module, we can reproject a map point onto the image plane to compute the observation residual. Alternatively, if we know that a group of points (e.g.\ points on the road surface) lie in a plane, we can estimate the plane's parameter equation; by using the camera's pose and the plane parameter equation, we can obtain the homography between the world plane and the image\cite{ma2016cpa}. Suppose that the world plane's parameter equation is
\begin{equation}\label{eq:plane parameter}
\mathbf{n}^T\mathbf{p} + d = 0
\end{equation}
%Suppose that $\mathbf{N} = [\mathbf{n}^T,d]^T$, $\tilde{\mathbf{p}} = [\mathbf{p}^T,1]^T$, then we have
%\begin{equation}
%\mathbf{N}^T\tilde{\mathbf{p}} = 0
%\end{equation}
%If we have n points $\tilde{\mathbf{p}}_i, i\in[0,n]$ and $\mathbf{Pn}=$
where $\mathbf{n}$ is the plane normal, $d$ is the distance from the origin to the plane, and $\mathbf{p}$ is a point in the world plane; then we have
\begin{equation}
-\frac{\mathbf{n}^T\mathbf{p}}{d}=1
\end{equation}
If the camera's pose is $[\mathbf{R},\mathbf{t}]$, the point in image plane is
\begin{equation}\label{eq:reproject}
\mathbf{m}=\mathbf{K}\left(\mathbf{R}\mathbf{p} + \mathbf{t}\right)
\end{equation}
where $\mathbf{m}$ is the image point, $\mathbf{K}$ is the camera intrinsic parameter matrix. Substitute \eqref{eq:plane parameter} to \eqref{eq:reproject}, we have
\begin{equation}
\begin{split}
\mathbf{m}&=\mathbf{K}\left(\mathbf{R}\mathbf{p} + \mathbf{t}\right)\\
&= \mathbf{K}\left(\mathbf{R}\mathbf{p} -\frac{\mathbf{n}^T\mathbf{p}}{d}\mathbf{t}\right)\\
&= \mathbf{K}\left(\mathbf{R}\mathbf{p} -\frac{\mathbf{t}\mathbf{n}^T\mathbf{p}}{d}\right)\\
&= \mathbf{K}\left(\mathbf{R} -\frac{\mathbf{t}\mathbf{n}^T}{d}\right)\mathbf{p}\\
\end{split}
\end{equation}
then, the homography matrix is
\begin{equation}
\mathbf{H} = \mathbf{K}\left(\mathbf{R} - \frac{\mathbf{t}\mathbf{n}^T}{d}\right)
\end{equation}
where $\mathbf{H}\in \mathbb{R}^{3\times3}$ is the homography matrix.
In the normal case, the Hessian matrix in local BA is sparse because we do not add a geometric prior. Here, by using the plane and the homography constraint, we construct data associations between points.

Since we use TensorFlow as the CNN library, it is necessary to make the geometry module cooperate with the FCN module. We also wrote a simple Python front end and local BA to perform the geometry work.
\subsection{Implementation}
\subsubsection{Depth Estimation}
The test data is usually stored as a ROS bag file; to use it smoothly, we use the Python rosbag package to decode the messages in the bag file, and construct placeholders for the left and right inputs of the FCN net.
\subsubsection{Pose Estimation}
The Sophus library makes iteration in pose estimation convenient; there is a Python wrapper of Sophus, which depends on Eigency, a Python wrapper of Eigen. These packages are based on numpy and their speed is somewhat low, so we rewrote some Lie algebra functions to use the Lie algebra in the system.
\subsubsection{Local Bundle Adjustment}
The implementation of local BA is based on the SciPy package, which supports sparse non-linear least squares. To implement sparse bundle adjustment, we can set a row-based linked list sparse matrix to flag the non-zero positions in the Hessian matrix.

\subsection{Experiment}
\subsubsection{Kitti Raw Data}
The FCN net is trained on Kitti raw data, and we also test it on Kitti data. The Kitti raw data uses a color stereo camera with a baseline of 58.6cm. RGB data is used as input to the FCN. The result is shown in Fig.~\ref{fig:disparityKitti}.
\begin{figure*}[t]
\centering
\includegraphics[height=1.2in]{../image/depth/kitti/disparity000001}
\raisebox{0.1\height}{\includegraphics[height=1.03in]{../image/depth/kitti/0000000000}}
\includegraphics[height=1.2in]{../image/depth/kitti/disparity000025}
\raisebox{0.1\height}{\includegraphics[height=1.03in]{../image/depth/kitti/0000000025}}
%\includegraphics[height=1.2in]{../image/depth/kitti/disparity000050}
%\raisebox{0.1\height}{\includegraphics[height=1.03in]{../image/depth/kitti/0000000050}}
%\includegraphics[height=1.2in]{../image/depth/kitti/disparity000075}
%\raisebox{0.1\height}{\includegraphics[height=1.03in]{../image/depth/kitti/0000000075}}
\caption{Depth estimation on Kitti data, left is the depth image, right is the origin color left image.}
\label{fig:disparityKitti}
\end{figure*}

\subsubsection{Hobot Road Data}
The Hobot data is captured with the stereo camera OV580. This camera outputs gray images; when using the network, we duplicate the gray image to obtain 3 RGB channels. The baseline of the camera is 14.5cm. The result is not as good as on the Kitti data: the depth of objects far from the camera is coarse, as shown in Fig.~\ref{fig:disparityHobotRoad}, in which we can see that shadows do not affect the depth result.
\begin{figure*}[t]
\centering
\includegraphics[height=1.2in]{depth/hobot/disparity000001}
\raisebox{0.1\height}{\includegraphics[height=1.03in]{depth/hobot/000001}}\\
%\includegraphics[width=0.4\linewidth]{depth/hobot/disparity000025}
%\raisebox{0.1\height}{\includegraphics[width=0.4\linewidth]{depth/hobot/000500}}
\includegraphics[height=1.2in]{depth/hobot/disparity000050}
\raisebox{0.1\height}{\includegraphics[height=1.03in]{depth/hobot/001000}}\\
\includegraphics[height=1.2in]{depth/hobot/disparity000075}
\raisebox{0.1\height}{\includegraphics[height=1.03in]{depth/hobot/002000}}\\
\includegraphics[height=1.2in]{depth/hobot/disparity000100}
\raisebox{0.1\height}{\includegraphics[height=1.03in]{depth/hobot/003000}}
%\includegraphics[width=0.5\linewidth]{disparity000150}
%\includegraphics[width=0.5\linewidth]{gray000150}
\caption{Depth estimation on Hobot Road data, left is the depth image, right is the origin color left image}
\label{fig:disparityHobotRoad}
\end{figure*}
\subsubsection{Hobot Garage Data}
Fig.~\ref{fig:disparityHobotGarage} shows the experimental result on the Hobot garage data. The effect is good even though there are fluorescent lamps with high brightness in the scene, but dark objects yield poor estimates: the network `thinks' there exist some close objects.
\begin{figure*}[t]
\centering
\includegraphics[height=1.2in]{../image/depth/hobot_garage/disparity000001}
\raisebox{0.1\height}{\includegraphics[height=1.03in]{../image/depth/hobot_garage/000001}}\\
\includegraphics[height=1.2in]{../image/depth/hobot_garage/disparity000025}
\raisebox{0.1\height}{\includegraphics[height=1.03in]{../image/depth/hobot_garage/000250}}\\
\includegraphics[height=1.2in]{../image/depth/hobot_garage/disparity000100}
\raisebox{0.1\height}{\includegraphics[height=1.03in]{../image/depth/hobot_garage/002050}}
\caption{Depth estimation on Hobot garage data, left is the depth image, right is the origin color left image}
\label{fig:disparityHobotGarage}
\end{figure*}


\section{Conclusion}
I have been working and learning at Horizon Robotics for about 3 months. The work mainly focused on visual localization and mapping.

After working on the DSO front end, I understand its principles and code more deeply. I have rearranged some modules to make them clearer; due to limited time, this work is not yet able to give a good output.

The mapping and relocalization work mainly aims to correct the drift of the VIO system and to give a localization result that is independent of the running time. The localization result is good in some ground scenes and slightly worse in the garage scene.

The work on learning-based relocalization is not complete. The depth prediction module is trained and evaluated on both Kitti raw and Hobot data. I have also written some geometry modules to facilitate a complete learning-based relocalization module.

This report may also contain many errors, as it was written hastily. Some conclusions are only described without detailed data and figure demonstrations, and some works are incomplete and only give coarse results. Still, thanks to my mentors Qinrui Yan, Degang Yang, Dr.~Yinan Yu, and other partners in the AD team of Hobot. In this family, I have learned a lot from this work and met many experienced experts in the autonomous driving field.


\bibliographystyle{IEEEtran}
\bibliography{IEEEabrv,ref}

\end{document}
