\documentclass[a4paper,twoside]{article}
\usepackage{blindtext}
\usepackage{geometry}

% Page margin layout
\geometry{left=2.3cm,right=2cm,top=2.5cm,bottom=2.0cm}

\usepackage{listings}
\usepackage{xcolor}
% \usepackage{geometry} % redundant: geometry is already loaded above
\usepackage{amsmath}
\usepackage{float}
\usepackage{hyperref}

% \usepackage{graphics} % redundant: superseded by graphicx below
\usepackage{graphicx}
\usepackage{subfigure}
\usepackage{epsfig}
% \usepackage{float} % redundant: float is already loaded above

\usepackage{algorithm}
\usepackage[noend]{algpseudocode}

\usepackage{booktabs}
\usepackage{threeparttable}
\usepackage{longtable}
% \usepackage{listings} % redundant: listings is already loaded above
\usepackage{tikz}
\usepackage{multicol}

% cite package, to clean up citations in the main text. Do not remove.
\usepackage{cite}

% \usepackage{color,xcolor} % redundant: xcolor is already loaded above and supersedes color

%% The amssymb package provides various useful mathematical symbols
\usepackage{amssymb}
%% The amsthm package provides extended theorem environments
\usepackage{amsthm}
\usepackage{amsfonts}
\usepackage{enumerate}
\usepackage{enumitem}
% \usepackage{listings} % redundant: listings is already loaded above

\usepackage{indentfirst}
\setlength{\parindent}{2em} % Make two letter space in the first paragraph
\usepackage{setspace}
\linespread{1.5} % Line spacing setting
\usepackage{siunitx}
\setlength{\parskip}{0.5em} % Paragraph spacing setting
\usepackage{abstract}
\renewcommand{\abstractnamefont}{\Large\bfseries}

\renewcommand{\figurename}{Figure}
\renewcommand{\lstlistingname}{Code} 
\renewcommand{\tablename}{Sheet}
\renewcommand{\contentsname}{Catalog}

\graphicspath{ {images/} }

%%%%%%%%%%%%%
\newcommand{\StudentNumber}{22920202200764}  % Fill your student number here
\newcommand{\StudentName}{BenChen Liu}  % Replace your name here
\newcommand{\PaperTitle}{DIP Survey Report}  % Change your paper title here
\newcommand{\PaperType}{Digital Image Processing} % Replace the type of your report here
\newcommand{\Date}{Dec. 12, 2022}
\newcommand{\College}{School of Informatics Xiamen University}
\newcommand{\CourseName}{Digital Image Processing}
%%%%%%%%%%%%%

%% Page header and footer setting
\usepackage{fancyhdr}
\usepackage{lastpage}
\pagestyle{fancy}
\fancyhf{}
% This requires the document to be twoside
\fancyhead[LO]{\texttt{\StudentName }}
\fancyhead[LE]{\texttt{\StudentNumber}}
\fancyhead[C]{\texttt{\PaperTitle }}
\fancyhead[R]{\texttt{Page {\thepage}, Total \pageref*{LastPage}}}

\title{\PaperTitle}
\author{\StudentName}
\date{\Date}


\lstset{
	basicstyle          =   \sffamily,          % base code style
	keywordstyle        =   \bfseries,          % keyword style: bold
	commentstyle        =   \rmfamily\itshape,  % comment style: italic roman
	stringstyle         =   \ttfamily,  % string style: monospaced
	flexiblecolumns,                % better inter-character spacing
	numbers             =   left,   % line numbers on the left
	showspaces          =   false,  % do not visualize spaces (looks cluttered)
	numberstyle         =   \footnotesize\ttfamily,    % line-number style; \zihao{-5} requires ctex (not loaded), so use \footnotesize
	showstringspaces    =   false,
	captionpos          =   t,      % caption position: t = top
	frame               =   lrtb,   % draw a frame on all four sides
}

\lstdefinestyle{PythonStyle}{
	language        =   Python, % language: Python
	basicstyle      =   \footnotesize\ttfamily, % \zihao{-5} requires ctex (not loaded); \footnotesize is the closest stock size
	numberstyle     =   \footnotesize\ttfamily,
	keywordstyle    =   \color{blue},
	keywordstyle    =   [2] \color{teal},
	stringstyle     =   \color{magenta},
	commentstyle    =   \color{red}\ttfamily,
	breaklines      =   true,   % automatic line breaking; avoid overly long lines anyway
	columns         =   fixed,  % fixed columns for even character spacing
	basewidth       =   0.5em,
}

\lstdefinestyle{CppStyle}{
	language        =   C++, % language: C++
	basicstyle      =   \footnotesize\ttfamily, % \zihao{-5} requires ctex (not loaded); \footnotesize is the closest stock size
	numberstyle     =   \footnotesize\ttfamily,
	keywordstyle    =   \color{blue},
	keywordstyle    =   [2] \color{teal},
	stringstyle     =   \color{magenta},
	commentstyle    =   \color{red}\ttfamily,
	breaklines      =   true,   % automatic line breaking; avoid overly long lines anyway
	columns         =   fixed,  % fixed columns for even character spacing
	basewidth       =   0.5em,
}

\algnewcommand\algorithmicinput{\textbf{Input:}}
\algnewcommand\algorithmicoutput{\textbf{Output:}}
\algnewcommand\Input{\item[\algorithmicinput]}%
\algnewcommand\Output{\item[\algorithmicoutput]}%

\usetikzlibrary{positioning, shapes.geometric}


\begin{document}

\makeatletter % change default title style
\renewcommand*\maketitle{%
	\begin{center}
		\bfseries  % title 
		{\LARGE \@title \par}  % LARGE typesetting
		\vskip 1em  %  margin 1em
			{\global\let\author\@empty}  % no author information
			{\global\let\date\@empty}  % no date
		\thispagestyle{empty}   %  empty page style
	\end{center}%
	\setcounter{footnote}{0}%
}
\makeatother

\thispagestyle{empty}

\vspace*{1cm}

\begin{figure}[h]
	\centering
	\includegraphics[width=6.0cm]{logo.png}
\end{figure}

\vspace*{1cm}

\begin{center}
	\Huge{\textbf{\PaperType}}

	\Large{\PaperTitle}
\end{center}

\begin{table}[h]
	\centering
	\begin{Large}
		\renewcommand{\arraystretch}{1.5}
		\begin{tabular}{p{3cm} p{5cm}<{\centering}}
			Name       & \StudentName   \\
			\hline
			Student ID & \StudentNumber \\
			\hline
			Date       & \Date          \\
			\hline
			Institute  & \College       \\
			\hline
			CourseName & \CourseName    \\
			\hline
		\end{tabular}
	\end{Large}
\end{table}
\newpage

\begin{abstract}
	This report is to sort out the development history and various
	characteristics of object detection algorithms based on deep learning.
	My understanding of object detection algorithms started
	from the robotics class, because YOLOv3 was used for object detection there.
	Therefore, I also chose object detection as the topic of my DIP report.
	The object detection algorithm initially uses non-deep learning methods
	such as DPM Detector and other algorithms. However, due to its accuracy,
	object detection algorithm based on deep learning has gradually become
	the mainstream algorithm of object detection algorithm, which can be
	divided into One Stage Algorithm (Yolo v1,v3,v5 SSD) and Two Stage Algorithm(R-CNN, SPPNet, Fast R-CNN).
	In 2020, there is also a one stage object detection algorithm DETR which is based on Transformer architecture.
	The improvement of these algorithms has enabled people to obtain better and better
	results on the existing object detection tasks.
\end{abstract}
\noindent{Keywords:\textbf{ Object Detection\quad Deep Learning \quad YOLO \quad R-CNN \quad DETR}}

\newpage
\tableofcontents

\newpage
\section{Introduction}
\subsection{The main task of object detection}
\paragraph{Object detection} is a computer technology used for
detecting instances of objects of a specified class in digital images or videos.\textsuperscript{\cite{review}}
Since every object class has its own special features that help in classifying the class,
the main goal of object detection is identifying a certain object by recognising its features.
It is widely used in computer vision tasks such as image annotation, pedestrian detection,
face recognition and many other fields.

\begin{figure}[htbp]
	\centering
	\subfigure[YOLO Detection Example]{
		\begin{minipage}[t]{0.48\linewidth}
			\centering
			\includegraphics[width=8cm]{yolo_detection_demo.jpeg}
			% \caption{}
		\end{minipage}%
	}%
	\subfigure[Image Annotation]{
		\begin{minipage}[t]{0.48\linewidth}
			\centering
			\includegraphics[width=8cm]{image_annotation.png}
			% \caption{}
		\end{minipage}%
	}%
	\caption{Main Task of Object Detection}
\end{figure}

\subsection{Some previous research}
Before 2014, scholars in the field of object detection had done a
lot of research, and many practical algorithms had been proposed.
Such as SIFT \textsuperscript{\cite{ref1}} algorithm,
Viola-Jones object detection framework \textsuperscript{\cite{ref3}}
algorithm, HOG algorithm \textsuperscript{\cite{ref2}} and so on.
However, these algorithms are based on non-deep learning methods,
and they also have good performance in accuracy. For example,
the current SIFT algorithm can achieve real-time target detection,
and the target feature points obtained by this algorithm are few and precise.
Such algorithm design greatly reduces the computational load of the computer.
This algorithm also has many advantages, such as feature point matching and so on.
However, compared with the object detection algorithm based on deep learning,
there is still a certain gap in accuracy.

\subsection{Two ideas of object detection in Deep Learning}
Object detection algorithms based on deep learning are mainly
divided into two schools, One Stage Model and Two Stage Model, respectively.
The criterion for genre classification is the way in which the proposed area (anchor box) is treated.
Generally speaking, the processing speed of One Stage Model is relatively fast, but the accuracy is relatively low.
Two Stage Model has high accuracy but can not achieve fast recognition.

\begin{figure}[htbp]
	\centering
	\subfigure[One Stage Model]{
		\begin{minipage}[t]{0.48\linewidth}
			\centering
			\includegraphics[width=8cm]{one_stage_model.png}
			% \caption{}
		\end{minipage}%
	}%
	\subfigure[Two Stage Model]{
		\begin{minipage}[t]{0.48\linewidth}
			\centering
			\includegraphics[width=8cm]{two_stage_model.png}
			% \caption{}
		\end{minipage}%
	}%
	\caption{Two Types of Object Detection Model}
\end{figure}
\paragraph*{}
\paragraph{Two Stage Model}
The main idea of the Two Stage Model can be divided into two steps. Firstly,
we generate a set of candidate regions with a region proposal network. Then, we extract image
features by a convolutional neural network. Finally, we merge the outputs of these
two steps. R-CNN, for example, uses this architecture for object detection.
Although the model effectively extracts image features using a pre-trained
convolutional neural network, it is still very slow.
\paragraph*{}
For instance, if thousands
of proposed regions are selected from an image, it would require thousands of
forward propagation of the convolutional neural network to perform object detection,
which is very computationally intensive.
So, due to the complex network structure, the model has better accuracy but slower execution speed.
As we will write in a later report, people have improved this model to a higher level of accuracy (Fast R-CNN, Faster R-CNN),
and now some models based on this idea have been applied to real-world applications with high accuracy
requirements, such as autonomous driving.
\paragraph{One Stage Model}
Unlike the Two Stage Model, the One Stage Model directly performs a convolution operation on the whole
image for feature extraction, generating anchors and classifying anchors.
The One Stage Model makes a fixed number of predictions on a grid initially.
However, because both anchor classification and anchor localization are performed by the same
set of neural networks, the parameters in the neural network are relatively more numerous
and more difficult to ``learn''.

\paragraph{}
In order to understand what's in an image, we'll feed our input through a
standard convolutional network to build a rich feature representation of the original image.
We'll refer to this part of the architecture as the ``backbone'' network, which is usually
pre-trained as an image classifier to more cheaply learn how to extract features from an image.
Then, we use the Non-maximum Suppression algorithm on each class separately to remove redundant predictions.
Finally, we get the images with annotations and confidence scores.

\subsection{Research Timeline}
\begin{figure}[htbp]
	\centering
	\includegraphics[width=16cm]{timeline.png}
	\caption{Object Detection Research Timeline \textsuperscript{\cite{timeline}}}
\end{figure}
\newpage

\section{Two Stage Model}
\paragraph{}A two stage model is a type of deep learning model that makes predictions in two steps. In the first step, the model processes the input data and generates a set of intermediate predictions. These intermediate predictions are then used in the second step to generate the final predicted output.

\paragraph{} Two-stage models are often used in tasks that require complex reasoning or that involve multiple stages of processing, such as object detection in images or natural language processing tasks. For example, in object detection, the first stage of a two-stage model might involve identifying potential objects in an image, while the second stage would involve classifying these objects and determining their positions in the image.

\paragraph{} One advantage of two-stage models is that they can be trained to perform multiple tasks in a single model, which can be more efficient than training multiple single-stage models. This can also make the model more robust and improve its performance on a wide range of tasks. Additionally, two-stage models can incorporate more complex reasoning processes and can be more flexible than single-stage models.

\paragraph{} However, there are also some challenges associated with training and using two-stage models. Because they involve multiple stages of processing, two-stage models can be more computationally expensive than single-stage models, which can make them slower to train and use. Additionally, two-stage models may require more data to train effectively, which can limit their use in certain applications.

\paragraph{} Overall, two-stage models offer a powerful approach for addressing complex prediction tasks and can provide superior performance in many situations. However, they also come with some challenges that must be carefully considered when deciding whether to use this type of model.
\subsection{Region-Based Convolutional Networks (R-CNN)\textsuperscript{\cite{rcnn}}}
\paragraph{} R-CNN (\textbf{R}egions with \textbf{C}onvolutional \textbf{N}eural \textbf{N}etworks) is a type of deep learning model for object detection. It was first introduced by Ross Girshick et al. in 2014 in a paper published in the journal ``IEEE Transactions on Pattern Analysis and Machine Intelligence''.
\begin{figure}[htbp]
	\centering
	\includegraphics*[width=12cm]{rcnn.png}
	\caption{Architecture of R-CNN}
\end{figure}
R-CNN is a two-stage model that uses a convolutional neural network (CNN) to identify potential objects in an image, and then uses a support vector machine (SVM) classifier to classify these objects and determine their positions in the image. In the first stage of the model, a CNN is used to generate a set of region proposals, which are regions of the image that may contain objects. These proposals are then passed to the SVM classifier in the second stage, which assigns a class label and bounding box coordinates to each proposal.

R-CNN was one of the first successful deep learning models for object detection and achieved state-of-the-art performance on a number of benchmark datasets at the time of its publication. However, it has since been surpassed by more efficient and effective object detection models, such as Fast R-CNN and Faster R-CNN. Despite this, R-CNN remains an important milestone in the development of object detection models and continues to be used in research and industry applications.
\subsection{Fast R-CNN\textsuperscript{\cite{fastrcnn}}}
Fast R-CNN is a deep learning object detection algorithm. It is a faster variant of the popular R-CNN (Region-based Convolutional Neural Network) object detection algorithm. Like R-CNN, it uses a convolutional neural network (CNN) to propose regions in an image that are likely to contain objects, and then uses a separate classifier to determine the class of each object. However, unlike R-CNN which performs these steps independently for each region proposal, Fast R-CNN shares computation between the region proposal and object classification stages, making it faster and more efficient. Fast R-CNN is often used in applications where real-time object detection is required, such as in autonomous vehicles and robotics.
\begin{figure}[htbp]
	\centering
	\includegraphics*[width=10cm]{rcnn.png} % NOTE(review): this reuses rcnn.png from the R-CNN figure — presumably a Fast R-CNN diagram was intended; confirm the image file
	\caption{Architecture of Fast R-CNN}
\end{figure}
\pagebreak
\paragraph{Some technical details}
\begin{itemize}
	\item Fast R-CNN generates region proposals outside the network, typically using a technique called selective search.
	\item The region proposals are then passed through a RoI (Region of Interest) pooling layer, which resizes each proposal to a fixed size, allowing the CNN to process them uniformly.
	\item The pooled region proposals are then fed into the final classification layers of the CNN, which predict the class of each object.
	\item Fast R-CNN uses a multi-task loss function that jointly trains the region proposal and object classification stages. This allows the model to learn to optimize both tasks simultaneously, making it faster and more efficient than R-CNN.
\end{itemize}
\paragraph*{}But Fast R-CNN still has several potential disadvantages, including its use of selective search for region proposal generation, its multi-stage pipeline structure, its reliance on stochastic gradient descent for training, and its relatively slow processing speed compared to some other object detection algorithms. These limitations have motivated the development of improved algorithms such as Faster R-CNN and Mask R-CNN, which address some of these shortcomings and offer better performance and efficiency.
\subsection{Faster R-CNN \textsuperscript{\cite{fasterrcnn}}}
\paragraph{}
Faster R-CNN is a new generation of deep learning object detection algorithm based on Fast R-CNN. Rather than using selective search, Faster R-CNN uses a Region Proposal Network (RPN) to generate proposals.
The reason why Fast R-CNN is faster than R-CNN is because you don't have to feed 2000 region proposals to the convolutional neural network every time. Instead, the convolution operation is done only once per image, and a feature map is generated from it.
\paragraph*{Some Improvement Compared with Fast R-CNN}
\begin{itemize}
	\item Faster R-CNN uses a Region Proposal Network (RPN) to generate region proposals, rather than using selective search as in Fast R-CNN. This makes the algorithm faster and more efficient, as the RPN can be trained end-to-end along with the rest of the network.
	\item Faster R-CNN uses a multi-task loss function that jointly trains the region proposal and object classification stages. This allows the model to learn to optimize both tasks simultaneously, making it more effective at both tasks.
	\item Faster R-CNN can use a deeper CNN architecture as its base network, allowing it to learn more powerful feature representations and improve its object detection performance.
	\item Faster R-CNN has been shown to achieve state-of-the-art performance on a variety of object detection benchmarks, outperforming Fast R-CNN and other algorithms.
\end{itemize}
\subsection{Mask R-CNN \textsuperscript{\cite{maskrcnn}}}
\paragraph{}Mask R-CNN was introduced by a team at Facebook AI Research (FAIR). The key idea behind Mask R-CNN is to extend Faster R-CNN
by adding a branch for predicting an object mask on each Region of Interest, in parallel with the existing branch for bounding box regression.
This allows Mask R-CNN to not only predict the class and location of objects in an image, but also to generate a pixel-level mask for each object.
\paragraph*{}
The main advantage of Mask R-CNN over Faster R-CNN is its ability to perform instance segmentation, which involves not only detecting and classifying objects in an image, but also generating a pixel-level mask for each instance of an object. This allows Mask R-CNN to provide more detailed and accurate information about the objects in an image, which is useful in a variety of applications.

\begin{figure}[htbp]
	% \centering
	\subfigure[Segmentation]{
		\begin{minipage}[t]{0.5\linewidth}
			\centering
			\includegraphics[width=7.5cm]{semantic_segmentation.png}
			% \caption{}
		\end{minipage}%
	}%
	\subfigure[Mask R-CNN Architecture]{
		\begin{minipage}[t]{0.45\linewidth}
			\centering
			\includegraphics[width=7cm]{maskrcnn.png}
			% \caption{}
		\end{minipage}%
	}%
	% \caption{Concepts of Mask R-CNN}
\end{figure}

\paragraph*{}
Another advantage of Mask R-CNN is that it is more computationally efficient than Faster R-CNN because it uses RoI pooling, which allows the model to process regions of interest in parallel and thus improve the speed of object detection.
Further more, Mask R-CNN is easy to generalize to other tasks. For example, it is possible to use Mask R-CNN for human pose estimation in the same framework.

\paragraph*{}However, Mask R-CNN's speed is relatively slow (shown in the figure below).
As we can see from the figure below, its real-time speed is less than 10 FPS (GTX 1080Ti).
So it cannot meet the requirements of real-time target tracking.
On the other hand, if you don't need instance segmentation, you can try some One Stage Models like YOLO, SSD, etc. for object detection.

\begin{figure}[htbp]
	\centering
	\includegraphics[width=14cm]{twostagemodelspeed.png}
	\caption{Speed Comparison of Existing Two Stage Models \textsuperscript{\cite{twostagemodel}}}
\end{figure}

\newpage
\section{One Stage Model}In the context of object detection,
a one-stage model is a type of deep learning model that is designed to identify objects within an image or video in a single step.
In this approach, the model processes the entire input image or video and outputs a prediction for each object that it detects, without the need for multiple stages or layers.
One-stage object detection models are typically less accurate than two-stage or multi-stage models, but they can be faster and more efficient to train and use. Some examples of one-stage object detection models include YOLO (\textbf{Y}ou \textbf{O}nly \textbf{L}ook \textbf{O}nce) and SSD (\textbf{S}ingle \textbf{S}hot MultiBox \textbf{D}etector). These models are designed to be able to make predictions quickly, which can be useful in real-time applications where fast detection is required.

Later, DETR (\textbf {DE}tection \textbf {TR}ansformer), a deep learning object detection algorithm based on Transformer architecture, has also become a member of the One Stage model.
Because of self-attention mechanisms, the DETR algorithm is trained to directly predict the locations and labels of objects in an image, without the need for separate region proposal or classification stages.
DETR is a promising new approach to object detection that has shown good performance in several benchmarks. It has the potential to be faster and more efficient than other methods, and it may be a useful tool for a variety of applications.

\subsection{Single Shot Multibox Detection (SSD) \textsuperscript{\cite{ssd}}}
Single Shot Multibox Detection (SSD) algorithm is a popular method for object detection that uses a single
neural network to predict bounding boxes and class probabilities
for objects in an image.
SSD makes predictions using a single forward pass of the network,
without the need for region proposal or post-processing steps.

The key idea behind SSD is to combine the predictions from multiple
layers of a convolutional neural network, where each layer predicts
bounding boxes and class probabilities at different scales.
This allows the algorithm to detect objects of various sizes in the image.

To make predictions, SSD first uses a base network, such as VGG or ResNet,
to extract features from the input image. Then, it applies a series of
convolutional layers to the feature map to predict bounding boxes and class
probabilities. Each of these layers has a different receptive field, which
allows it to make predictions at different scales.
And the predictions from each layer are then combined to form the final
detection results. SSD also uses techniques like non-maximum suppression
and default boxes to improve the accuracy of its predictions.

Compared with the traditional Two Stage Model, the SSD
algorithm shows a good improvement in detection speed. However, the SSD algorithm is still relatively slow because it uses a series of convolutional layers for feature extraction operations when making predictions.
And the next algorithm we will talk about solves this problem in a graceful way.



\subsection{You Only Look Once (YOLO) \textsuperscript{\cite{yolov1}}}
You Only Look Once (YOLO) is a state-of-the-art, real-time object detection system. It is called ``you only look once'' because it applies a single neural network to the input image only once, rather than using a pipeline of networks as in some other object detection algorithms.
After ``looking'' at the image for the first time, YOLO starts to calculate the Intersection over Union (IoU) \footnote{In object detection, precision and recall are not for class predictions, but for predictions of boundary boxes for measuring the detection performance. An IoU value $> 0.5$ is taken as a positive prediction, while an IoU value $< 0.5$ is a negative prediction.} to obtain the final bounding box.

\begin{figure}[htbp]
	\centering
	\includegraphics*[width=5cm]{ioufunction.png}
	\caption{IOU Function\textsuperscript{\cite{ioufunction}}}
\end{figure}

YOLO uses a single convolutional neural network to predict the bounding boxes and class probabilities for objects in an input image. The key advantage of YOLO is that it can run in real-time, making it suitable for applications that require low-latency detection.
It has been trained on a large dataset and can detect a wide range of objects, including people, cars, traffic signs, and animals. It is also relatively accurate: on a Pascal Titan X, YOLO v3 processes images at 30 FPS and has a mAP of 57.9\% on COCO test-dev, although it may not be as accurate as some other object detection algorithms.



\paragraph{How it works?}
The YOLO algorithm works by dividing the image into $N$ grids, each having an equal dimensional region of $S \times S$. Each of these $N$ grids is responsible for the detection and localization of the object it contains.
Correspondingly, these grids predict $B$ bounding box coordinates relative to their cell coordinates, along with the object label and the probability of the object being present in the cell, which
brings forth a lot of duplicate predictions due to multiple cells predicting the same object with different bounding box predictions.

\begin{figure}[htbp]
	\centering
	\includegraphics*[width=12cm]{yoloworkflow.png}
	\caption{YOLO Workflow}
\end{figure}

Then YOLO makes use of Non-Maximal Suppression to deal with all bounding boxes that have lower probability scores. YOLO achieves this by first looking at the probability scores associated with each decision and taking the largest one. Following this,
it suppresses the bounding boxes having the largest Intersection over Union (IoU) with the current high-probability bounding box\textsuperscript{\cite{yolov3}}.
By repeating this step, the final bounding boxes are obtained.


\subsection{DETR\textsuperscript{\cite{detr}}}
DETR stands for ``DEtection TRansformer'', and it is a type of end-to-end
artificial neural network designed for object detection in images.
It is a variant of the popular transformer architecture, which was
initially developed for natural language processing tasks but has since been
applied to other domains, including computer vision. DETR uses self-attention
mechanisms to learn relationships between different objects in an image, allowing
it to make predictions about their locations and classes.
Based on Transformer's global modeling capability, we do not need to use the NMS (Non-Maximal Suppression) algorithm for anchor redundancy processing.

\begin{figure}[htbp]
	\centering
	\includegraphics*[width=16cm]{architectureofdetr.png}
	\caption{Architecture of DETR}
\end{figure}

DETR makes a great innovation to the existing object detection algorithms.
DETR treats detection as a set prediction problem and uses Transformers to predict the set of boxes.
Below is a partial summary of the paper: ``Our approach streamlines the detection pipeline, effectively
\textbf{removing the need} for many hand-designed components like a non-maximum suppression procedure
or anchor generation that explicitly encode our prior knowledge about the task''.

Like a standard Transformer architecture, DETR sends an image through a pre-trained convolutional backbone (in the paper, the authors use ResNet-50/ResNet-101)
to get a feature map. Then DETR flattens the tensor and puts it into the encoder. Combining object queries (like attention heads), which limit the number of objects, with the output of the encoder,
we put it into the decoder to obtain the object bounding boxes. DETR's results are also impressive. In several cases, mAP is higher than that of existing models.

\begin{figure}[htbp]
	\centering
	\includegraphics*[width=17cm]{visualizationofbox.png}
	\caption{Visualization of Query Box}
\end{figure}

In DETR's paper, the authors provide a partial visualization of object queries and the decoder attention for every predicted object, which I think is the most interesting part of this article.
From these visualizations, we can see that DETR has learned from the given COCO dataset ``to find the object from the center of the image''.
This also has to do with the characteristics of the COCO dataset. And we can see that DETR's attention mechanism highlights the features of the object. In general, objects in the COCO dataset have two characteristics: they are usually in the center of the picture and large.

\begin{figure}[htbp]
	\centering
	\includegraphics*[width=14cm]{selfattentionofobjecty.png}
	\caption{Encoder Self-attention for a set of reference points}
\end{figure}

Of course, DETR has its problems, such as slow convergence during training, high computational complexity, difficulty with high-resolution images, insensitivity to small objects, etc. These problems have been solved in the later research of scholars, of course, also obtained some good results.
\begin{figure}[htbp]
	\centering
	\includegraphics*[width=14cm]{performancetableofdetr.png}
	\caption{Performance Table of DETR} % NOTE(review): original caption duplicated the previous figure's text; this figure shows performancetableofdetr.png
\end{figure}
\newpage
\section{Discussion}
The rapid progress of object detection over the years
is inseparable from the rapid development of deep learning technology.

Starting from R-CNN, people use deep neural networks
for feature extraction of images to achieve the purpose
of object recognition. In the beginning, the region-based
object detection framework opened up the first region-based
convolutional neural network object detection algorithm.
This model inserts CNN into the sliding window method,
which predicts bounding boxes directly from locations
of the topmost feature map after obtaining the confidences
of underlying object categories. This kind of model obtained
a mAP of 53.3$\%$ with more than 30$\%$ improvement over the
best result (DPM) on the VOC dataset\textsuperscript{\cite{20yearssurvey}}. Later researchers
improved the architecture of the original R-CNN, and fixed
some problems. For instance, SPP-Net solves the problem that
R-CNN must take a fixed-size input. Fast and Faster R-CNN sped
up the model. Mask R-CNN improves its accuracy.

Meanwhile, regression/classification-based frameworks started to show up.
Region proposal-based frameworks are composed of several correlated stages,
including region proposal generation, feature extraction with CNN,
classification, and bounding box regression, which are usually trained
separately. The release of YOLO makes real-time and accurate object
recognition possible. The SSD model uses the idea of multiple anchors,
which makes it easy to detect large and small objects.

In 2020, the proposal of DETR gives a new design paradigm of object
detection model. As the first end-to-end object detection model,
based on Transformer, DETR eliminates the requirement of manual
components such as Anchor and Non-maximum suppression (NMS),
greatly simplifying the object detection process. Now researchers
have proposed a series of variant models based on DETR. For example,
by designing the new attention module, Deformable DETR \textsuperscript{\cite{Deformabledetr}} focuses on the
sampling points around the reference point to improve the efficiency
of cross-attention. Conditional DETR \textsuperscript{\cite{conditionaldetr}} separates the DETR query into content
and location parts to clarify the meaning of the query. In addition,
DAB-DETR \textsuperscript{\cite{dabdetr}} treats the query as a 4D anchor and improves it layer by layer.

\begin{figure}[htbp]
	\centering
	\includegraphics[width=0.45\textwidth]{deyoframe}
	\caption{DEYO framework.}
	\label{fig:deyoframe}
\end{figure}

These models are gradually approaching the steps of the human
brain for object recognition. The introduction of a model paradigm
is necessarily followed by improvements and the formation of model
variants. Many efforts have been made to improve the existing models.
The DEYO\textsuperscript{\cite{deyo}} model has been proposed,
which solves the slow convergence problem of DETR and improves
the efficiency of the detector.




\newpage
\section{Summary}
From the development of object detection algorithms, we can see that people's understanding of object detection has evolved from complex to simple. The earliest non-deep-learning object detection algorithms used pure mathematical methods for detection, which were not only slow but also had low accuracy.

Since R-CNN, ideas for improving object detection algorithms have continually arisen, and the traces of prior knowledge have become less and less prominent. The two-stage model uses two kinds of neural networks for feature extraction and anchor generation respectively, and then combines them together. Such a large amount of convolution computation cannot meet the performance needs of real-time monitoring in real scenes.

Later, people improved the existing object detection algorithm. Starting from the SSD algorithm, only one neural network was needed for convolution operation, which accelerated the efficiency of image target recognition. YOLO was later used to improve existing algorithms, making object detection more efficient and enabling real-time monitoring with lower performance configurations. YOLO has played a great role in robotics, autonomous driving, medical care, surveillance, and other fields.

However, YOLO is still an anchor-based object detection algorithm that relies on human prior knowledge of anchor boxes. Later, Transformer-based object detection algorithms were proposed as end-to-end models, in which the processing and perception of images are more similar to the way the human brain processes images. Moreover, the improvements to DETR by subsequent generations of researchers have led DETR to strive for higher accuracy.

Similarly, the improvement of object detection algorithms is inseparable from the improvement of basic deep learning models, from the update of convolutional networks to the proposed Transformer architecture. A new generation of object detection algorithms are generated based on the latest deep learning models. I believe that in the near future, people will improve artificial intelligence models that are closer to the way humans think. And there will be more effective models in the target detection algorithm.

Finally, I would like to thank Ms. Wen for her teaching and guidance. Your guidance has benefited me a lot.

\clearpage
\phantomsection
\addcontentsline{toc}{section}{6 \quad References}
\begin{thebibliography}{99}
	\bibitem{ref1}D. G. Lowe, "Object recognition from local scale-invariant features," Proceedings of the Seventh IEEE International Conference on Computer Vision, 1999, pp. 1150-1157 vol.2, doi: 10.1109/ICCV.1999.790410.
	\bibitem{ref2}N. Dalal and B. Triggs, "Histograms of oriented gradients for human detection," 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05), 2005, pp. 886-893 vol. 1, doi: 10.1109/CVPR.2005.177.
	\bibitem{ref3}P. Viola and M. Jones, "Rapid object detection using a boosted cascade of simple features," Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001, 2001, pp. I-I, doi: 10.1109/CVPR.2001.990517.
	\bibitem{timeline} S. Liu, Y. You, H. Su, G. Meng, W. Yang, and F. Liu, “Few-Shot Object Detection in Remote Sensing Image Interpretation: Opportunities and Challenges,” Remote Sensing, vol. 14, no. 18, p. 4435, Sep. 2022, doi: 10.3390/rs14184435. [Online]. Available: http://dx.doi.org/10.3390/rs14184435
	\bibitem{rcnn}Girshick, R., Donahue, J., Darrell, T.,Malik, J. (2014). Rich feature hierarchies for accurate object detection and semantic segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 36(8), 1625-1639.
	\bibitem{fastrcnn} Girshick, Ross, Jeff Donahue, Trevor Darrell, and Jitendra Malik. "Fast R-CNN." IEEE transactions on pattern analysis and machine intelligence 38, no. 10 (2015): 1954-1967.
	\bibitem{pedestrian}P. Dollar, C. Wojek, B. Schiele, and P. Perona, “Pedestrian detection: An evaluation of the state of the art,” IEEE transactions on pattern analysis and machine intelligence, vol. 34, no. 4, pp. 743–761, 2012.
	\bibitem{pedestrian2} S. Zhang, R. Benenson, and B. Schiele, “Citypersons: A diverse dataset for pedestrian detection,” in The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), vol. 1, no. 2, 2017, p. 3.
	\bibitem{fasterrcnn}Ren, S. et al. (2016) Faster R-CNN: Towards real-time object detection with region proposal networks, arXiv.org. Available at: https://arxiv.org/abs/1506.01497 (Accessed: December 19, 2022).
	\bibitem{fastrcnnrefinement}M.-C. Roh and J.-y. Lee, “Refining faster-rcnn for accurate object detection,” in Machine Vision Applications (MVA),
	\bibitem{fasterrcnnpedestrian} L. Zhang, L. Lin, X. Liang, and K. He, “Is faster rcnn doing well for pedestrian detection?” in European Conference on Computer Vision. Springer, 2016, pp. 443–457.
	\bibitem{twostagemodel}R. Gandhi, ``R-CNN, Fast R-CNN, Faster R-CNN, YOLO --- Object Detection Algorithms,'' Towards Data Science. Available at: \url{https://towardsdatascience.com/r-cnn-fast-r-cnn-faster-r-cnn-yolo-object-detection-algorithms-36d53571365e}
	\bibitem{maskrcnn}He, K. et al. (2018) Mask R-CNN, arXiv.org. Available at: https://arxiv.org/abs/1703.06870 (Accessed: December 19, 2022).
	\bibitem{ssd}Liu, W. et al. (2016) SSD: Single shot multibox detector, arXiv.org. Available at: \url{https://arxiv.org/abs/1512.02325} (Accessed: December 19, 2022).
	\bibitem{ssdenhancement}J. Jeong, H. Park, and N. Kwak, “Enhancement of ssd by concatenating feature maps for object detection,” arXiv preprint arXiv:1705.09587, 2017.
	\bibitem{boostdeeplearning}R. Xiao, L. Zhu, and H.-J. Zhang, “Boosting chain learning for object detection,” in Computer Vision, 2003. Proceedings. Ninth IEEE International Conference on. IEEE, 2019, pp. 709–715
	\bibitem{featurefusedssd}G. Cao, X. Xie, W. Yang, Q. Liao, G. Shi, and J. Wu, “Feature-fused ssd: fast detection for small objects,” in Ninth International Conference on Graphic and Image Processing (ICGIP 2017), vol. 10615. International Society for Optics and Photonics, 2018, p. 106151E.
	\bibitem{relationnetwork}H. Hu, J. Gu, Z. Zhang, J. Dai, and Y. Wei, “Relation networks for object detection,” in Computer Vision and Pattern Recognition (CVPR), vol. 2, no. 3, 2018.
	\bibitem{yolov1}Redmon, J. et al. (2016) You only look once: Unified, real-time object detection, arXiv.org. Available at: https://arxiv.org/abs/1506.02640 (Accessed: December 19, 2022).
	\bibitem{ioufunction}``IoU --- Intersection over Union,'' Analytics Vidhya, Medium. Available at: \url{https://medium.com/analytics-vidhya/iou-intersection-over-union-705a39e7acef}
	\bibitem{speedcomp} N. Vasilache, J. Johnson, M. Mathieu, S. Chintala, S. Piantino, and Y. LeCun, “Fast convolutional nets with fbfft: A gpu performance evaluation,” arXiv preprint arXiv:1412.7580, 2014.
	\bibitem{yolo9000}J. Redmon and A. Farhadi, “Yolo9000: better, faster, stronger,” arXiv preprint, 2017.
	\bibitem{yolov3}Redmon, J. and Farhadi, A. (2018) Yolov3: An incremental improvement, arXiv.org. Available at: https://arxiv.org/abs/1804.02767 (Accessed: December 19, 2022).
	\bibitem{detr}Carion, N. et al. (2020) End-to-end object detection with Transformers, arXiv.org. Available at: https://arxiv.org/abs/2005.12872 (Accessed: December 19, 2022).
	\bibitem{multipathnetwork}S. Zagoruyko, A. Lerer, T.-Y. Lin, P. O. Pinheiro, S. Gross, S. Chintala, and P. Dollar, “A multipath network for object detection,” arXiv preprint arXiv:1604.02135, 2016.
	\bibitem{20yearssurvey}Zou, Z. et al. (2019) Object detection in 20 years: A survey, [1905.05055v1] Object Detection in 20 Years: A Survey. Available at: https://arxiv-export1.library.cornell.edu/abs/1905.05055v1 (Accessed: December 19, 2022).
	\bibitem{Deformabledetr} Zhu, X. et al. (2021) Deformable detr: Deformable Transformers for end-to-end object detection, arXiv.org. Available at: https://arxiv.org/abs/2010.04159 (Accessed: December 19, 2022).
	\bibitem{conditionaldetr}Meng, D. et al. (2021) Conditional detr for fast training convergence, arXiv.org. Available at: https://arxiv.org/abs/2108.06152 (Accessed: December 19, 2022).
	\bibitem{dabdetr}Liu, S. et al. (2022) DAB-detr: Dynamic anchor boxes are better queries for detr, arXiv.org. Available at: https://arxiv.org/abs/2201.12329 (Accessed: December 19, 2022).
	\bibitem{deyo}Ouyang, H. (2022) Deyo: Detr with Yolo for step-by-step object detection, arXiv.org. Available at: https://arxiv.org/abs/2211.06588 (Accessed: December 19, 2022).
	\bibitem{review}Z. -Q. Zhao, P. Zheng, S. -T. Xu and X. Wu, "Object Detection With Deep Learning: A Review," in IEEE Transactions on Neural Networks and Learning Systems, vol. 30, no. 11, pp. 3212-3232, Nov. 2019, doi: 10.1109/TNNLS.2018.2876865.
	\bibitem{facialdetection}P. Hu and D. Ramanan, “Finding tiny faces,” in Computer Vision and Pattern Recognition (CVPR), 2017 IEEE Conference on. IEEE, 2017, pp. 1522–1530
	\bibitem{transformer}Vaswani, Ashish, Noam M. Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. “Attention is All you Need.” ArXiv abs/1706.03762 (2017): n. pag.
\end{thebibliography}


\end{document}