\documentclass[5p]{elsarticle}

\usepackage{lineno,hyperref}
\modulolinenumbers[5]

%\usepackage{graphicx}
\usepackage{amssymb}
\usepackage{amsmath}
%\usepackage{latexsym}
%\usepackage{cite}
\usepackage{color}
%\usepackage{lineno}

\usepackage[noend]{algpseudocode}

\usepackage{algorithmicx,algorithm}
\usepackage{multirow}
\usepackage{bbm}

\renewcommand{\topfraction}{1.0}
\renewcommand{\bottomfraction}{1.0}
\renewcommand{\textfraction}{0.0}

\journal{Engineering Applications of Artificial Intelligence}

%%%%%%%%%%%%%%%%%%%%%%%
%% Elsevier bibliography styles
%%%%%%%%%%%%%%%%%%%%%%%
%% To change the style, put a % in front of the second line of the current style and
%% remove the % from the second line of the style you would like to use.
%%%%%%%%%%%%%%%%%%%%%%%

%% Numbered
%\bibliographystyle{model1-num-names}

%% Numbered without titles
%\bibliographystyle{model1a-num-names}

%% Harvard
%\bibliographystyle{model2-names.bst}\biboptions{authoryear}

%% Vancouver numbered
%\usepackage{numcompress}\bibliographystyle{model3-num-names}

%% Vancouver name/year
%\usepackage{numcompress}\bibliographystyle{model4-names}\biboptions{authoryear}

%% APA style
%\bibliographystyle{model5-names}\biboptions{authoryear}

%% AMA style
%\usepackage{numcompress}\bibliographystyle{model6-num-names}

%% `Elsevier LaTeX' style
\bibliographystyle{elsarticle-num}
%%%%%%%%%%%%%%%%%%%%%%%

\begin{document}

\begin{frontmatter}

%\title{SASCNet: Shape-Aware Siamese Convolutional Network for Change Detection} 
\title{Change Detection in Images using Shape-Aware Siamese Convolutional Network}

%% or include affiliations in footnotes:

\author[nwpu_address]{Suicheng Li}
\ead{2019\_lsc@mail.nwpu.edu.cn}

\author[nwpu_address]{Pengcheng Han}
\ead{hanpc1125@mail.nwpu.edu.cn}

\author[nwpu_address]{Shuhui Bu}
\ead{bushuhui@nwpu.edu.cn}

\author[nwpu_address]{Pinmo Tong}
\ead{2018200078@mail.nwpu.edu.cn}

\author[nwpu_address]{Qing Li}
\ead{287407782@qq.com}

\author[Zhengzhou_address]{Ke Li}
\ead{like19771223@163.com}

\author[aerospace_address]{Gang Wan}
\ead{casper\_51@163.com}




%_ in email should be \_

%\author[nwpu_address]{Gong Cheng}
%\ead{gongcheng@mail.nwpu.edu.cn}

%\author[nwpu_address]{Junwei Han\corref{mycorrespondingauthor2}}
%\cortext[mycorrespondingauthor2]{Corresponding author}
%\ead{jhan@nwpu.edu.cn}

\address[nwpu_address]{Northwestern Polytechnical University, China}
\address[Zhengzhou_address]{Zhengzhou Institute of Surveying and Mapping, China}
\address[aerospace_address]{Aerospace Engineering University, China}

%\address[ieu_address]{Information Engineering University, China}


\begin{abstract}
\indent Change detection has gradually become a core technique due to its wide applications in image and video analysis, such as land cover analysis and real-time monitoring systems. Recently, siamese convolutional networks have been adopted for change detection and demonstrate state-of-the-art performance. Although most previous works achieve good localization accuracy, these methods cannot avoid side effects such as coarse boundaries and empty holes. 
%
In this paper, we propose a shape-aware siamese convolutional network (SASCNet) to simultaneously integrate different information for change detection with three steps in a unified network. 
%
In the first step, we extract multi-dimension features from paired images and select multi-level change maps generated by a novel siamese encoder-decoder network with multi-scale supervisions. In the second step, we integrate these change maps to obtain complementary information in detail. Finally, we use a residual fine-tune module to refine the predicted change maps and enhance the performance. 
%
Because of rich information in different levels and multi-scale supervisions, the predicted change maps could provide precise positioning as well as high-quality shapes. Experimental results on the ``CDnet 2014 dataset'' and the ``AICD-2012 dataset'' show that our method outperforms the state-of-the-art methods in most challenging conditions.
\end{abstract}

\begin{keyword}
Change Detection in Images \sep SASCNet \sep  Change Map Fusion \sep Fine-tune
\end{keyword}

\end{frontmatter}

\linenumbers



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Introduction}
Change detection is a difficult and critical task that compares paired images of the same scene captured at different times \cite{singh1989review}. The primary goal of change detection is to find differences between objects or scenes. Therefore, change detection is a field of much significance in modern society and has been widely used in real-time monitoring systems \cite{pathak2014new, Cheng2015A}, land cover analysis \cite{alcantarilla2016street-view, ghouaiel2016coupling, lyu2016learning, Shang2014Change, cui2019unsupervised, zhao2020novel, canty2020statistical} and fine-grained detection \cite{Feng2015Fine, Rui2017Learning}.
%
In our daily life, when people see paired images from the same place at different times, it is natural and easy for us to distinguish the difference between them. Because of its great capacity for managing and analyzing semantic information, the human visual system is effective at locating changed regions.
However, although computer vision is developing very rapidly in this field, it is still hard and challenging to detect change with high quality. This is because many factors may affect the quality of results. For example, sudden or gradual illumination variations \cite{Cheng2015A} may cause considerable difficulties in some change detection methods.

\begin{figure}[tb]
	\begin{center}
		\includegraphics[width=0.9\linewidth]{images/my_images/contrast.jpg}
	\end{center} \vspace{-4mm}
	\caption{{Visual examples of our method (SASCNet) compared to Cascade \cite{wang2017interactive} on some challenging conditions, such as zooming, rotating and camera jitter. Columns 1-5 show paired image 1 ($I_1$), paired image 2 ($I_2$), results of Cascade, results of SASCNet as well as ground-truth (GT), separately. As we can see, the shapes of our results are clearer, with clean boundaries.	
	}} \label{1_contrast}
\end{figure}

In recent years, with the development of deep learning, more and more researchers apply fully convolutional networks (FCNs) \cite{badrinarayanan2017segnet, lin2017refinenet, long2015fully} to change detection \cite{alcantarilla2016street-view, sakurada2015change, khan2016learning, guo2018learning}. In these deep learning methods, siamese convolutional networks (SCNs) \cite{brown2011discriminative, zagoruyko2015learning} have shown great power for change detection \cite{guo2018learning, han2019aerial}. SCN is a kind of supervised method for metric learning to analyze similarities of image pairs. Because two input paired images in change detection task have similarities in unchanged regions and differences in changed regions, it's very appropriate to deal with change detection using SCNs. 


Most state-of-the-art SCNs \cite{zhan2017change, guo2018learning} use change maps calculated from paired images in Euclidean distance. The change maps are used directly to score the dissimilarities between paired images at different times. They are trained with a margin contrast loss \cite{hadsell2006dimensionality} to enlarge the distance in changed regions and reduce the distance in unchanged regions. Although the existing methods achieve great results in localization (see Cascade \cite{wang2017interactive} in Fig.~\ref{1_contrast}), the predicted change maps are still coarse in boundaries and shapes because of undesirable segmentation.
%
In order to get accurate change maps, there are three main challenges to overcome:
%
\textbf{I)} Changed regions are primarily defined on the global contrast of the paired image, which means most methods only focus on the localization, not on the precise pixel features. Therefore, the shapes of predicted change maps are irregular because of the unsatisfactory segmentation and even incorrect when the quality of input images is poor, such as dynamic background and camera jitter. 
%
To deal with this problem, new networks that could sufficiently leverage the multi-context or multi-scale information are needed; 
%
\textbf{II)} Most existing methods only consider using a single change map calculated from paired images or the concatenation of paired images instead of multi-level change maps in different dimensions. However, change detection is a special and hard task, which takes two similar input images, different from other computer vision tasks such as object detection \cite{tan2020complex, Cevikalp2017Visual}, semantic segmentation \cite{long2015fully, badrinarayanan2017segnet, kestur2019mangonet:}, and salient object detection \cite{noori2020dfnet:, liu2020deep}. Evidently, due to insufficient information, a single change map or the concatenation of paired images will be incomplete under the challenging conditions of illumination changes, rotating and zooming. As a result, there will be a lot of noise and errors in the results. For these complicated cases, more change feature representations should be adopted to obtain complementary information in detail;
%
\textbf{III)} In order to refine the results and maintain smooth object boundaries, some methods use post-processing, such as conditional random fields (CRF)  or manual threshold \cite{wang2017interactive}. Nevertheless, CRF methods tend to take a lot of time compared to end-to-end learning, which leads to low inference speed. And manual threshold based methods may not fit all situations due to artificial operation. Therefore, a time-saving module, that can be well adapted in the whole structure and has strong ability to refine the coarse results, is needed. 



For resolving the above problems, we propose a novel framework called shape-aware siamese convolutional network (SASCNet), which focuses on detecting the complete shape of the changed regions. 
%
In order to obtain accurate detection results, our method pays attention to the global information in image level and the detailed information in pixel level. Therefore, a deep and multi-supervised network which can get delicate information of different-level change maps is required. In this paper, we design a siamese encoder-decoder network to obtain semantic information and low-level local details at the same time. 
%
Then, how to make use of various information is key to getting complete shapes and clear boundaries of change maps. In order to achieve this goal, we try to select three-level change maps representing edge information, location information and semantic information respectively to obtain complementary information. 
%
Last but not least, to provide precise positioning and especially to make object shapes complete and accurate, we propose a residual fine-tune module to refine the predicted map by adding itself with a detailed residual learned between the previous change map and the ground truth. 


To verify the effectiveness of the proposed SASCNet, we evaluate it on several popular change detection datasets and compare our results with the state-of-the-art methods. The experimental results demonstrate that SASCNet generates better results (e.g., results in Fig.~\ref{1_contrast}), which preserve more details such as boundaries and shapes in some extremely challenging conditions.
%
Ultimately, main contributions of our work are summarized as follows:

\begin{itemize}
	\item  We propose a novel architecture which simultaneously integrates abundant information to preserve accurate shapes of change maps in a unified network without other post-processing like CRF. Therefore, the proposed method achieves faster inference speed compared with CRF-based methods.
	
	\item To optimize final change prediction, three complementary change maps are adopted to mutually complete each other on three aspects: edge information, location information, and semantic information. And a residual fine-tune model is introduced to further refine the boundaries of our results.
	%Our model consists of a deep encoder-decoder with multi-supervisions from low-level to high-level layer to learn sufficient information and generate three change maps, a fusion module to fully leverage all the change maps and a fine-tune network to refine fused change map, which significantly improves the boundary of predicted changed regions. 
	
	\item The proposed method achieves the best performance on several widely used datasets compared with the state-of-the-art methods. 
	
	\item In addition, modular design is adopted in the proposed method, which means 
	our network can be easily reconstructed or split for other tasks.
\end{itemize}



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Related Work}

%\textbf{Online metric learning}
In recent years, many methods have been proposed to detect changed regions in paired images, and some great results have been achieved. There are two typical categories of approaches to this task: traditional machine learning methods and deep learning methods. 

\subsection{Traditional Methods} 

Change detection is an important research field and a great deal of research has been conducted. Before deep learning methods were applied to this field, many attempts had been made. 

\textbf{1) Handcrafted feature:} Early methods detect change objects by contrasting different pixels based on handcrafted features. Luppino \textit{et al.} \cite{Luppino2017A} demonstrate a clustering-based method to detect changes. They calculate ensemble clusters of two heterogeneous images of the same geographical area at different time and their stacked image respectively. The clusters are split or merged in the different images to recognize changes. 

\textbf{2) Object-based:} Miron \textit{et al.} \cite{miron2015change} present a moving object detection system based on Graph Cut \cite{rother2004"grabcut":}. The method relies on motion modeling using an optical flow algorithm and a classical background subtraction module based on mixture of Gaussians. Zhang \textit{et al.} \cite{zhang2018object} introduce a novel object-based change detection technique, which incorporates multi-scale uncertainty analysis by support vector machine classification. Therefore, this method could detect changes from different scales and deal with very high-resolution (VHR) images. 

\textbf{3) Genetic Programming:} Bianco \textit{et al.} \cite{bianco2017combination} utilize Genetic Programming (GP) to automatically select the best algorithms and combine them in different ways. This method is significantly different from other state-of-the-art algorithms and performs the most suitable post-processing operations on the outputs of the algorithms.

\textbf{4) Pixel-level segmentation:} St-Charles \textit{et al.} \cite{stcharles2015subsense:} demonstrate a universal pixel-level segmentation method that relies on spatiotemporal binary features as well as color information to detect changes. The method could detect camouflaged foreground objects more easily and ignore most illumination variations. Ramirezalonso \textit{et al.} \cite{ramirezalonso2016auto-adaptive} propose a new Background Subtraction System scheme based on two Self Organized Maps (SOM) that adapts in a parallel way at different rates. Isik \textit{et al.} \cite{Isik2018SWCD} demonstrate an effective change detection algorithm for pixelwise changes. They introduce a sliding window approach combined with dynamic control of update parameters to update background frames.

Although these traditional methods achieve good performance in some scenarios, their abilities to extract features are limited. Meanwhile, these methods benefit mostly from global contrast, which means they ignore some details in boundaries and shapes.  



\subsection{Deep Learning Methods} 
Compared with the above methods, deep learning methods show their advantages in change detection and recently refresh the state-of-the-art records. Typically, deep learning methods have more powerful capabilities to extract features from images which have been used in many fields. Due to the above advantages, some methods based on deep neural networks have been designed for change detection which have better performance. 

\textbf{1) Object-based:} Stent \textit{et al.} \cite{stent2015detecting} propose a novel system for object-based change detection of multiple changes, which utilizes a two-channel convolutional neural network for the detection of changes in multiple views of a tunnel surface. Han \textit{et al.} \cite{han2019aerial} propose a novel change detection network for identifying scene changes about pairs of aerial images. The method utilizes ROIs detection to extract changed regions, which could overcome the wide-baseline problem resulted from aerial images and is robust to noise, light change, and season time change.

\textbf{2) CNN-based segmentation:} Wang \textit{et al.} \cite{wang2017interactive} explore various CNN configurations and propose a highly accurate semi-automatic architecture for segmenting foreground moving objects pictured in surveillance videos, which also achieves great performance in CDnet 2014 dataset \cite{wang2014cdnet}. Bu \textit{et al.} \cite{bu2019mask} introduce a change detection network named Mask-CDNet network containing two collaborative modules. These two modules solve image registration issue in change detection and predict specific change differences respectively.  Mond{\'e}jar-Guerra \textit{et al.} \cite{mondejar2019end} propose an end-to-end deep learning architecture, which consists of two nested networks that are trained together. 

\textbf{3) SCN-based segmentation:} Guo \textit{et al.} \cite{guo2018learning} propose a novel fully Convolutional siamese metric Network (CosimNet) to score change by customizing implicit metrics. They use contrastive loss to reduce the distance between the unchanged feature pairs and to enlarge the distance between the changed feature pairs. 
%Chen \textit{et al.} \cite{chen2019deep} demonstrate a strong multi-scale feature convolution unit (MFCU) for change detection in VHR images. This paper designs two novel deep siamese convolutional networks based on MFCU, which significantly improve the accuracy and speed of inference.

The above methods have achieved good performance on some datasets, but there is still room for improvement in terms of high-quality segmentation and clearer boundaries. In our research, we explore various SCN-based architectures and focus on how to take full advantage of the multi-context information to generate change maps. We also present a well-designed fine-tune module to refine the predicted change map at the end of our whole architecture, which significantly improves the performance. 


\begin{figure*}[!h]
	\begin{center}
		\includegraphics[width=0.9\linewidth]{images/SASCNet.jpg}
	\end{center} \vspace{-4mm}
	\caption{{The pipeline of our proposed Shape-Aware Siamese Convolutional Network: SASCNet, which consists of three modules: change map extraction module (CMEM), change map fusion module (CMFM), and fusion fine-tune module (FFM).
	}} \label{2_SASCNet}
\end{figure*}



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Methodology}
The overall architecture of the proposed method is shown in Fig.~\ref{2_SASCNet}. The proposed SASCNet consists of three modules: change map extraction module (CMEM), change map fusion module (CMFM), and fusion fine-tune module (FFM). 
%
Change map extraction module is designed as a progressive encoder-decoder network with multi-scale supervisions, which learns to extract high-quality change maps from paired images. Change map fusion module adds different features to get rich information of change maps. Fusion fine-tune module refines the rich change map of the previous module by a residual encoder-decoder network.


\subsection{Change Map Extraction Module}
In order to obtain low-level local details and high-level semantic information at the same time, the network needs to generate multi-level feature maps. Therefore, the proposed change map extraction module adopts a siamese encoder-decoder architecture inspired by SegNet \cite{badrinarayanan2017segnet} and U-net \cite{Ronneberger2015U}.
%
We need a well-formed backbone of our network with fast convergence speed and strong learning ability. Here we use the \textit{ResNet} \cite{he2016deep}, as used in other progressive deep learning methods \cite{lin2017refinenet, islam2017gated}, to construct the proposed network.
%
The encoder has a modified architecture of \textit{ResNet-34}, which has an input convolution layer and a max pool layer, followed by three layers (\textit{layer1}, \textit{layer2} and \textit{layer3}). These three layers have 3, 4 and 6 basic blocks, respectively. It should be noted that original \textit{ResNet-34} has four layers, but we just use the first three in our module. The reason is that we find using all the layers in original \textit{ResNet-34} can't improve the performance in this module and even slow down the convergence rate.

For the decoder part, we use both transposed convolution layer and bilateral upsampling layer to make this part precise and efficient. 
%
In the first two layers of our decoder, we adopt two transposed convolution layers followed by a batch normalization and a ReLU activation function. To reuse the features in low-level, the input of second transposed convolution layer is the feature map concatenated by the output from its previous stage and its corresponding feature map in the encoder. In order to speed up the convergence rate, we decide to use a simple bilateral upsampling layer in the last layer of our decoder to get the same spatial resolution result as the input size. 

The output of decoder could be denoted as $F_{D}$, which could be computed as:
%
\begin{align}
\bar{F}^{(2)} &= \phi\left({Transpose} \left({F}^{(3)} ; \theta\right)\right), \\
F_{D} &= Up \left(\phi \left( {Transpose} \left( \bar{F}^{(2)} \oplus {F}^{(2)}  ; \theta \right) \right) ; t_0 \right), 
\end{align}
%
where $Transpose(\star; \theta)$ represents a transposed convolution with parameter $\theta$, which aims to get a better upsampled image; $\phi()$ denotes a ReLU activation function. $Up(\star; t_0)$ is a simple bilateral upsampling layer which could up-sample $\star$ to the same size as $t_0$, and $t_0$ represents the input image. $\oplus$ represents pixel-wise add. $F^{(2)}$ and $F^{(3)}$ denote the last two layers in the encoder respectively. $\bar{F}^{(2)}$ is the output of the first transposed convolution layer in the decoder.

For simplicity, change maps as the pipeline shows in Fig. 2 could be denoted by a features set $\mathbf{C}$:
%
\begin{equation}
\mathbf{C} = \left\{ C_1, C_2, C_3, C_4, C_5 \right\}.
\end{equation}

In order to get more delicate features, we use three supervisions in this module, two in the encoder stage and one in the decoder stage. According to the experiment, we can get better edge information in these low-level features, which maintains the boundaries of the final changed regions. Therefore, in the third layer of the encoder, the Euclidean distance of paired feature maps from paired images is calculated and used as change map $C_1$. In addition, it is supervised by the ground truth with contrast loss after a global normalization. 

In order to get location information, the same operations in the last layer of the encoder are performed to fill the final changed regions and this change map is used as $C_2$. Finally, change map $C_3$ is calculated in the last layer of the decoder and contains semantic information.
%
The first three change maps in $\mathbf{C}$ could be computed as:
\begin{equation}
C_k=\left\|{F_k}^1 -{F_k}^2 \right\|_{2}, k = 1,2,3,
\end{equation}	
%
where $F_k^1$ and ${F_k}^2 $ are the feature maps corresponding to the change map $C_k$.

The contrast loss \cite{hadsell2006dimensionality} mentioned above is defined as: 
\begin{equation}
\begin{aligned}
\ell_{c}=\sum_{k=1}^{K}\sum_{i,j} \left[ y_{(i,j)} (C_{k(i,j)})^{2}+(1-y_{(i,j)}) \max ({\delta}-C_{k(i,j)}, 0)^{2}\right],\\
k = 1,2,3,
\end{aligned}
\end{equation}
%
where $y$ is a binary ground truth assigned to the change map $C$. $y_{(i,j)}=1$ if there is no change at the corresponding pixel pair, and $y_{(i,j)}=0$ if there is a change at that region. $\delta$ is a margin, greater than 0. $K$ is the number of the training change maps. When $y_{(i,j)}=1$, this loss function reduces the value to 0 for unchanged regions. However, when $y_{(i,j)}=0$, this loss will be 0 if $C_{k(i,j)} > \delta$, and values in changed regions will approach the margin if $C_{k(i,j)} < \delta$. Through experiments, $\delta$ is empirically set to 2.

\subsection{Change Map Fusion Module}
In this module, we aim to leverage edge information, location information and semantic information to get complementary information. There are two methods for information fusion: concatenation and pixel-wise add. In our experiment, we compare the above methods and find the latter is more effective. Therefore, the way we choose is to add the change maps in pixel-level. Because of the different sizes of the various change maps, we adopt a bilateral upsampling layer after the two change maps calculated in the encoder stage.

In order to control the pixel range in the final fusion change map, we decide to use a sigmoid function to map it into the range $(0,1)$. The output $C_4$ of this module could be computed as:
%
\begin{equation}
C_4 = Sigmoid\left[Up\left(C_1; C_3\right) + Up\left(C_2; C_3\right) + C_3 \right].
\end{equation}
 

\subsection{Fusion Fine-tune Module}
Because of the subtraction of two feature maps at first in Equation (4), the values we get are relatively small. Therefore, it is hard to use a simple network to bring them into a good distribution, where unchanged pixel values approach 0 and changed pixel values are close to 1. 
%
Under this condition, the change map predicted only by the extraction module is coarse, whose boundaries are fuzzy. Some methods adopt manual thresholds to get a binarization mask \cite{wang2017interactive}. However, a manual threshold depends on the trained images, which means this kind of post-processing is not applicable to all situations. Therefore, to get a good prediction, a relatively deep and well-formed structure is required to refine and generate high-quality change maps. 

\begin{figure}[tb]
	\begin{center}
		\includegraphics[width=0.9\linewidth]{images/my_images/lower_fine_tune.png}
	\end{center} \vspace{-4mm}
	\caption{{Illustration of our fusion fine-tune module. It contains a series of convolution layers and upsampling layers similar to U-net. In addition, we add a residual block to enhance its ability of refinement.
	}} \label{3_lower_fine_tune}
\end{figure}

\begin{table}[tb]
	\small
	\caption{Details of fusion fine-tune module. `Conv' represents convolution layer and its parameters denote `in channels', `out channels', `kernel size', and `stride' respectively. Except for layer1 and layer8, the other layers are followed by a BatchNorm layer and a ReLU layer.}
	\begin{center} 
		\begin{tabular}{c|c|c}
			\hline
			\hline
			Stage &  Layer Name & Architecture\\
			\hline
			\multirow{7} * {encoder} &layer1 &Conv(1,64,3,1) \\
			\cline{2-3}
			&layer2 &Conv(64,64,3,1)     \\
			\cline{2-3}
			&pool1 &Maxpool(2)    \\
			\cline{2-3}
			&layer3 &Conv(64,64,3,1)     \\
			\cline{2-3}
			&pool2 &Maxpool(2)     \\
			\cline{2-3}
			&layer4 &Conv(64,64,3,1)     \\
			\cline{2-3}
			&pool3 &Maxpool(2)     \\
			\hline
			\multirow{7} * {decoder} &layer5 &Conv(64,64,3,1) \\
			\cline{2-3}
			&up1 &Upsample(2)    \\
			\cline{2-3}
			&layer6 &Conv(128,64,3,1)   \\
			\cline{2-3}
			&up2 &Upsample(2)    \\
			\cline{2-3}
			&layer7 &Conv(128,64,3,1)    \\
			\cline{2-3}
			&up3 &Upsample(2)    \\
			\cline{2-3}
			&layer8 &Conv(64,1,3,1)    \\
			\hline
		\end{tabular}
	\end{center} 
\end{table}

%\begin{table}[tb]
%	\small
%	\caption{Details of fusion fine-tune module. `Conv’ represents convolution layer and its parameters denote `in channels’, `out channels’, `kernel size’, and `stride’ respectively. Except for the layer1 and the layer8, other floors are followed by a BatchNorm 	\begin{center} 
%		\begin{tabular}{c|c|c|c}
%			\hline
%			\hline
%			\multicolumn{2}{c|}{\textbf{encoder}} &  \multicolumn{2}{c}{\textbf{decoder}} \\
%			\hline
%			layer2 &Conv(64,64,3,1) &up1 &Upsample(2) \\
%			\hline
%			pool1 &Maxpool(2) &layer6 &Conv(128,64,3,1) \\
%			\hline
%			layer3 &Conv(64,64,3,1) &up2 &Upsample(2) \\
%			\hline
%			pool2 &Maxpool(2) &layer7 &Conv(128,64,3,1) \\
%			\hline
%			layer4 &Conv(64,64,3,1) &up3 &Upsample(2) \\
%			\hline
%			pool3 &Maxpool(2) &layer8 &Conv(64,1,3,1) \\
%			\hline
%			\hline
%		\end{tabular}
%	\end{center} 
%\end{table}


The typical fine-tune module \cite{Fu2019Refinet, islam2017salient, Lihe2020A} is designed as a residual block. This kind of structure has superior abilities to refine the fused change map $C_4$ with a learned residual between the change map and the ground truth. 
%
Inspired by those residual blocks, we develop a fusion fine-tune module, FFM, at the end of our whole architecture to correct prediction errors in the previous change map (see Fig.~\ref{3_lower_fine_tune} and Table 1). FFM is a U-net-like encoder-decoder network with skip connections. It could be defined as:
%
\begin{equation}
\begin{aligned}
residual = \psi (C_4),\\
C_5 = C_4 \oplus residual,
\end{aligned}
\end{equation}
%
where $\psi$ represents FFM and $residual$ denotes the learned residual. 

The FFM efficiently learns to fit a residual that reflects the difference between the ground truth and the previous change map in CMFM. Similar to U-net, we use a series of convolution layers and max pooling layers in the encoder and a series of convolution layers and bilateral upsampling layers in the decoder. Different from two convolution layers in each step of U-net, we just use one convolution layer in our module. 


%\begin{table}[tb]
%	\small
%	\caption{Details of fusion fine-tune module. ‘Conv’ represents convolution layer and its parameters denote ‘in channels’, ‘out channels’, ‘kernel size’ and ‘stride’ respectively. Except for the layer1 and the layer8, other floors are followed by a BatchNorm layer and ReLu layer.}
%	\begin{center} 
%		\begin{tabular}{c|c|c|c}
%			\hline
%			\hline
%			\multicolumn{2}{c|}{\textbf{encoder}} &  \multicolumn{2}{c}{\textbf{decoder}} \\
%			\hline
%			\multirow{2}{*}{layer1} &Multi-Rowand Col &\multirow{2}{*}{layer5} &Conv(64,64,3,1) \\
%			&blblablblbl  & 	& dudu \\
%			\hline
%			\hline
%		\end{tabular}
%	\end{center} 
%\end{table}
%
%\begin{tabular}{|c|c|c|c|c|}
%	\hline
%	\multirow{2}{*}{Multi-Row} &
%	\multicolumn{2}{c|}{Multi-Column} &
%	\multicolumn{2}{c|}{\multirow{2}{*}{Multi-Rowand Col}} \\
%	\cline{2-3}
%	& column-1 & column-2 & \multicolumn{2}{c|}{} \\
%	\hline
%	label-1 & label-2 & label-3 & label-4 &label-5 \\
%	\hline
%\end{tabular}

At the end of the decoder, a sigmoid function is applied after the addition of the input. The output of this FFM is the final change map of the model and is supervised by the ground truth. Binary cross entropy (BCE) \cite{boer2005a} is adopted in FFM due to its better performance in refinement. It is defined as:
%
\begin{equation}
\ell_{b c e}=\sum_{(i, j)}[-y_{(i,j)} \log (C_{5(i,j)})-(1-y_{(i,j)}) \log (1-C_{5(i,j)})],
\end{equation}
%
where $y_{(i,j)} \in \left\{0, 1\right\}$ is binary ground truth and $C_5$ denotes the predicted change map.

\begin{figure}[h]
	\begin{center}
		\includegraphics[width=0.9\linewidth]{images/new_distribution/heatmaps.jpg}
	\end{center} \vspace{-4mm}
	\caption{{Illustrations of heatmaps of change maps. The `semantic' has some holes and a lot of noise. After CMFM, the fused change map is full but very coarse. The refined change map, further obtained through FFM, has clearer shape and clean background. $\oplus$ represents pixel-wise add.
	}} \label{4_heatmaps}
\end{figure}


In order to better comprehend each module in our pipeline, we visualize change maps $\left\{ C_1, C_2, C_3, C_4, C_5 \right\}$, which represent edge, location, semantic, fused change map and refined change map respectively in Fig. 4. We can find $C_1$ preserves better edge information of original input image pairs as well as some noise. In the bottom of our encoder, we extract $C_2$ to get location information. 
In Fig. 4, $C_3$ contains important semantic information, but it has some holes and noise, which is hard to overcome. Therefore, we adopt CMFM to generate fused change map $C_4$, which integrates information with full details. Finally, the refined change map, further obtained through FFM, owns complete shape and precise boundary and has a good distribution in pixel-level.




%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Experiments}

In order to evaluate the proposed method, experiments on two frequently used datasets: CDnet 2014 \cite{wang2014cdnet} and AICD-2012 \cite{Bourdis2011Constrained} are conducted. The results demonstrate that our method achieves the best performance among the state-of-the-art methods.



\subsection{Evaluation Metrics}
The experiments are compared using following metrics: \textit{F-measure}, Percentage of Wrong Classifications (\textit{PWC}), and Mean Intersection over Union (\textit{mIoU}). The \textit{F-measure} combines \textit{Precision} and \textit{Recall} into a single metric. \textit{F-measure} is computed based on a number of true positives (\textit{TP}), false positives (\textit{FP}), and false negatives (\textit{FN}) as:
%
\begin{equation}
\begin{aligned}
\text { \textit{F-measure} }&=\frac{2 \times \text { \textit{Precision} } \times \text { \textit{Recall} }}{\text { \textit{Precision} }+\text { \textit{Recall} }} \\
&=\frac{2 \textit{TP}}{2 \textit{TP}+\textit{FP}+\textit{FN}}
\end{aligned}
\end{equation}

\textit{F-measure} does not contain true negatives (\textit{TN}). However, \textit{PWC} denotes the percentage of wrong classifications between a predicted change map and its ground truth, which incorporates the influence of true negatives (\textit{TN}). Given a change map, its \textit{PWC} is defined as:
%
\begin{equation}
\textit{PWC}=\frac{100 \times(\textit{FN}+\textit{FP})}{\textit{TP}+\textit{FN}+\textit{FP}+\textit{TN}}
\end{equation}
%
\textit{mIoU} is a ratio between the intersection and the union of the ground truth and predicted segmentation. \textit{mIoU} can be computed as the number of true positives over the sum of true positives, false negatives, and false positives:
%
\begin{equation}
\text { \textit{mIoU} }=\frac{\textit{TP}}{\textit{TP}+\textit{FN}+\textit{FP}}
\end{equation}

\textit{F-measure} represents a balance of \textit{Precision} and \textit{Recall}. It could comprehensively evaluate the quality of a method. \textit{PWC} takes into account the true negatives that \textit{F-measure} does not, and is thus a complementary indicator. \textit{mIoU} is one of the standard metrics for segmentation purposes.
%
High \textit{F-measure} and high \textit{mIoU} represent good performance of a method and \textit{PWC} is the opposite.


\subsection{Implementation Details}
In the experiment, all the encoder parameters are initialized from the \textit{ResNet-34}. We utilize the stochastic gradient descent (SGD) optimizer to train our network and its hyperparameters are set to the default values, where the initial learning rate \textit{lr}=1e-4, \textit{momentum}=0.9, and \textit{weight decay}=5e-5. The network is trained until its loss converges. All of the code is implemented on the PyTorch \cite{paszke2017automatic} framework, and the training hardware consists of two GTX 1080Ti GPUs (with 22GB memory altogether). In our experiments, we visualize the change of the loss to help us judge whether it has converged or not. When the loss no longer drops, we make a preliminary judgment that it has converged. Then, if the output images are complete and have clear boundaries, we conclude that it has converged; some examples are shown in Fig. 8. The training loss converges after about 300 iterations with a batch size of 8 and the whole training process takes about 24 hours. Note that detecting one image takes about 0.101s.



\begin{figure}[h]
	\begin{center}
		\includegraphics[width=1.00\linewidth]{images/my_images/CMFM.png}
	\end{center} \vspace{-4mm}
	\caption{{Comparisons of two methods for information fusion: concatenation and pixel-wise add. The latter is more effective and generates better edges in our task.
	}} \label{5_CMFM}
\end{figure}

\begin{table}[thb]
	\caption{Comparisons on two methods for information fusion. Note that $\uparrow$ means larger is better and $\downarrow$ is the opposite.} \label{market}
	\begin{center}
		\begin{tabular}{l|c|c}  
			\hline \hline
			\multirow{2} * {Strategy}    & \multicolumn{2}{|c } { Metrics }      \\
			\cline{2-3}
			&\textit{PWC} $\downarrow$ &\textit{F-measure} $\uparrow$     \\
			\hline
			concatenation &4.223 &0.943          \\
			pixel-wise add &\textbf{1.942} &\textbf{0.973}                \\
			\hline \hline 
		\end{tabular}
	\end{center} 
\end{table}


As mentioned in Section 3.2, we show some visualization results of the two methods for information fusion in Fig. 5. It can be seen that pixel-wise add performs better on information fusion in our task. It is worth mentioning that concatenation could not produce consistent edges compared to pixel-wise add and requires more parameters. As shown in Table 2, it can be found that pixel-wise add is more accurate under the \textit{PWC} and \textit{F-measure} metrics. As a result, we choose pixel-wise add in CMFM.










\begin{figure*}[tb]
	\begin{center}
		\includegraphics[width=0.8\linewidth]{images/my_images/training_strategy.jpg}
	\end{center} \vspace{-4mm}
	\caption{{Visualization results of three training strategies. Rows 1-4 represent four different conditions of baseline category.  Columns 1-6 show paired image 1 ($I_1$), paired image 2 ($I_2$), results of strategy-1,strategy-2, strategy-3 as well as ground-truth (GT) separately. The strategy-3 has the best performance in all strategies, clear and full.
	}} \label{6_training_strategy}	
\end{figure*}


\subsection{CDnet 2014 Dataset}
CDnet 2014 dataset is one of the most famous change detection datasets with accurate binary ground truth. CDnet 2014 dataset contains a realistic, camera-captured, diverse set of indoor and outdoor videos \cite{wang2014cdnet}. It provides 53 videos categorized into 11 different challenging situations, such as pan-tilt-zoom cameras, camera jitter, and night videos. Spatial resolutions of these frames vary from 320$\times$240 to 720$\times$576 pixels. All the video sequences are recorded at different scenarios, which have sufficient differences for evaluating the robustness of change detection methods.

In this paper, we split the frames of each video at random, $80\%$ for training and the remaining $20\%$ for testing. Note that we ensure all types of change objects are covered and the sample size is balanced during division. The durations of the videos are from 900 to 7,000 frames \cite{wang2014cdnet}. Some methods use different manual thresholds to get different \textit{F-measure} values \cite{wang2017interactive}. Then, they choose the most suitable threshold which gives the best performance in most cases. In our method, it is worth mentioning that we use an adaptive threshold to get \textit{F-measure} values, which is defined as one half of the sum of the maximum and minimum pixel values. The formula is as follows:
%
\begin{equation}
T=\frac{Max(C(x, y)) + Min(C(x, y))}{2}, 
\end{equation}
%
where $Max(C(x, y))$ and $Min(C(x, y))$ denote the maximum and minimum pixel value of the change map, respectively.


\subsubsection{Training Strategy}

\begin{table}[thb]
	\caption{Results of different training strategies are shown on baseline of CDnet 2014 dataset.} \label{market}
	\begin{center}
		\begin{tabular}{l|c|c}  
			\hline \hline
			\multirow{2} * {Strategy}    & \multicolumn{2}{|c } { Metrics }      \\
			\cline{2-3}
			&\textit{PWC} $\downarrow$ &\textit{F-measure}  $\uparrow$     \\
			\hline
			strategy-1 &1.275 &0.745          \\
			strategy-2 &0.723 &0.857                \\
			strategy-3  &\textbf{0.458} &\textbf{0.921}                     \\
			\hline \hline 
		\end{tabular}
	\end{center} 
\end{table}

Because of the well-designed structure of the proposed method, different training strategies could be adopted to explore different results and compare them in detail. In this paper, we design three strategies and utilize the baseline category of CDnet 2014 dataset to validate the effectiveness of each strategy. The three strategies are as follows:
%
\begin{enumerate}
	\item Train the whole network just using the pre-trained \textit{ResNet-34} module. 
	\item Train the change map extraction module using the pre-trained \textit{ResNet-34} module at first, and then train the whole network using the pre-trained change map extraction module.
	\item Similar to 2), but fix the parameters in the pre-trained change map extraction module at second training step.
\end{enumerate}

As can be seen in Table 3 and Fig. 6, we can find that strategy-3 shows the best performance in both visualization and metrics. This is because the whole network is complicated and relatively deep. In addition, the subtraction in Equation (4) is an unusual operation compared to other deep learning architectures and the absolute value after subtraction is also hard to process in backward propagation. As a result, training the whole architecture is a challenging problem. According to our pipeline, CMFM and FFM rely heavily on the outputs of CMEM. For the above reasons, training in stages and fixing parameters are helpful to the convergence of the whole network. Therefore, it is very reasonable to choose strategy-3 for training and all of the following experiments are based on strategy-3.









\subsubsection{Ablation Experiment}
\begin{table}[tb]
	\caption{Ablation study on different architectures.} \label{market}
	\begin{center}
		\begin{tabular}{l|c}  
			\hline \hline
			{Configurations}    &\textit{F-measure} $\uparrow$      \\
			%&\textit{Prec} &\textit{F-measure} $\uparrow$     \\
			\hline
			Baseline (ResNet-based U-Net)   &0.835             \\
			%			baseline + ms  &88.8 &88     \\
			%			baseline + ms + CMFM    &88 &88                     \\
			%			baseline + ms + CMFM  + FFM  &88 &88\\
			CMEM  &0.884            \\
			%			CMEM + ms   &88 &88                     \\
			CMEM + CMFM  &0.893\\
			CMEM + CMFM  + FFM (SASCNet) &\textbf{0.923}             \\
			\hline \hline 
		\end{tabular}
	\end{center} 
\end{table} 



In this section, we validate the effect of each key component used in the proposed model. As can be seen in Table 4, we conduct an ablation experiment and compare the different performances. 
%
First of all, ResNet-based U-Net is used as our baseline to test the performance of general approach. Then we start with our proposed CMEM, which is a modified and progressive architecture lighter than baseline. After that, CMFM and CMEM as a whole are analysed. Finally, we compare the quantitative results of our SASCNet against the above related architectures.
%
From the data in Table 4, we can find that CMEM performs better than baseline. And CMFM and FFM significantly improve the results respectively. It proves that three modules in our SASCNet are very useful for the change detection task.

In addition, it can also be seen from the visualized heatmaps in Fig. 4 that the refined change map generated by SASCNet is significantly clearer than that (see ``semantic'' in Fig. 4) produced only by CMEM. This is because a single change map is not enough to provide complete information. According to the visualization, the change map is enriched after the integration in CMFM. And FFM has a strong capability to produce outputs with a good distribution at pixel-level.






\begin{figure*}[!h]
	\begin{center}
		\includegraphics[width=0.95\linewidth]{images/my_images/results.jpg}
	\end{center} \vspace{-4mm}
	\caption{{Visualization results of the proposed method and other popular methods on CDnet 2014 dataset. Columns 1-6 represent six different changing conditions of CDnet 2014 dataset. Rows 1-9 show paired image 1 ($I_1$), paired image 2 ($I_2$), results of CL-VID, SWCD, BMN-BSN, IUTIS-5, Cascade, SASCNet as well as ground-truth (GT) separately.
	}} \label{7_results}
\end{figure*}
% (removed stray empty \textbf{})

\subsubsection{Results}

\begin{table}[tb]
	\caption{Metrics for SASCNet on each CDnet 2014 video category. \textit{Prec} presents \textit{Precision} and \textit{Rec} means \textit{Recall}.} \label{market}
	\begin{center}
		\begin{tabular}{l|c|c|c|c}  
			\hline \hline
			\multirow{2} * {Category}    & \multicolumn{4}{|c } { Metrics }      \\
			\cline{2-5}
			&\textit{Prec} $\uparrow$ &\textit{Rec} $\uparrow$ &\textit{PWC} $\downarrow$ &\textit{F-measure} $\uparrow$  \\
			\hline
			baseline  &0.919 &0.924     &0.458 &0.921   \\
			badWeather  &0.957 &0.941    &0.450 &0.949  \\
			cameraJitter    &0.948 &0.942    &0.371 &0.944  \\
			dynamicBG  &0.946 &0.946  &0.216 &0.943  \\
			interOM  &0.944 &0.956   &0.121 &0.949    \\
			lowFramerate   &0.886 &0.923     &0.395 &0.894     \\
			nightVideos  &0.955 &0.833  &0.215 &0.887  \\
			PTZ  &0.871 &0.892     &0.196 &0.878    \\
			shadow   &0.955 &0.968      &0.469 &0.961   \\
			thermal  &0.942 &0.943  &0.416 &0.942 \\	
			turbulence &0.894 &0.832     &0.144 &0.857  \\
			average  &0.925 &0.926 &0.311 &0.923 \\
			\hline \hline 
		\end{tabular}
	\end{center} 
\end{table}

The results on CDnet 2014 dataset are listed in Table 5. It shows that the proposed network achieves great performance for most of categories and gets an average \textit{F-measure} more than 0.92. 
%
For some difficult categories such as bad weather and shadow, it even reaches an \textit{F-measure} up to 0.95. In some irrational categories in which pictures are not aligned, our model also has good results from the statistics. For example, the \textit{F-measure} for pan-tilt-zoom (PTZ) is above 0.87 and the \textit{F-measure} for camera jitter is even more than 0.94. The experiment results prove that our model has a great capability to process complex semantics.


\begin{table}[tb]
	\caption{Result comparison of the proposed method and other popular methods on CDnet 2014 dataset. Note that NA means the data is not available.} \label{market}
	\begin{center}
		\begin{tabular}{l|c|c|c}  
			\hline \hline
			\multirow{2} * {Date} &	\multirow{2} * {Method}    & \multicolumn{2}{c } { Metrics }      \\
			\cline{3-4}
			 & &\textit{PWC} $\downarrow$ &\textit{F-measure} $\uparrow$  \\
			\hline
			2017 & CL-VID \cite{L2017Foreground} &7.477 &0.581 \\
			2016 & AAPSA \cite{ramirezalonso2016auto-adaptive} &2.073 &0.618 \\
			2019 & BMN-BSN \cite{mondejar2019end} &2.906 &0.719 \\
			2015 & SuBSENSE \cite{stcharles2015subsense:}  &1.780 &0.741 \\
			2018 & SWCD \cite{Isik2018SWCD} &1.341 &0.758 \\
			2017 & IUTIS-5 \cite{bianco2017combination}   &1.199 &0.772 \\
			2018 & CosimNet \cite{guo2018learning}  &NA &0.859\\
			2017 & Cascade \cite{wang2017interactive}  &0.405 &0.921\\
			2020 & Ours (SASCNet)    &\textbf{0.311} &\textbf{0.923} \\
			\hline \hline 
		\end{tabular}
	\end{center} 
\end{table}

\subsubsection{Discussion}

In addition, we compare the results between the proposed approach and the state-of-the-art methods. Figure 7 shows the results of CL-VID \cite{L2017Foreground}, SWCD \cite{Isik2018SWCD}, BMN-BSN \cite{mondejar2019end}, IUTIS-5 \cite{bianco2017combination}, Cascade \cite{wang2017interactive} and the proposed method separately in different challenging conditions. 
%
Through the resulting figures, we can find that there are some holes and redundant blocks in the compared methods. From the perspective of boundaries and shapes of the predicted segmentation, it is obvious that some results are uneven and incomplete when the backgrounds of paired images are ambiguous and blurry (see columns ``bad Weather'' and ``thermal'' in Fig. 7). In contrast, our method could generate better results with clear boundaries and complete shapes in most challenging conditions.
%
As can be seen in Table 6, we evaluate and compare the proposed method with other change detection methods in terms of \textit{PWC} and \textit{F-measure}. It can be seen that our SASCNet performs best against the state-of-the-art methods under all evaluation metrics. This is because our model has a powerful capability to eliminate some outliers and maintain the true values, which leads to great results in those comprehensive metrics.


\begin{table}[tb]
	\caption{Wilcoxon signed rank test statistic (W) and \textit{p-value} (\textit{p}) for \textit{PWC} and \textit{F-measure} comparisons. Note that all results are calculated between SASCNet and compared methods in the table.} \label{market}
	\begin{center}
		\begin{tabular}{l|c|c|c|c}  
			\hline \hline
			\multirow{2} * { Compared method}    & \multicolumn{2}{c|} {\textit{PWC}} & \multicolumn{2}{c} {\textit{F-measure}}     \\
			\cline{2-5}
			&W  &\textit{p} < 0.05 &W  &\textit{p} < 0.05  \\
			\hline
			CL-VID  &1.0 &Yes  &1.0 &Yes \\
			AAPSA  &0.0 &Yes  &0.0 &Yes \\
			BMN-BSN    &2.0 &Yes &1.0 &Yes \\
			SuBSENSE   &3.0 &Yes &1.0 &Yes \\
			SWCD   &0.0 &Yes  &1.0 &Yes \\
			IUTIS-5    &8.0 &No &1.0 &Yes \\
			Cascade   &31.0 &No &33.0 &No  \\
			\hline \hline 
		\end{tabular}
	\end{center} 
\end{table}

In addition, non-parametric statistical tests are usually adopted to demonstrate the effectiveness of the results \cite{ribeiro2020ensemble}. Here, we use Friedman test and Wilcoxon signed rank test on 11 different categories of CDnet 2014 dataset. 
	
In the Friedman test, the p-values for \textit{PWC} and \textit{F-measure} are 4.991e-10 (<0.05) and 1.175e-10 (<0.05), respectively. This suggests that \textit{PWC} and \textit{F-measure} in the results differ from each other. Besides, Table 7 presents the results of the Wilcoxon signed rank test for the evaluation of \textit{PWC} and \textit{F-measure}. It is confirmed that CL-VID, AAPSA, BMN-BSN, SuBSENSE, SWCD and IUTIS-5 are inferior to SASCNet at the $5\%$ significance level. Combining Table 6, Table 7 and the results of the Friedman test, we can conclude that although SASCNet and Cascade achieve similar values in the average \textit{F-measure} of Table 6 and \textit{p-values} > 0.05 in the Wilcoxon signed rank test, they are actually different and SASCNet performs better than Cascade under other metrics.


\begin{figure}[thb]
	\begin{center}
		\includegraphics[width=0.95\linewidth]{images/my_images/enlarge.png}
	\end{center} \vspace{-4mm}
	\caption{{Qualitative comparison of the proposed method with the top method (Cascade). Each sample occupies two rows, where the first row is the original images and the second row is the zoom-in view.}
	} \label{8_enlarge}
\end{figure}
Furthermore, our method focuses on region accuracy as well as the boundary quality. Therefore, to evaluate the quality of segmented changed objects, we enlarge a part of the pictures to make them obvious as Fig. 8 shows. Compared with the top method (Cascade), we can see that our method is able to accurately segment changed objects under various challenging conditions, including shadow (1st and 2nd rows) and camera jitter (3rd and 4th rows). We would like to emphasize that the object boundaries of our results are more accurate and clearer than others as Fig. 7 and Fig. 8 show. 

In comparison, it can be found that our approach achieves competitive performance in both visualization and metrics. Note that these results are achieved without any other pre-processing and post-processing.









% FIXME: Carefully refer to the earlier revisions and similarly revise the wording and descriptions of the following experiment sections.
% A diff tool such as meld can be installed to carefully compare and analyze the revised parts:
%   * how to de-emphasize 'we'
%   * how to describe things in a more objective way
%   * how to use varied sentence structures and wording
%
\subsection{AICD-2012 Dataset}



\begin{table}[thb]
	\caption{Result comparison of the proposed method and other popular methods on AICD-2012 dataset.} \label{market}
	\begin{center}
		\begin{tabular}{l|c|c}  
			\hline \hline
			\multirow{2} * {Method}    & \multicolumn{2}{c } { Metrics }      \\
			\cline{2-3}
			&\textit{F-measure} $\uparrow$ &\textit{mIoU} $\uparrow$ \\
			\hline
			WS-Net \cite{khan2016learning} &0.145  &0.389 \\
			CNN-feat \cite{sakurada2015change}  &0.287  &0.535 \\
			DeconvNet \cite{alcantarilla2018street}   &0.341  &0.625 \\
			Mask-CDNet \cite{bu2019mask}  &0.370  &0.649\\
			Ours (SASCNet)    &\textbf{0.867} &\textbf{0.780}	 \\
			\hline \hline 
		\end{tabular}
	\end{center} 
\end{table}

In order to verify the robustness of our method, we also evaluate it on AICD-2012 dataset \cite{Bourdis2011Constrained}. AICD-2012 is a synthetic dataset generated by a realistic rendering engine of a computer game, which consists of 1,000 pairs of 800$\times$600 images. There are 100 different scenes in this dataset and each scene contains different objects like trees, houses, water, and roads. These scenes are very demanding since all of them contain rather small changed objects with the change of light and shadow. 



\textbf{Implementation Details and Results:} In our experiment, we choose 100 image pairs as the training set and 25 image pairs as the test set. To speed up convergence and get better performance, we utilize the dilation operation in morphology to generate a corresponding region of interest (ROI) for each image pair. The hyperparameters of this experiment are almost the same as those of the above experiment on CDnet 2014 dataset. In this paper, we change the input size from 512$\times$512 to 800$\times$608. 



The qualitative results on AICD-2012 dataset are visualized in Fig. 10. It is obvious that the proposed approach achieves a good result on AICD-2012 dataset. The shape of a small changed object can be seen clearly in the case of such a large background. As can be seen in Table 8, our method outperforms the state-of-the-art methods in terms of both \textit{F-measure} and \textit{mIoU}. Hence, it proves that our framework is able to overcome some extreme conditions. Furthermore, our method can get better results if we adopt some pre-processing, such as zoom-in and cut.




\subsection{Further Discussion}

\begin{figure}[thb]
	\begin{center}
		\includegraphics[width=0.95\linewidth]{images/my_images/box_pool_results.png}
	\end{center} \vspace{-4mm}
	\caption{{Some examples where our proposed method is insufficient. Rows 1-2 show the results from low frame-rate (port-0-17fps) and baseline (PETS2006), respectively.
	}} \label{9_box_pool_results}
\end{figure}

From the previous experiments, we can conclude that our method is able to efficiently detect changed regions in well-aligned or dithering images (camera jitter category of CDnet 2014 dataset). Changed regions shown above generally have blocky shape, which are relatively complete and large. However, our approach has some limitations when dealing with small changed objects, which are also extremely similar to the background.
%
In equation (4) mentioned above, the subtraction of two feature maps will be close to 0 in this condition, which means that SASCNet can hardly learn any difference in changed regions. 
%
From the aspect of our network, we use a quadruple bilateral upsampling layer in the last layer of the decoder in CMEM, which means the module may lose some detailed information.
%
Therefore, the \textit{F-measure} is low, some values are even below 0.6, and some objects cannot be detected in some extreme cases (see Fig. 9).
%
In the future work, to deal with the problems shown in Fig. 9, better methods will be investigated, such as active lighting recurrence \cite{zhang2018active} and the precise active camera relocalization \cite{tian2018active}. 


%From the previous experiments, we can conclude that our method is able to efficiently detect changed regions in well-aligned or dithering images (camera jitter of CDnet 2014). Changed regions mentioned above generally have blocky shape, which are relatively complete and large. However, fine-grained change detection is a meaningful subject and has received increasing attention. Therefore, we attempt to figure out whether the proposed method could solve this problem.

%\textbf{FGCD} \cite{feng2015fine-grained} dataset is well-known and has 3 real-world datasets for fine-grained change detection of misaligned scenes under varied illuminations. In our experiments, we choose to test our model with ``Db dataset" in FGCD, which includes 10 groups of images of laboratory testing blocks for the aging simulation tests of mural deterioration. In ``Db dataset", the size of all images is 705$\times$698 and all testing blocks are observed at 7 different illuminations. These images are very challenging on account of fine grain and misalignment with change of illuminations.




%\textbf{Implementation details and results:} In our experiment, we choose 50 image pairs as train dataset and 12 images pairs as test dataset and each image is firstly resized to 800$\times$800. From the results in Fig. 8, we find that our method pays more attention to changed block, which is relatively large in the original paired images. However, the proposed method has problems in coping with the fine-grained changes. In this condition, some small details are lost in the result. We think the reasons for the undesired results are as follows. From the aspect of our network, in the last layer of decoder in CMEM, we use a quadruple bilateral upsampling layer, that means the module will lose some detailed information, even if the lower layer in CMEM provides a lot of information. From the aspect of ``Db dataset", although the illuminations, pose, and position of camera were controlled, there is an angle change in paired images, which makes it harder for the proposed method to detect very small changes.



\begin{figure*}[thb]
	\begin{center}
		\includegraphics[width=0.9\linewidth]{images/my_images/AICD.jpg}
	\end{center} \vspace{-4mm}
	\caption{{The qualitative results on AICD-2012 dataset. Rows 1-2 show paired image 1 and paired image 2. Row 3 displays the ground-truth, and our results are shown in Row 4.
	}} \label{10_AICD}
\end{figure*}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Conclusion}
We propose a novel siamese convolutional network, named SASCNet for change detection. To get change maps with high quality, the network is designed with three modules: change map extraction module, change map fusion module, and fusion fine-tune module. Change map extraction module is a novel siamese encoder-decoder network with multi-scale supervisions, which aims to extract different feature maps. Change map fusion module could integrate those feature maps into a whole. Fusion fine-tune module has great ability to refine the prediction according to ground-truth and enhance the performance.
%
In practical applications, it has the following advantages: Firstly, the proposed method uses an adaptive threshold to get the final binary results without any other operations like CRF. As a result, it could process images or video sequences at a fast speed.
%
Secondly, because of well-designed structure and modular design, our network can be easily split or recomposed for other tasks by changing the processing flow.
%
Finally, experiments on two popular datasets show the proposed method could deal with various challenging conditions, such as illuminations, shadow, and zooming. Some visual results show that the outputs of our method obtain complete shapes and clear boundaries compared with other methods, which demonstrates the effectiveness of the proposed SASCNet.


Although the proposed method achieves great performance on some challenging conditions, there is still some room for improvement in our work. In the future work, we will explore weakly-supervised methods on the existing model.



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section*{Acknowledgements}
This work is partly supported by grants from National Natural Science Foundation of China (61573284), and Research Funds for Interdisciplinary Subject, NWPU.



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section*{References}

\bibliography{mybibfile}

\end{document}
