\documentclass[5p]{elsarticle}

\usepackage{lineno, hyperref}
\modulolinenumbers[5]
\usepackage{graphicx}
\usepackage{amssymb}
\usepackage{amsmath}
%\usepackage{latexsym}
%\usepackage{cite}
\usepackage{color}
\usepackage{epsfig}
\usepackage{epstopdf}
%\usepackage{lineno}
\usepackage{mathrsfs}
\usepackage[noend]{algpseudocode}
%\usepackage{ulem}
\usepackage{algorithmicx, algorithm}
\usepackage{multirow}
\usepackage{bbm}
\renewcommand{\topfraction}{1.0}
\renewcommand{\bottomfraction}{1.0}
\renewcommand{\textfraction}{0.0}

\journal{Engineering Applications of Artificial Intelligence}


\bibliographystyle{elsarticle-num}


\begin{document}

\begin{frontmatter}     

%\title{Elsevier \LaTeX\ template\tnoteref{mytitlenote}}
%\title{Aerial Image Change Detection using Dual Regions of Interest Networks} 
\title{Multi-domain Incremental Change Detection} 
%\tnotetext[mytitlenote]{Fully documented templates are available in the elsarticle package on \href{http://www.ctan.org/tex-archive/macros/latex/contrib/elsarticle}{CTAN}.}

%% or include affiliations in footnotes:

\author[nwpu_address]{Lean Weng}
\ead{2021200088@mail.nwpu.edu.cn}

\author[nwpu_address]{Pengcheng Han}
\ead{hanpc1125@mail.nwpu.edu.cn}

\author[nwpu_address]{Shuhui Bu}
\ead{bushuhui@nwpu.edu.cn}


\address[nwpu_address]{Northwestern Polytechnical University,  China}

\begin{abstract}
%Multi-domain learning for change detection attempts to use a generalized joint model to learn multiple geographic datasets, thus making the model applicable to different geographic datasets.  However, currently most change detection models can only be trained and tested on single dataset, and a fine-tuning experiment performed sequentially on different geographic scenarios shows that the change detection model catastrophically forgets previously learned knowledge when learning a new geographic scenario and is unable to perform incremental learning. 
Currently, most change detection models do not have the capability of multi-domain and incremental learning due to catastrophic forgetting. 
%
To address this problem, we propose a multi-domain incremental learning framework for change detection. Given a model trained on a specific geographic domain, the goals are (i) to incrementally learn the new geographic domain, (ii) to maintain the performance on the old domain, and (iii) to train on the new geographic domain without using the previous dataset. 
%
%A dynamic domain incremental architecture is proposed that assigns generic shared domain-invariant parameters to capture homogeneous semantic features present in all domains, while dedicated domain-specific parameters learn domain-specific information for each domain. 
%
A novel dynamic domain incremental learning architecture is proposed, which utilizes a combination of generic shared domain-invariant parameters and dedicated domain-specific parameters to capture the semantic features that are common to all domains and the domain-specific information, respectively.
%
%The proposed dynamic architecture and multi-layer adaptive distillation approach helps to achieve a good balance between retaining old knowledge and acquiring new knowledge. 
%
%To investigate the problem of incremental detection of change detection models over different geographic domains, we designed and collated a multi-domain change detection dataset containing three different typical geographic scenarios. Experimental results demonstrate the effectiveness of the proposed solution on the dataset.
%
The proposed dynamic architecture and multi-layer adaptive distillation achieve a good balance between the retention of old knowledge and the acquisition of new knowledge.
%
In order to evaluate the performance of the proposed incremental change detection in varying geographical regions, a comprehensive multi-domain change detection dataset is created, incorporating three distinct geographic settings. Experimental results demonstrate the effectiveness of the proposed solution on varying data.
\end{abstract}
 
 
\begin{keyword}
Change Detection, Incremental Learning, Knowledge Distillation
\end{keyword}

\end{frontmatter}


\section{Introduction}
Change detection is one of the important tasks of remote sensing image interpretation. It aims to detect the changes of the surface at different times on remote sensing images, and plays a key role in the fields of urbanization monitoring \cite{huang2013building}, land use change detection \cite{rokni2014water}, disaster assessment \cite{gong2015change} and environmental monitoring \cite{chen2013multi}. With the development of deep learning techniques and the increase of optical remote sensing data, and thanks to the powerful automatic feature extraction capability of deep learning, great progress has been made in research on change detection driven by deep learning techniques, and these methods outperform traditional change detection methods based on manual features.

%\begin{figure}[thb]
%	\begin{center}
%		\includegraphics[width=1.0\linewidth]{images/0.png}
%	\end{center} \vspace{-4mm}
%	
%	\caption{Illustration of challenges of incremental change detection in continuous different domains. First row: We set up three incremental tasks: learning the SYSU model (task 1), then learning CDD (task 2) and PRCV (task 3). Second row: demonstration of the catastrophic forgetting: as the SYSU model is fine-tuned on CDD and PRCV, a dramatic drop in performance on the old dataset can be observed; our approach significantly mitigates this forgetting.} \label{introduction-visualization}
%\end{figure}


\begin{figure}[thb]
	\begin{center}
		\includegraphics[width=1.0\linewidth]{images/1_introduction.png}
	\end{center} \vspace{-4mm}
	
	\caption{Differences and challenges between the domain incremental change detection task and other vision tasks. Label-oriented: In the incremental process of common vision tasks, distillation loss is used to preserve category-specific information, which can be regarded as label-oriented. Feature-oriented: In the incremental process of change detection, if only distillation is used to deal with domain shift, it will lead to knowledge confusion due to the consistency of label space.} \label{knowledge-confused}
\end{figure}

A common assumption for state-of-the-art change detection benchmarks is that all training datasets are accessible and the domain distribution between training and test data is similar. When the change detection model is trained on continuous scenes, it can identify the changed areas in the current geographical scene, but tends to fail in previous geographical scenes.
%
This is referred to as catastrophic forgetting in incremental learning, where new knowledge overwrites previous knowledge \cite{mccloskey1989catastrophic}. It can be found that catastrophic forgetting when shifting from one region to another could be caused by two factors: (i) due to different background conditions, such as remote sensing image acquisition conditions, resolution and weather, geographical scene transfer is encountered in the ground scene environment; (ii) the characteristics of the change area itself will also change greatly at different times, and the corresponding feature space will also shift accordingly.

As shown in Figure \ref{knowledge-confused}, several mainstream vision tasks, such as image classification, semantic segmentation, and object detection, operate in continuously changing scenarios. Although the feature space and label space are shifted between $step_{n}$ and $step_{n+1}$, the labels of particular features are kept consistent. Based on this property, scholars have proposed many knowledge distillation methods to achieve knowledge transfer by constraining the features of $step_{n+1}$ through the soft-label of $step_{n}$, such as distillation through fully connected classification layers or intermediate feature layers \cite{michieli2021knowledge}.
%
Yan \textit{et al.} \cite{yan2021dynamically} designed a method that dynamically adds feature extractors for class-incremental learning, so that the network can be migrated to unrelated tasks, but the method does not achieve sharing between different feature extractors.
%
Rebuffi \textit{et al.} \cite{rebuffi2017learning} shared most of the parameters and achieved tasks such as multi-domain incrementing by adding specific parameters, but fixed the other parameters completely.
%
This type of hard-sharing approach is not suitable for the task of geographic scene changes with low relevance between scenes \cite{vafaeikia2020brief}. Garg \textit{et al.} \cite{garg2022multi} combined knowledge distillation and parameter isolation to achieve multi-domain incremental semantic segmentation.

However, in multi-domain change detection, although the feature space distribution differs between $step_{n}$ and $step_{n+1}$, the label space maintains consistency as shown in Figure \ref{knowledge-confused}. The label-oriented approach to knowledge distillation may lead to knowledge confusion and make the model forget the previous data distribution \cite{lin2022knowledge}. 
%This phenomenon will be analyzed in detail in section \ref{Methods}. 
%
Therefore, we propose a domain incremental framework applicable to change detection. The framework utilizes a feature-oriented approach and aims to distinguish the boundary between old and new domains. Specifically, we divide the parameters into shared and specific parameters. The shared parameters are used to extract shared features between different domains, while the specific parameters are responsible for learning specific features of the new domain. In this way, we effectively solve the problem of knowledge confusion that arises in the case of a consistent label space. To the best of our knowledge, the proposed work is the first attempt of multi-domain incremental learning (MDIL) for change detection. Our main contributions are summarized as follows.
%
\begin{enumerate}
	\item We explicated the issue of knowledge confusion stemming from label space consistency in the change detection task, and devised a domain incremental model suitable for change detection. The parameters of the network have been segregated into two distinct groups: domain-specific parameters and domain-shared parameters. The domain-specific parameters are utilized to learn new domain-specific knowledge, while a novel distillation loss is introduced to update the domain-shared parameters, thereby facilitating soft-sharing across multiple domains.

	
	\item The change detection domain residual unit (CD-DRU) is designed to extract the specific information of the new domain, which enables the decomposition of the feature space, and then enables the extraction of shared features in the feature space by knowledge distillation.
	
	\item To achieve soft knowledge transfer in continuous domains in change detection, we propose a multi-level feature graph distillation method. By utilizing fully connected layers and intermediate layer distillation techniques, we extract intermediate feature layers in multiple dimensions to facilitate the sharing of similar features between different domains.
\end{enumerate}

To validate the effectiveness of the proposed method, we designed and collated a multi-domain change detection dataset (MD-CDD) containing three geographic environments with significantly different acquisition conditions and regions. Extensive experimental results demonstrate that our method outperforms existing incremental learning methods.



\section{Related Work}

This research direction is related to change detection and multi-domain incremental learning, and the latest research in each direction is discussed and analyzed. 


\subsection{Change Detection}

Remote sensing image change detection refers to the use of specific algorithms to classify the dual-temporal images acquired by remote sensing techniques at the pixel level and to obtain a change map. Most early remote sensing image change detection methods used hand-designed features and supervised classification algorithms. 
The booming deep learning techniques, especially deep convolutional neural networks (CNN), which learn multi-level abstractions of data representation, have been widely used in computer vision \cite{lecun2015deep} and remote sensing \cite{zhu2017deep}. Many change detection algorithms based on deep learning have been proposed and demonstrated to outperform traditional methods \cite{liu2016deep}.

In the change detection task, the two inputs of the model are two remote sensing images acquired at two moments and the correct use of these two inputs and the features extracted from them is crucial for the good performance of the change detection model. 
%
Daudt \textit{et al.} \cite{daudt2018fully} made the first attempt to use Fully Convolutional Networks (FCN) for change detection tasks and investigated different ways of fusing features. 
%
Li \textit{et al.} \cite{li2020change} used a new feature fusion method and analyzed the effect of depth supervision on change detection accuracy. With the development of deep feature extraction networks, there have also been many UNet-based \cite{fang2021snunet} and Transformer \cite{chen2021remote} change detection methods.

Remote sensing image change detection methods are rapidly evolving, and researchers are currently focusing on improving change detection accuracy on fixed scenes. However, existing methods require all training data to be provided to the detection model at once, while they do not have the ability to continuously accept data streams and perform incremental learning.


\subsection{Multi-domain incremental learning}

From a task perspective, the literature has successively studied image classification \cite{yan2021dynamically}, semantic segmentation \cite{zhang2022representation}, and object detection \cite{feng2022overcoming} among other tasks in incremental scenarios.
%
From the perspective of incremental objects, it can be divided into class incremental, task incremental, and domain incremental \cite{van2019three}. 
%
In terms of incremental learning methods, the main methods can be grouped into three categories: parameter constraints, construction of memory pools, and dynamic network structure methods \cite{parisi2019continual}. 
%
Among them, both parameter constraint and dynamic network structure methods do not require storing previous data. Parameter constraint methods, represented by knowledge distillation, are widely used in class-incremental scenarios, but they can lead to knowledge confusion when the correlation between old and new tasks is low. The iCaRL \cite{rebuffi2017icarl} algorithm uses knowledge distillation to avoid excessive removal of knowledge from the network, while WA \cite{zhao2020maintaining} adds a bias correction layer after the FC layer to offset the category bias of the new data when using distillation losses, without having to apply the validation set.

Multi-domain incremental learning (MDIL) refers to the sequential learning of a single task over multiple visual domains with potentially different label spaces, such as the task of classifying only images on different datasets. Therefore, some scholars have designed progressive neural networks \cite{rusu2016progressive}, dynamically scalable networks (DEN) \cite{yoon2017lifelong}, partially controllable modular networks \cite{rosenfeld2018incremental}, and other dynamic network structures. 
%
Dynamic network structure methods dedicating a subset of domain-specific parameters to each unique task can also be effective in mitigating forgetting. For this purpose, Rebuffi \textit{et al.} introduced series \cite{rebuffi2017learning} and parallel \cite{rebuffi2018efficient} residual adapters in an attempt to define a generic parameterization of multi-domain networks by using domain-specific and shared network parameters. Guo \textit{et al.} \cite{guo2019depthwise} also used a similar approach to solve multi-domain image classification incremental tasks, but because the shared parameters of this approach only load ImageNet pre-training parameters and do not participate in updates when training new domains, the so-called hard-sharing approach. Therefore, it does not work well for dense pixel classification tasks such as change detection.

Recently, Garg \textit{et al.} \cite{garg2022multi} proposed incremental learning across different domains and categories for semantic segmentation, the core idea is to learn domain-specific knowledge by adding parameters and knowledge distillation to achieve soft-sharing of the remaining parameters, however, the distillation performed by this method only from the FC layer is not applicable to tasks such as change detection where the label representations are the same but the label spaces are different. 
%
Therefore, we should think about how to better extract the common feature expressions between different domains and the unique expressions of the specific domain itself on the change detection task.

In multi-domain incremental tasks, the process between two adjacent steps can also be considered as a kind of supervised domain adaptation with the source domain agnostic, i.e., the target domain is known to be labeled. For example, Liu \textit{et al.} \cite{liu2022end} proposed, in the field of change detection, the use of generative adversarial networks (GAN) to reduce the gap between domains, but the data of the source domain needs to be used during training. Class-incremental domain adaptation \cite{kundu2020class} focuses on source-free domain adaptation while also learning new classes in the target domain. However, all these efforts address the problem of domain adaptation, where source knowledge is typically adapted to the target domain, while our work in IL focuses on learning in the target domain while maintaining the performance of the source domain.




\section{Methodology}\label{Methods}
%\textbf{Knowledge Confusion} We first formally introduce the KD method, then we illustrate how the vanilla ensemble KD method functions, including both logits-based and feature-based cases. Given a teacher and a student network, we denote the logits of two networks as $a^t$ and $a^s$. Then KD encourages that the logits of the student network mimic those of the teacher network by minimizing the following loss:
%\begin{equation}\label{knowledge distillation}
%\begin{aligned}
%\mathcal{L}_{k d}=\mathcal{H}\left(\boldsymbol{p}^s, \boldsymbol{p}^t\right)&=\mathcal{H}\left(\sigma\left(\boldsymbol{a}^s ; T\right), \sigma\left(\boldsymbol{a}^t ; T\right)\right)\\
%&=-\sum_{k=1}^K p^t[k] \log p^s[k]
%\end{aligned}
%\end{equation}%
%%
%where $T$ is the temperature to soften the logits for more fine-grained information, $\sigma(\cdot)$ is the softmax operation with temperature $T$, $\mathcal{H}(\cdot, \cdot)$ is the cross-entropy loss to measure the discrepancy of softened probabilistic output between the student and teacher,
%\textbf{Gradient Conflict in \ref{knowledge distillation}.} Although the label spaces of the $step_{n+1 }$ dataset enables the standard knowledge distillation optimization in \ref{knowledge distillation}, it can cause training difficulty when the label space of the dataset remains unchanged and there are feature conflicts. To further analyze the negative effect caused by feature conflict, we consider an
%example and show the step of updating a single parameter $θ$ that contributes to the output $O$ of an arbitrary class $k$ in the last layer of the network. Given an image $X_{1}$ from one dataset that is labeled as $k$ at position ($h$, $w$), the gradient propagated by the loss at position ($h$, $w$) to parameter $θ$ can be calculated as:
%
%\begin{equation}
%\frac{\partial \mathcal{L}_{k d}}{\partial \theta}=\frac{\partial O_s^{(h, w, k)}}{\partial \theta}\left(P_s^{(h, w, k)}-P_t^{(h, w, k)}\right)
%\end{equation}
%%
%Now, consider an identical image $X_{2}$ that originates from another dataset defined by a different feature space. Combining the two cases, the gradient update for parameter $θ$ becomes:
%
%\begin{equation}
%\begin{aligned}
%\frac{\partial \mathcal{L}_{s e g}^{c e}}{\partial \theta}= & \frac{\partial O_1^{(h, w, k)}}{\partial \theta}\left(P_1^{(h, w, k)}-Y_1^{(h, w, k)}\right) \\
%& +\frac{\partial O_2^{(h, w, k)}}{\partial \theta}\left(P_2^{(h, w, k)}-Y_2^{(h, w, k)}\right) .
%\end{aligned}
%\end{equation}
%%
%Note that, since $X_{1}$ and $X_{2}$ are identical areas, $\frac{\partial O_1^{(h, w, k)}}{\partial \theta}= \frac{\partial O_2^{(h, w, k)}}{\partial \theta}$. However, since the two arears originate from different datasets, we have $Y_1^{(h, w, k)} \neq Y_2^{(h, w, k)}$ ($i$.$e.$, if $Y_1^{(h, w, k)}=1, Y_2^{(h, w, k)}=0$). Thus, the parameter $θ$ receives one gradient that is smaller than 0, and another that is larger than 0, despite coming from identical samples. This is not optimal for training the model, yet can easily occur when training a model on multiple datasets with conflicting label spaces.


\textbf{Problem Description.} The objective of the multi-domain incremental change detection problem is to update the model using only images from the new domain $D_{n}$, such that it performs well on the new domain dataset while maintaining performance on the old domain dataset. $D_n$ denotes the diachronic remote sensing images collected from a specific geographic condition, and $Y_n$ denotes the semantic labels under this domain as $\{0,1\}$, where 0 means no change and 1 means change, noting that, unlike the semantic segmentation task, $Y_n \in\{0,1\}$ holds for every incremental step $n$.

The objective of multi-domain incremental change detection is to train a separate model $M$ to detect changes in each domain $D_{t}$, identifying the changed region. Thus, given $t$ domains, at each IL step $t$, the model is committed to learning a mapping $M_t \left(X_t, t\right)=Y_t$ while ensuring that, when learning the $t$-th domain, the performance degradation on the previous domains $D_{t-i}, 0<i<t$, is small. At any given step $t$, the data from the previous domains is not available for training.

\textbf{Proposed Framework}. The model for incremental change detection is shown in Figure \ref{architecture}, where the UNet acts as a feature extractor, and the model consists of three major components: a shared encoder (blue), a domain residual unit (DRU), and a domain-specific encoder. 
%
Firstly, changes in the old domain $D_{n-1}$ are detected, and then this model is frozen (gray). At step $n$, the model $M_{n}$, inherited from the frozen model $M_{n-1}$, is trained to detect changed regions in the new domain while preserving the original detection ability of the frozen model. Model $M_{n}$ should be able to perform well on all the domains seen so far. UNet is composed of a shared encoder $F$ and different domain-specific decoders $G$. 
%
At the $nth$ incremental step, the given input picture $\left\{X_1, X_2\right\}_n$ belongs to $D_n$, the mapping learned by our method $M_n\left(x_n, n ; W_s, W_n\right)=G_n\left(F\left(x_n, n ; W_s, \alpha_n\right)\right)$ that $W_{s}$ represents a set of shared, domain-invariant parameters common to all domains, while $W_{n}$ represents a set of domain-specific parameters unique to each domain. $W_{n}$ includes $\alpha_n$ and $G_{n}$, $\alpha_n$ denotes the feature extraction parameters for a specific domain $n$, and $G_{n}$ denotes the domain-specific decoder. 
%
The idea is to decompose the hidden space of the data so that the shared parameters $W_{s}$ capture homogeneous semantic expressions between different domains, while the information of a heterogeneous dataset is learned by the corresponding domain-specific parameters $W_{n}$. The basic module of the change detection model we designed is a ResNet-based encoder. We modify each residual unit in the encoder as a DRU. 

\begin{figure*}[tb]
	\begin{center}
		\includegraphics[width=0.9\linewidth]{images/1_main_pross.png}
	\end{center} \vspace{-4mm}
	
	\caption{Multi-domain incremental change detection framework. The blue line indicates training domain $n$, and the gray indicates training domain $n-1$. $L_{FC}$ denotes the distillation loss at the Fully Connected (FC) layer, and $L_{MFM}$ denotes the distillation loss at the Multi Feature Map (MFM).} \label{architecture}
\end{figure*}

\textbf{Domain Residual Unit:} As shown in Figure \ref{DRU}, each DRU contains (i) a set of domain-invariant parameters $W=\{w_{1}, w_{2}\}$, and (ii) a set of domain-specific parameters for each task $n$, $\alpha_n=\left\{\alpha_w, \alpha_s, \alpha_b\right\}$. Here, $w_{1}$ and $w_{2}$ are $3 \times 3$ convolutional layers present in the conventional residual unit and shared among all domains. 
%
There are two types of domain-specific layers in DRU: i) domain-specific parallel residual adapter layers (DS-RAP), and ii) domain-specific batch normalization layers (DS-BN). 
%
$\alpha_{w}$ is a $1 \times 1$ convolutional layer parallel to the shared convolutional layer, and it acts as a layer domain adapter. In BN, the normalized input is scaled and shifted to $s \odot x+b$; here, $\left(\alpha_s, \alpha_b\right)$ denote the learnable scale and shift parameters of the DS-BN layer. The shared weights act as universal filter banks, learning the knowledge of domain generalization. In contrast, the DS-RAP and DS-BN layers are unique to their specific domains and are responsible for learning domain-specific features.

The existing image classification method based on residual adapter aims to generalize the model to a new specific dataset without extracting their shared features. Therefore, when training a new dataset, the shared parameters $W_{s}$ will be frozen, for example, the weight trained in ImageNet will be used as the initial weight, and then only the specific domain (DS) parameters will be trained. However, performance is not ideal on pixel-level classification tasks such as change detection. Therefore, instead of freezing the shared parameters $W_{s}$, we fine-tune them on the new domain $D_{n}$ in an end-to-end training process. 
We propose a distillation structure that allows the parameters $W_{s}$ to learn domain-agnostic features and an optimization method for the parameters $W_{t}$ that enables them to learn domain-specific features, as described below.

\begin{figure}[tp]
	\begin{center}
		\includegraphics[width=0.8\linewidth]{images/RAU.png}
	\end{center} \vspace{-4mm}
	
	\caption{DRU module} \label{DRU}
\end{figure}


\subsection{Optimization Strategy} \label{Optimization Strategy}

\textbf{Domain-specific parameters:} For a specific task $t$, the domain-specific parameters consist of $W_t=\left\{\alpha_t, G_t\right\}$. 
To learn a new domain $D_{t}$ at step $t$, parameters $W_t$ are added to model $M_{t-1}$, resulting in $M_t$. All $W_{t}$ are initialized from $M_{t-1}$, including a random initialization of the output classifier layer (the label space $Y_{t}$ is the same as $Y_{t-1}$). This initialization strategy is referred to as $init W_{t}$. The domain-specific layers $W_{t}$ are trained only with the task-specific loss of the domain $D_{t}$, as shown in the following Eq. (\ref*{cross entrophy}).

\begin{equation}
L_{C E_t}=\frac{1}{N} \sum_{x_t \in D_t} \varphi_t\left(y_t, G_t\left(F\left(x_t, t ; W_s, \alpha_t\right)\right)\right)
\label{cross entrophy}
\end{equation}
%
where $\varphi_t$ is the task-specific softmax cross-entropy loss function. The domain-specific layers of all previous domains, $W_{t-i}, 0<i<t$, are kept frozen while training on the current domain.

\textbf{Domain-invariant parameters: }The $W_{s}$ layer of the encoder is shared in all tasks. In step $t$ of the IL, initialize the weight of $M_{t}$ from the corresponding weight in $M_{t-1}$. In addition to the task-specific cross entropy loss $L_{C E_t}$, distillation loss $L_{K D}$ is used to optimize the sharing weight to retain the domain knowledge under the previous domain data set. Distillation loss includes two parts, one is $L_{F C}$ and the other is $L_{M F M}$.

\begin{equation}
q_i^s, p_{i j}^s=M_t\left(x_t, t-1 ; W_s, W_{t-1}\right)\label{student pq}
\end{equation}

\begin{equation}
q_i^t, p_{i j}^t=M_{t-1}\left(x_t, t-1 ; W_s, W_{t-1}\right)
\label{teacher pq}
\end{equation}

\begin{equation}
L_{F C}=\sum_{i=1}^{t-1} \sum_{x_t \in D_t} \phi\left(q_i^s, q_i^t\right)
\label{loss FC}
\end{equation}

\begin{equation}
L_{M F M}=\sum_{i=1}^{t-1} \sum_{x_t \in D_t} \sum_{j=1}^5 \sigma\left(p_{i j}^s, p_{i j}^t\right)
\label{loss MFM}
\end{equation}

\begin{equation}
L_{K D}=L_{F C}+L_{M F M}
\label{loss KD}
\end{equation}
%
where $q_i^s, p_{i j}^s$ are respectively the predicted and intermediate feature maps from the current input $x_t$ to the current model $M_{t}$, and $q_i^t, p_{i j}^t$ are the predicted and intermediate feature maps from the current input $x_t$ to the previous model $M_{t-1}$. $\phi$ is the KL-divergence (KLD) loss between the two softmax probability distribution maps, computed and summed over each previously learned task $i, 0<i<t$. $\sigma$ is the similarity loss between the teacher network and the student network feature distribution maps, computed and summed over five different dimensions, $0<j<6$. KD can effectively distill domain knowledge from the teacher to the student and can be considered as a domain-adaptive distillation loss. The overall loss for the domain-invariant parameters $W_{s}$ can be summarized as follows.
%
\begin{equation}
L_{W_s}=L_{C E_t}+\lambda_{K D} \cdot L_{K D}
\label{loss Ws}
\end{equation}
%
where $\lambda_{K D}$ is the distillation hyperparameter.

\begin{algorithm}
	\caption{Training process in the $t$-th incremental step} \label{algorithm}
	
	\hspace*{0.02in} {\bf Input:} \\
		$D_{t}$: New data for the current $t$-step \\
		$M_{t-1}$: Model for the previous step $t$-1 \\
	\hspace*{0.02in} {\bf Output:} \\
	$M_{t}$ Parameters
	
	
	\begin{algorithmic}[1]
			
		\State Initialization
		
		\State $M_{t} \gets$ add a new DS layer $W_{t}$ to the $M_{t-1}$
		\State $init_{W_{t}}$:$W_{t}$ of $M_{t} \gets$ $W_{t-1}$ of $M_{t-1}$
		\State Freeze:DS weights for all previous domains $W_{t-i}, 0<i<t$
		\For {epoch}
		\For {mini-batch}
		\State Forward pass $M_t\left(x_t, t\right)$ via $W_{t}$
		\State Compute task-specific loss $L_{C E_t}$ for $D_t$ by Eq.\ref{cross entrophy}
		\State Forward pass $M_t\left(x_t, t-1\right)$ via $W_{t-1}$, Eq.\ref{student pq}
		\State Forward pass $M_{t-1}\left(x_t, t-1\right)$ via $W_{t-1}$ of $M_{t-1}$, Eq.\ref{teacher pq}
		\State Compute FC loss $L_{FC}$ by Eq.\ref{loss FC}
		\State Compute MFM loss $L_{MFM}$ by Eq.\ref{loss MFM}
		\State Compute KD loss $L_{KD}$ by Eq.\ref{loss KD}
		\State Compute $L_{W_{s}}$ loss by Eq.\ref{loss Ws}
		\State Update
		\EndFor
		\EndFor	
		
		\State Discard training data $D_{t}$
		
	\end{algorithmic}
\end{algorithm}

When the shared weights $W_{s}$ are trained on the domain-specific loss $L_{C E_t}$ in the current step, they learn the features of the current domain and quickly forget the domain-specific representations learned in the previous domains. Minimizing the $L_{K D}$ between the output feature maps of the previous model and the current model will preserve the domain knowledge of the previous task in the shared weights. Thus, the $L_{K D}$ and $L_{C E_t}$ combination helps to train the domain-invariant shared layer in the encoder. The weights $W_{t}$ are domain specific because they are trained only for domain specific losses. In summary, the above steps re-parameterize the model into domain-specific and domain-invariant features, thus achieving strong performance on the new domain while preserving the performance on the old domain.


\subsection{\textbf{Inference Phase}} \label{Inference Phase}
For the input pair of time-stamped images $x_t=\left\{X_1, X_2\right\}_t$, $t \in T$ (tasks learned so far by the model), our model outputs a change map of predictions $\hat{y}_t=M_t\left(x_t, t\right)$ in the label space $Y_{t}$. During testing in the target domain $D_{t}$, only the relevant parameters $\alpha_{t}$ and $G_{t}$ are activated in the forward pass. In fact, our model has multiple domain-specific paths with a large degree of parameter sharing.

%\subsection{\textbf{Experimental Setting.}} \label{Experimental Setting.}




\section{Experiments} \label{Experiments}

\subsection{\textbf{Datasets}}

Currently, there is no dataset for multi-domain incremental change detection; therefore, we collated a dataset for incremental change detection learning, containing three sub-datasets that represent different domains. Domain 1: typical geographical scenes of the Guangzhou and Hong Kong areas selected from SYSU~\cite{shi2021deeply}, containing 3000 image pairs for training and 1000 image pairs for testing; Domain 2: 16000 CDD~\cite{bourdis2011constrained} image pairs with an image size of 256$\times$256 pixels and a spatial resolution of 0.03--1\,m, collected from seven pairs of 4725$\times$2700 real-time seasonal change remote sensing images; Domain 3: PRCV, which includes remote sensing images of major first-tier cities in China, with a total of 10,000 image pairs of size 512$\times$512, mainly distributed in Beijing, Shanghai and other cities. \textcolor{red}{TODO: add the dataset download addresses.}


\subsection{\textbf{Evaluation Metric}}

Change detection on traditional fixed-domain datasets is typically evaluated using four metrics: Precision, Recall, F1 score, and IoU. Specifically, precision reflects the false positive rate of the model, recall reflects the false negative rate, and the F1 score takes both into account. Thus, a larger F1 score indicates a better model. IoU measures the overlap between the change category on the detection map and the ground truth. Usually, an IoU greater than 0.5 indicates a good result. These four metrics are calculated as follows.

\begin{equation}
\text { precision }=\frac{\mathrm{TP}}{\mathrm{TP}+\mathrm{FP}}
\end{equation}

\begin{equation}
\text { recall }=\frac{\mathrm{TP}}{\mathrm{TP}+\mathrm{FN}}
\end{equation}

\begin{equation}
\mathrm{F1}=\frac{2 \text { precision } \cdot \text { recall }}{\text { precision }+\text { recall }}
\end{equation}

\begin{equation}
\text { IoU }=\frac{\text { DetectionResult } \cap \text { GroundTruth }}{\text { DetectionResult } \cup \text { GroundTruth }}
\end{equation}
%
where TP, FP, TN, and FN refer to true positives, false positives, true negatives, and false negatives, respectively.

However, the goal of multi-domain incremental change detection tasks is to achieve good detection performance in the new domain while preserving the detection performance in the old domains, that is, to reduce the level of catastrophic forgetting. Compared with traditional change detection metrics, incremental evaluation focuses more on the degree of performance decay of the current model on previously learned domains. Thus, similar to~\cite{zhan2017change}, we quantify the overall IL performance of model $m$ as the average per-task decline in change detection performance (IoU) relative to the corresponding individual task baseline $b$ as:
%
\begin{equation}
\Delta_{I o U} \%=\frac{1}{T} \sum_{t=1}^{T} \frac{\operatorname{IoU}_{m, t}-\operatorname{IoU}_{b, t}}{\operatorname{IoU}_{b, t}}
\end{equation}
%
where $IoU_{m, t}$ is the change detection accuracy (IoU) of model $m$ on task $t$. $\Delta_{I o U} \%$ quantifies the stability-plasticity tradeoff and thus gives an overall score of IL performance.


\subsection{\textbf{Implementation Details}}

We use UNet~\cite{ronneberger2015u} as the network backbone and dynamically add modules on top of this foundation. We compute on a standard validation set of these datasets to measure the incremental learning capability of the model.

Considering IL settings between two consecutive tasks and three tasks, the goal is to maintain a good elasticity balance using a model trained on one geographic domain and learning incrementally on another domain or two domains.

\begin{table*}[htb]
	\begin{center}
		\small
		\caption{Results of the 2-task incremental settings. In step $t$, after performing incremental learning on the current dataset, we test the performance on all datasets. The arrows indicate the learning order. The parentheses show the decrease/increase in performance of the corresponding dataset relative to the individual task benchmark. A smaller $\Delta_{I o U} \%$ indicates a better stability-plasticity tradeoff and overall performance.} \label{2task-results}
		\scalebox{1.1}{
			\begin{tabular}{c|c|c|c|c|c|c|c}
				\hline \hline
				\multirow{1}{*}{IL Step}  &\multirow{1}{*}{Step1 SYSU}  & \multicolumn{3}{|c|} {Step2 SYSU$\rightarrow$CDD}    & \multicolumn{3}{|c} {Step2 SYSU$\rightarrow$PRCV}\\
				
				\hline
				Methods &SYSU &SYSU &CDD &$\Delta_{I o U} \%$ &SYSU &PRCV &$\Delta_{I o U} \%$ \\
								
				\hline
				
				Single-task &73.86 &73.86 &88.17 &- &73.86 &75.10 &- \\
				Baseline &73.86 &46.95 &86.98 &$18.89\%$ &41.71 &74.72 &$22.02\%$ \\
				\hline
				FT &73.86 &57.16 &85.88 &$12.60\%$ &41.55 &72.64 &$23.51\%$ \\
				FE &73.86 &73.86 &65.87 &$12.65\%$ &73.86 &59.37 &$10.47\%$ \\
				KD &73.86 &56.28 &89.95 &$10.89\%$ &41.62 &74.58 &$22.17\%$ \\
				\hline
				Ours &\textbf{73.86} &\textbf{69.04} &\textbf{90.92} &\textbf{$1.70\%$} &\textbf{69.32} &\textbf{71.88} &\textbf{$5.21\%$} \\
				
				\hline \hline 
		\end{tabular}}
	\end{center} 
\end{table*}


\subsection{Results}
We analyze the task settings SYSU$\rightarrow$CDD, SYSU $\rightarrow$ PRCV in detail to compare these two possible scenarios. We also show the results for three task settings, including SYSU$\rightarrow$CDD$\rightarrow$PRCV and SYSU$\rightarrow$PRCV$\rightarrow$CDD. In the change detection task, domain shifts occur when moving between two datasets, but their label spaces are aligned.

\underline{\emph{Incremental Learning Baselines:}} We compare our proposed approach with four standard IL baselines. Single-task baselines represent datasets trained independently on separate models, which can be considered as an upper limit of IL performance. They are used to calculate catastrophic forgetting and the overall evaluation score $\Delta_{I o U} \%$. The Baseline model refers to the performance without taking any IL operations. Fine-tuning (FT) is a commonly used basic method in incremental learning, where the model is fine-tuned on the new domain without any additional constraints to mitigate forgetting. In Feature Extraction (FE), we freeze all encoder weights and train only the decoder weights on the new domain. FT provides maximum plasticity and minimum stability, while FE shows maximum stability and minimum plasticity. We also compare our approach with the most common existing incremental approach for semantic segmentation: knowledge distillation (KD) based on intermediate feature layers.


\underline{\emph{SYSU $\rightarrow$ CDD:}} In this setup, we first learn the model on SYSU in step 1, followed by incrementally learning the same model on CDD in step 2. As shown in Table~\ref{2task-results}, the forgetting on SYSU is mitigated by $22.09 \%$ (relative to the Baseline) using our model, which is only $4.82 \%$ lower than the single-task upper bound. 

\underline{\emph{SYSU $\rightarrow$ PRCV:}} Table~\ref{2task-results} shows that we first learn the model on SYSU in step 1 and then incrementally learn PRCV in step 2. With our model, the forgetting on SYSU is reduced by $27.61 \%$. This shows that despite the image-size misalignment, our approach can retain the old task performance in the SYSU$\rightarrow$PRCV setting almost as well as in the favorable SYSU$\rightarrow$CDD setting ($4.54 \%$ forgetting rate for SYSU after learning PRCV versus $4.82 \%$ after learning CDD). 

\underline{\emph{3-Task Incremental Settings:}} We also conducted SYSU$\rightarrow$CDD$\rightarrow$PRCV and SYSU$\rightarrow$PRCV$\rightarrow$CDD experiments, and the results are shown in Table~\ref{3-task IL}. We also explore the case where the domain ordering is different. These results show that our model generalizes even when the domain order is different.


\begin{table}[htb]
	\begin{center}
		\caption{Results of 3-task incremental learning settings.} \label{3-task IL}
		\begin{tabular}{c|c|c|c|c}  
			\hline \hline
			\multirow{2}{*}{IL Step} &\multicolumn{4}{c} {Step3}\\
			& \multicolumn{4}{c}{SYSU$\rightarrow$CDD$\rightarrow$PRCV}\\
			
			\hline
			Methods &SYSU  &CDD &PRCV &$\Delta_{I o U} \%$	\\
			\hline
			Single-task &73.86  &88.17 &75.10 &-	\\
			\hline
			FT  &44.44  &47.87 &74.58 &$28.74\%$		\\
			FE  &73.86  &65.87 &61.39 &$14.82\%$		\\
			\hline
			Ours &{64.94}  &{80.93} &71.81 &$8.22\%$ 		\\
			\hline \hline 
			\multirow{1}{*}{} & \multicolumn{4}{|c} {SYSU$\rightarrow$PRCV$\rightarrow$CDD}\\
			\hline
			Methods &SYSU  &PRCV &CDD &$\Delta_{I o U} \%$	\\
			\hline
			Single-task &73.86 &75.10 &88.17 &-\\
			\hline
			FT &51.94 &42.41 &90.62 &$23.48\%$ \\
			FE &73.86 &59.37 &67.38 &$14.84\%$ \\
			\hline
			Ours &67.72 &65.68 &89.89 &$6.30\%$ \\
			\hline
		\end{tabular}
	\end{center} 
\end{table}


\begin{table}[htb]
	\begin{center}
		\caption{Results of domain ordering on CDD$\rightarrow$PRCV and PRCV$\rightarrow$CDD.} \label{Other-2task}
		\begin{tabular}{c|c|c|c|c}  
			\hline \hline
			IL Step &Step1 CDD &\multicolumn{3}{c} {Step2 CDD$\rightarrow$PRCV} \\
			
			\hline
			Methods &CDD  &CDD &PRCV &$\Delta_{I o U} \%$	\\
			\hline
			Single-task &88.17  &88.17 &75.10 &-	\\
			\hline
			FT  &88.17  &47.31 &72.93 &$24.62\%$		\\
			FE  &88.17  &88.17 &63.07 &$8.01\%$		\\
			\hline
			Ours &{88.17}  &{87.69} &73.24 &$1.51\%$ 		\\
			\hline \hline 
			&Step1 PRCV &\multicolumn{3}{c} {Step2 PRCV$\rightarrow$CDD}\\
			\hline
			Methods &PRCV  &PRCV &CDD &$\Delta_{I o U} \%$	\\
			\hline
			Single-task &75.10 &75.10 &88.17 &-\\
			\hline
			FT &75.10 &42.56 &89.94 &$20.66\%$ \\
			FE &75.10 &75.10 &78.18 &$5.67\%$ \\
			\hline
			Ours &75.10 &72.25 &91.18 &$0.19\%$ \\
			\hline
		\end{tabular}
	\end{center} 
\end{table}

\subsection{\textbf{Ablation Study}}
In the following part, we analyze the two major parts of the proposed method. 

\begin{table*}[htb]
	\begin{center}
		\caption{Comparison with other knowledge distillation methods} \label{Ablation-KD}
		\begin{tabular}{c|c|c|c|c|c|c|c|c|c|c|c}  
			\hline \hline
			IL Step &Step1 SYSU &\multicolumn{3}{|c}{Step2 SYSU$\rightarrow$CDD} &\multicolumn{3}{|c}{Step2 SYSU$\rightarrow$PRCV} &\multicolumn{4}{|c}{Step3 SYSU$\rightarrow$CDD$\rightarrow$PRCV}\\
			
			\hline
			Methods &SYSU  &SYSU &CDD &$\Delta_{I o U} \%$ &SYSU &PRCV &$\Delta_{I o U} \%$ &SYSU &CDD &PRCV &$\Delta_{I o U} \%$\\
			\hline
			Single-task &73.86  &73.86 &88.17 &- &73.56 &75.10 &- &73.86 &88.17 &75.10 &-	\\
			\hline
			KD-FC  &73.86  &68.44 &85.88 &$4.97\%$ &66.02 &63.78 &$12.84\%$ &65.17 &77.24 &61.30 &$14.18\%$		\\
			KD-FM  &73.86  &67.56 &83.64 &$6.83\%$ &68.75 &65.12 &$10.10\%$ &66.37 &78.45 &66.54 &$10.85\%$		\\
			\hline
			Ours   &73.86  &69.04 &90.92 &$1.70\%$ &69.32 &71.88 &$5.22\%$  &64.94 &80.93 &71.81 &$8.22\%$ 		\\
			\hline
		\end{tabular}
	\end{center} 
\end{table*}

\begin{table*}[htb]
	\begin{center}
		\caption{Comparison with other adapter-based architectures} \label{Ablation-DAU}
		\begin{tabular}{c|c|c|c|c|c|c|c|c|c|c|c}  
			\hline \hline
			IL Step &Step1 SYSU &\multicolumn{3}{|c}{Step2 SYSU$\rightarrow$CDD} &\multicolumn{3}{|c}{Step2 SYSU$\rightarrow$PRCV} &\multicolumn{4}{|c}{Step3 SYSU$\rightarrow$CDD$\rightarrow$PRCV}\\
			
			\hline
			Methods &SYSU  &SYSU &CDD &$\Delta_{I o U} \%$ &SYSU &PRCV &$\Delta_{I o U} \%$ &SYSU &CDD &PRCV &$\Delta_{I o U} \%$\\
			\hline
			Single-task &73.86  &73.86 &88.17 &- &73.56 &75.10 &- &73.86 &88.17 &75.10 &-	\\
			\hline
			RAS-I  &76.91  &69.43 &88.51 &$3.00\%$ &45.19 &70.73 &$22.32\%$ &66.19 &54.19 &69.59 &$18.75\%$		\\
			RAP-I  &74.04  &69.09 &85.81 &$4.57\%$ &65.96 &62.23 &$13.92\%$ &65.49 &79.70 &59.70 &$13.81\%$		\\
			\hline
			Ours   &73.86  &69.04 &90.92 &$1.70\%$ &69.32 &71.88 &$5.22\%$  &64.94 &80.93 &71.81 &$8.22\%$ 		\\
			\hline
		\end{tabular}
	\end{center} 
\end{table*}


\subsubsection{Distillation Module}

In this section, we study the importance of distillation losses at different locations of the network for achieving the stability-plasticity tradeoff in change detection tasks. Usually, in general class-incremental tasks, most approaches use distillation of the fully connected layer to convey the category information of the teacher network, called KD-FC. In semantic segmentation tasks, many distillation structures have also been developed for the intermediate feature maps, called KD-FM. Due to the specificity of the change detection task, which is a binary problem, it is not possible to simply use the KD-FC approach to transfer the teacher network's knowledge. 

\begin{figure}[thp]
	\begin{center}
		\includegraphics[width=0.5\linewidth]{images/KD.png}
	\end{center} \vspace{-4mm}
	
	\caption{Different modules of distillation} \label{different-distillation}
\end{figure}

However, we found through experiments that the effect of alleviating forgetting in the change detection task is not obvious if only KD-FM distillation is used. Therefore, we propose a Multi-level Feature Map distillation method: on the basis of the above two methods, the intermediate feature layers are distilled in multiple dimensions, so as to effectively transfer the change information of the geographic environment learned by the teacher network to the student network.


\subsubsection{Expandable Modules}

RAP-I [12] proposed parallel residual adapters whose $W_{s}$ weights are obtained from ImageNet pre-training and frozen, while RAS-I [9] uses a series of residual adapters. The hierarchical residual adapters $\alpha$ included in RAP and RAS are different. In these models, only the task-specific adapter layers are trained and the $W_{s}$ weights are frozen. RAP is a plug-and-play residual adapter that can be easily inserted into existing segmentation models. RAS is a series adapter that needs to be included when ResNet is pre-trained on ImageNet to achieve its best performance, and thus cannot be used directly as a drop-in adapter.

\begin{figure}[thp]
	\begin{center}
		\includegraphics[width=0.8\linewidth]{images/RAS.png}
	\end{center} \vspace{-4mm}
	
	\caption{Different expandable modules} \label{different-expandable}
\end{figure}

Through experiments, we found that the RAP adapter is more suitable when fine-tuning the shared weights $W_{s}$ in the incremental change detection task.



\section{Conclusion}

We define the problem of multi-domain incremental change detection and propose a dynamic architecture based on parameter isolation that provides a significant improvement over the baseline approach. Our model allows different domains to have specific network parameters, while having a large degree of parameter sharing in the generic model. To address the problem of a fixed number of labels but different label spaces in change detection tasks, we also design a multi-layer feature layer distillation structure that allows the network to retain knowledge of prior domains by learning intermediate semantic representations of prior domains. Finally, we also collected and created a multi-domain change detection dataset, laying a foundation for future incremental change detection research.


\section*{Acknowledgements}

This work is partly supported by grants from National Natural Science Foundation of China (XXX).

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section*{References}

\bibliography{mybibfile}

\end{document}
