\documentclass[lettersize,journal]{IEEEtran}
\usepackage{amsmath,amsfonts}
\usepackage{amssymb}
\usepackage{algorithmic}
\usepackage{algorithm}
\usepackage{array}
\usepackage[caption=false,font=normalsize,labelfont=sf,textfont=sf]{subfig}
\usepackage{textcomp}
\usepackage{stfloats}
\usepackage{url}
\usepackage{verbatim}
\usepackage{graphicx}
\usepackage{cite}
\hyphenation{op-tical net-works semi-conduc-tor IEEE-Xplore}
% updated with editorial comments 8/9/2021

\usepackage{multirow}

\graphicspath{{figures/}}
\newcommand{\figref}[1]{Figure~\ref{#1}}
\newcommand{\tabref}[1]{Table~\ref{#1}}
\newcommand{\secref}[1]{Section~\ref{#1}}
\newcommand{\algref}[1]{Algorithm~\ref{#1}}
\newcommand{\equaref}[1]{Equation~\ref{#1}}

% \usepackage[justification=centering]{caption}
\usepackage{soul} % load the soul package (highlighting/strike-out support)
\usepackage{color, xcolor} % color packages: color is required, xcolor is recommended

\usepackage{framed}
\definecolor{shadecolor}{rgb}{0.92,0.92,0.92}

\begin{document}

\title{Improving Out-of-Distribution Detection Robustness Via Proper Robust Out-of-Distribution Learning}


\author{IEEE Publication Technology,~\IEEEmembership{Staff,~IEEE,}
        % <-this % stops a space
\thanks{This paper was produced by the IEEE Publication Technology Group. They are in Piscataway, NJ.}% <-this % stops a space
\thanks{Manuscript received April 19, 2021; revised August 16, 2021.}}

% The paper headers
\markboth{Journal of \LaTeX\ Class Files,~Vol.~14, No.~8, August~2021}%
{Shell \MakeLowercase{\textit{et al.}}: A Sample Article Using IEEEtran.cls for IEEE Journals}

% \IEEEpubid{0000--0000/00\$00.00~\copyright~2021 IEEE}
% Remember, if you use this you must call \IEEEpubidadjcol in the second
% column for its text to clear the IEEEpubid mark.

\maketitle

\begin{abstract}
  Deep Neural Networks (DNNs) learn decision logic from training data that is often incomprehensible to developers, posing challenges for the development of secure DNN software. DNNs are vulnerable to attacks from both seen In-Distribution (ID) samples, referred to as ID attacks, and unseen Out-Of-Distribution (OOD) samples, known as OOD attacks. However, most detection methods primarily focus on detecting ID attacks and struggle to maintain robustness against adaptive OOD attacks that target the detection mechanisms. In this work, we propose Defense-as-Detection (DaD), a semi-supervised approach that ingeniously leverages the defense derived from adversarial training to enhance the OOD detection robustness of DNNs. DaD introduces multiple `OOD' class nodes into the final layer of the DNN to serve as detectors and conducts adversarial training on these class nodes using auxiliary OOD samples to achieve robust OOD learning. The key insight of DaD is to use a self-learning method to generate pseudo-labels for effectively distinguishing between different OOD samples. This ensures that each `OOD' class competes with the others to learn their class-specific robust OOD discriminative features, utilizing the effective adversarial defenses over the `OOD' class nodes as detection mechanisms to withstand OOD attacks. Moreover, DaD exploits adversarially perturbed ID samples as auxiliary OOD samples to impede OOD attacks from gaining high confidence in ID classes and circumventing detection. Experiments demonstrate that DaD significantly enhances DNN robustness in detecting malicious OOD samples generated by adaptive OOD attacks while maintaining comparable performance on clean (ID and OOD) data. Additionally, DaD outperforms other methods in detecting adaptive ID attacks. 
\end{abstract}

\begin{IEEEkeywords}
Deep neural network testing, out-of-distribution detection robustness, outlier detection, adversarial detection, semi-supervised learning
\end{IEEEkeywords}

\section{Introduction}
\label{sec:intro}

Deep Neural Networks (DNNs) have achieved remarkable generalization performance and play a core decisional role in various intelligent systems, such as medical diagnostics \cite{chen2021transunet} and autonomous driving \cite{chen2024end, chib2023recent, bojarski2016end}. Unlike traditional software, the decision-making logic of DNNs is learned from training data and is not easily interpretable, posing significant challenges in developing secure DNN-based systems. Numerous studies have shown that DNNs are not only susceptible to being fooled by attacks that inject imperceptible adversarial perturbations \cite{szegedy2013intriguing} into seen In-Distribution (ID) samples (as illustrated in the upper left part of \figref{fig:dnn-detection-robustness-route}), but they also tend to misclassify unseen Out-Of-Distribution (OOD) samples \cite{hendrycks2016baseline} into a certain ID class with overly high confidence (as shown in the lower right part of \figref{fig:dnn-detection-robustness-route}).

The susceptibility of DNNs to attacks from ID examples (ID attacks) inspires the proposal of numerous adversarial attack methods \cite{goodfellow2014explaining, moosavi2016deepfool, carlini2017towards, Kurakin2017AdversarialEI, croce2020reliable, andriushchenko2020square}. Consequently, a variety of defense methods \cite{papernot2016distillation, bai2019hilbert, ma2018characterizing, tramer2017ensemble, goodfellow2014explaining, madry2017towards, zhao2023attack} and detection methods \cite{xu2017feature, hendrycks2016early, feinman2017detecting, ma2018characterizing, lee2018simple, metzen2017detecting, mccoyd2018background, yang2021class, xiao2022self} have also been proposed to develop DNNs that are robust against these ID attacks. While defense methods aim to predict the true labels of adversarially perturbed ID samples (adversarial ID samples), detection methods focus on identifying adversarial ID samples and discarding predictions for them. In the defense domain, most methods have been circumvented by subsequently stronger attacks, with adversarial training, re-training with adversarial (ID) samples, being nearly the only effective means of developing robust DNNs. However, adversarial training inevitably reduces the accuracy of DNNs on ID samples (due to forcing DNNs to abandon the use of perturbation features for decision-making\cite{tsipras2019robustness}), which is unsuited for high-accuracy scenarios. In the detection domain, most methods are bypassed by adaptive attacks that target the detection mechanism \cite{tramer2020adaptive} during the testing phase, which has led to increased attention and efforts to withstand these strong attacks, as shown in the upper right part of \figref{fig:dnn-detection-robustness-route}.

\begin{figure}[!tbp]
  \centering
  \includegraphics[width=\linewidth]{attack-detection-route-v3.pdf}
  \caption{Evolutionary routes of DNNs against different attacks. Std. DNN represents a standardly trained DNN, I-Rob. DNN denotes a DNN designed to be robust against attacks from ID samples (ID attacks), and O-Rob. DNN represents a DNN designed to be robust against attacks from OOD samples (OOD attacks).}
  \label{fig:dnn-detection-robustness-route}
  \end{figure}

Moreover, to deal with the overconfidence of DNNs on OOD samples, numerous OOD detection methods either focus on designing ad-hoc or post-hoc scoring functions \cite{lee2018simple, denouden2018improving, abdelzad2019detecting, wang2020dissector, xiao2022self} during the development phase to enable trained DNNs to better distinguish between ID and OOD samples, or prioritize re-training DNNs to achieve more separable representations between ID and OOD samples for end-to-end detection (often using a uniform distribution \cite{lee2018training, hendrycks2018deep} or additional reject classes \cite{mohseni2020self, vernekar2019analysis} to represent OOD samples). These two lines of methods are compatible with each other, but the latter can fundamentally address the overconfidence issue and solve the OOD detection problem with significantly improved performance \cite{augustin2020adversarial}. 

However, most well-established OOD detections remain vulnerable to being bypassed when OOD samples \cite{sehwag2019analyzing, bitterwolf2020provable} are subject to adversarial perturbations. Suppose that the DNN in the lower half of \figref{fig:dnn-detection-robustness-route} is well-calibrated and outputs a prediction probability of 50\% for the desk in both the cat and dog classes. However, when an attacker injects adversarial perturbations onto the desk, the same DNN outputs a probability of 99\% for the desk in either the cat or dog class, resulting in a detection failure and a classification error. Nonetheless, the carefully designed scoring functions in the deployment phase alone cannot effectively block detection-aware adaptive attacks \cite{chen2021atom, zhou2023fixing}, and some works try to re-develop DNNs that are fundamentally robust to OOD attacks. Representatively, ACET \cite{hein2019relu} and ATOM \cite{chen2021atom} introduce adversarial training to auxiliary OOD data with pseudo-labels of a uniform distribution and an additional reject class, respectively, while RCE \cite{chen2022robust, augustin2020adversarial} further applies adversarial training to both ID and auxiliary OOD data. However, as pointed out by RobDet \cite{zhou2023fixing}, the adversarial training on ID data in RCE significantly harms the performance of DNNs in detecting clean OOD samples, and the adversarial training on OOD data in ACET and ATOM is insufficient to ensure the robustness of OOD detection. 

As a continuation of these works, we also focus on enhancing the end-to-end robustness of DNNs against adaptive OOD attacks using class prediction probabilities. At the same time, we ensure that the detection performance for clean OOD examples is not significantly compromised. By employing strong OOD attacks, which are optimized by the advanced Auto-PGD search algorithm \cite{croce2020reliable}, we first investigate why adversarial training on auxiliary OOD data in existing OOD detection methods fails, given that regular adversarial training on ID data can be successful. We find that \textbf{using multi-class pseudo-labels that can effectively distinguish different auxiliary OOD samples is critical to the success of adversarial training on OOD data} (see \secref{sec:motivation} for details). This is analogous to regular adversarial training, which requires true labels for samples belonging to each class in order to successfully learn robust class-specific features.

Building on this key insight, we further propose Defense-as-Detection (DaD), a novel semi-supervised training approach that ingeniously utilizes adversarial training defense to improve OOD detection robustness. DaD adds multiple `OOD' class nodes to the last layer of the DNN to represent OOD samples, which function as detectors. By using a self-learning method to generate pseudo-labels (\secref{sec:pseudo-label-method}) that effectively differentiate OOD samples for adversarial training, DaD allows each "OOD" class node to compete against one another, learning robust OOD discriminative features and establishing a strong defense against OOD attacks. Further, DaD mines adversarial ID samples in the neighborhood of ID samples as auxiliary \emph{near} OOD samples (i.e., samples close to the in-distribution region) for training, to prevent attacks from gaining high confidence in incorrect ID classes and bypassing detection. Note that while RobDet also uses multiple additional `other' classes, it does not provide such key insight as DaD does, and the pseudo-labeling method of DaD also differs from that of RobDet, which allows DaD to be trained more easily and stably across different datasets and achieve better robustness (\secref{sec:main-res}). 

Experiments under strong adaptive OOD attacks demonstrate that DaD outperforms state-of-the-art (SOTA) detections in improving the OOD detection robustness of DNNs, without compromising clean OOD detection performance. For example, on the more challenging dataset CIFAR100, DaD achieves **.**\% and **.**\% AUC scores higher than SOTA RobDet and RCE \cite{chen2022robust} in detecting OOD attacks, while maintaining leading performance in detecting clean OOD samples. In detecting adaptive ID attacks, DaD even significantly outperforms the cutting-edge RobDet, with overwhelming performance advantages over ACET and ATOM. Code is available at: \url{https://anonymous.4open.science/r/defense-as-detection-B131/README.md}. Overall, our contributions are:
\begin{itemize}
\item We reveal for the first time that generating multi-class pseudo-labels that can well distinguish different auxiliary OOD samples is critical to the success of adversarial training on OOD data. 
\item We propose Defense-as-Detection (DaD), a novel semi-supervised approach that properly employs adversarial training defense on additional `OOD' class nodes to enhance OOD detection robustness without compromising clean OOD detection. By using a self-learning method to generate pseudo-labels that effectively differentiate between OOD samples, DaD ensures that each `OOD' class node competes with others to successfully build robust defenses for robust OOD detection. 
\item Through extensive experiments, we demonstrate that DaD significantly improves the robustness of DNNs in detecting OOD (and ID) attacks, without sacrificing performance in classifying clean ID samples and detecting clean OOD samples. 
\end{itemize}

The structure of this paper is as follows: \secref{sec:prel} introduces the preliminaries, \secref{sec:motivation} explains the motivation behind DaD, \secref{sec:proposed-dad} provides a detailed description of DaD, \secref{sec:expm} evaluates DaD, \secref{sec:threats} discusses threats to validity, and \secref{sec:conclusion} concludes the paper. 

\section{Preliminaries}
\label{sec:prel}

We discuss adversarial attack, adversarial robustness, adversarial detection and OOD detection, and robust OOD detection in \secref{sec:adv-attack}, \secref{sec:adv-training}, \secref{sec:OOD-det} and \secref{sec:rob-OOD-det}, respectively. 

\subsection{Adversarial Attack}
\label{sec:adv-attack}

DNNs are extensively utilized in a variety of intelligent systems, yet they are vulnerable to subtle adversarial perturbations \cite{szegedy2013intriguing}. This vulnerability generates considerable interest in creating sophisticated adversarial testing and attack methods \cite{goodfellow2014explaining, papernot2016limitations, MoosaviDezfooli2016DeepFoolAS, tramer2017ensemble, Kurakin2017AdversarialEI, kurakin2016adversarial, madry2017towards, pei2017deepxplore, ma2018deepgauge, sun2018concolic, tian2018deeptest, xie2019deephunter} aimed at crafting adversarial samples. Among these methods, the most frequently discussed may be the \(K\)-step Projected Gradient Descent (PGD-\(K\)):
\begin{equation}
  \delta^{k+1}=\Omega_{||x'-x||_{p} \leqslant \epsilon} \left(\delta^{k} + \alpha \cdot \operatorname{sign}(\nabla_{\delta^{k}} \ell(f_{\theta}(x+\delta^{k}), y)) \right)
  \label{eq:pgd-atttack}
\end{equation}
In this equation, $\delta^{k}$ represents the perturbation at the \(k\)-th iteration, while $\ell(f_{\theta}(x+\delta^{k}), y)$ denotes the adversarial loss (e.g., cross-entropy loss) calculated between the predictions of the perturbed input \(x+\delta^{k}\) and its true label \(y\). The term $\nabla_{\delta^{k}}$ indicates the gradient of the loss with respect to the perturbation $\delta^{k}$. The function $\operatorname{sign}$ returns 1 if the input is greater than 0, and -1 if it is less than 0. The variable $\alpha$ indicates the size of the attack step, and $\Omega_{||x'-x||_{p} \leqslant \epsilon}$ projects $\delta^{k}$ back onto the $\epsilon$-ball defined by the \(L_{p}\) norm (with \(p \in \{0, 1, 2, \infty\}\)) around the original input \(x\). The initial perturbation $\delta^{0}$ is selected as a random vector, ensuring that $||\delta^{0}||_{p} \leq \epsilon$.

\subsection{Adversarial Robustness}
\label{sec:adv-training}

As adversarial testing and attack methods have become more widespread, numerous defense strategies \cite{papernot2016distillation, bai2019hilbert, ma2018characterizing, tramer2017ensemble, goodfellow2014explaining, madry2017towards} have emerged. Nonetheless, most of them are ultimately compromised by later stronger attacks, with Adversarial Training (AT) \cite{goodfellow2014explaining, madry2017towards} being one of the few methods that effectively enhances the robustness of DNNs \cite{athalye2018obfuscated}. AT utilizes adversarial examples as a form of data augmentation, as formulated in the following minimization-maximization problem: 
\begin{equation}
  \arg\min_{\theta}  \frac{1}{N} \sum_{i=1}^{N} {\max_{||\delta||_{p} \leq  \epsilon} \ell(f_{\theta}(x_{i} + \delta_{i}),y_{i})}
  \label{eq:at-framework}
\end{equation}
Here, $N$ represents the total number of training examples, while $x_i$ denotes the $i$-th sample with a corresponding true label $y_{i}$. In practice, a PGD attack\footnote{We refer to the $K$-step Projected Gradient Ascent search algorithm for maximizing the adversarial loss as PGD-$K$ for simplicity.} is commonly employed to approximate the internal `max' operation in \equaref{eq:at-framework}. Despite its robustness-enhancing capabilities, AT tends to compromise overall accuracy, as it leads DNNs to overlook perturbation features in their decision-making processes \cite{tsipras2019robustness}. 

Another line of this research field investigates false security and assesses worst-case robustness. While using the PGD attack with cross-entropy loss may fail to uncover gradient masking (as observed in distillation defenses \cite{papernot2016distillation}), the CW attack \cite{carlini2017towards} directly manipulates the logits before applying the softmax activation. Gowal et al. \cite{gowal2019alternative} propose a multi-target surrogate loss that sequentially targets each erroneous class. Furthermore, adaptive attacks \cite{carlini2017adversarial, tramer2020adaptive} emphasize critical factors for crafting attacks that reliably evaluate robustness. The Auto-PGD search algorithm \cite{croce2020reliable, croce2021robustbench} further enhances the PGD algorithm by incorporating automatic step-size adjustment, attack restarts, and momentum updates to mitigate the risk of local optima. In this paper, we primarily leverage Auto-PGD and the multi-target surrogate loss to develop our adaptive attacks, enabling a comprehensive evaluation of detection robustness.



\subsection{Adversarial Detection and Out-of-distribution Detection}
\label{sec:OOD-det}

The susceptibility of DNNs to adversarial perturbations and their overconfidence in OOD samples have spurred the development of diverse detection techniques aimed at identifying both adversarially perturbed (ID) samples \cite{hendrycks2016early, feinman2017detecting, xu2017feature} and benign OOD samples \cite{hendrycks2016baseline}. While these techniques can be classified into two distinct subfields, they broadly fall into two methodological categories. The first category emphasizes the design of enhanced scoring functions for trained DNNs during the deployment phase. Exemplary methods within this realm include statistics-based scoring functions \cite{grosse2017statistical, hendrycks2016baseline, ma2018characterizing, lee2018simple, feinman2017detecting}, which utilize the DNN as a feature extractor, employing outputs from either hidden or final layers to distinguish between ID and OOD samples. This category encompasses techniques such as KDE \cite{feinman2017detecting}, Mahalanobis distance \cite{lee2018simple, denouden2018improving}, Dissector \cite{wang2020dissector}, and Self-Checking \cite{xiao2022self}. Nevertheless, these rule-based functions may encounter difficulties in generalizing to complex anomalous samples that fall outside the realm of previous observations, potentially slowing down the primary task without fundamentally resolving the reliability concerns associated with DNNs.

The second category is devoted to re-engineering DNNs to acquire distinct representations between ID and non-ID samples for end-to-end detection, thereby substantially enhancing OOD detection performance \cite{augustin2020adversarial}. Prominent contributions in this field include semi-supervised approaches such as OE \cite{hendrycks2018deep} (which utilizes a uniform distribution for OOD samples), alongside K+1 Detector \cite{vernekar2019analysis} and SSL \cite{mohseni2020self} (which use single and multiple reject classes for OOD samples, respectively). These two methodological categories are mutually complementary, but the second one can fundamentally address the abnormal input detection problem for DNNs. 

\subsection{Robust Out-of-distribution Detection}
\label{sec:rob-OOD-det}

While semi-supervised training methods \cite{mccoyd2018background, hendrycks2018deep, mohseni2020self, vernekar2019analysis} have effectively addressed the OOD detection problem, existing studies \cite{sehwag2019analyzing, bitterwolf2020provable} show that most advanced OOD detection systems are susceptible to evasion attacks that manipulate OOD samples. This motivates several works aimed at building DNNs that are robust against OOD attacks, as relying solely on complex scoring functions cannot effectively thwart detection-aware adaptive attacks. 

WhyReLU \cite{hein2019relu} provides a mathematical analysis explaining why ReLU DNNs yield high confidence for OOD samples and proposes ACET to mitigate this issue by applying adversarial training to auxiliary OOD data, which have pseudo labels of a uniform distribution. ATOM \cite{chen2021atom} incorporates an additional reject class for OOD samples and proposes an outlier mining step within the adversarial training framework on auxiliary OOD data. Robust Confidence Estimate (RCE) \cite{chen2022robust, augustin2020adversarial} further integrates adversarial training on both ID and OOD data, with pseudo labels of a uniform distribution, to enhance OOD detection robustness. Recently, RobDet \cite{zhou2023fixing} revealed that conducting adversarial training on ID data, as done in RCE, significantly undermines the effective detection of clean OOD examples. Furthermore, it suggests that the adversarial training applied to OOD data in both ACET and ATOM is inadequate for ensuring OOD detection robustness. However, there remains ambiguity regarding the reasons behind the limited effectiveness of adversarial training on OOD data in these methods. This paper aims to investigate how to properly implement adversarial training on auxiliary OOD data to improve OOD detection robustness while maintaining the ability to detect clean OOD samples.

\section{Motivation}
\label{sec:motivation} 

In conventional standard training (without considering the detection of OOD samples), all different samples are assigned their true labels to enable the DNN classifier to learn class-specific discriminative features. If all samples share the same label assignment (i.e., the class node in the last layer is set to 1), the DNN is unable to learn any class-specific knowledge. Similarly, adversarial training also uses the true labels of all different samples to ensure that DNNs learn class-specific robust discriminative features. However, existing adversarially trained OOD detections use a uniform label for diverse OOD samples, which motivates an investigation into their effectiveness in ensuring OOD detection robustness. 

We use the training data from the first 8 classes of CIFAR10 as ID training data, and the training and test data from the last 2 classes of CIFAR10 as OOD training data and OOD test data, respectively\footnote{Note that the OOD training and test data used here come from the same distribution, making our experimental results more compelling. If a method fails to prevent attacks from OOD samples within the same distribution, it is less likely to block attacks from OOD samples from different distributions.}. The evaluation attacks include the regular PGD-solved attack and APGD-solved attack we developed, both designed to bypass detection by maximizing the maximum logit of ID classes and minimizing that of `OOD' classes. We use ResNet-18, trained for 100 epochs with an initial learning rate of 0.1, reduced by a factor of 10 at the 75th and 90th epochs. For more details on the attacks, please see \secref{sec:exp-setup}. 
% For more details on the attacks, and training settings, see \secref{sec:exp-setup}. 

\begin{table}[!tbp]
  \centering
  \caption{Performance of adversarially trained detections (AUC). Higher is better. 
  }
  \resizebox{0.6\columnwidth}{!}{%
  \setlength{\tabcolsep}{2.2pt}{
  \begin{tabular}{c|cc}
  \hline
  Method & PGD & APGD \\ \hline
  $U$ (ID+ST, OOD+AT)   & 99.50  & \textcolor[rgb]{1,0,0}{0.40}   \\ 
  $K$+$1$ (ID+ST, OOD+AT)  & 99.74  & \textcolor[rgb]{1,0,0}{0.81}   \\ 
  $K$+$V_{org}$ (ID+ST, OOD+AT)    & 99.74  & \textbf{94.72}    \\ 
  $K$+$V_{rdm}$ (ID+ST, OOD+AT)   & 99.70  & \textcolor[rgb]{1,0,0}{ 0.20 }   \\ \hline \hline
  $K$+$V_{org}$ (ID+AT', OOD+AT)   & 99.81  & \textbf{96.89}  \\ 
  $K$+$V_{rdm}$ (ID+AT', OOD+AT)   & 99.84  & 0.82  \\ \hline
  \end{tabular}
  }
  }
  \label{tab:adv-id-OOD-training}
  \end{table}

\subsection{Using Pseudo-labels That Well Distinguish OOD Samples Is Crucial for the Success of Robust OOD Learning.}
\label{sec:properly-settings-on-at-OOD}

We study the OOD detection robustness (AUC) of models trained under different settings for OOD data: a uniform distribution (denoted as $U$, consistent with ACET), a single additional `OOD' class (denoted as $K$+$1$, where samples from the last two classes in CIFAR10 are both labeled as 9, consistent with ATOM), and two additional `OOD' classes (denoted as $K$+$V_{org}$, where samples from classes 9 and 10 in CIFAR10 retain their original labels of 9 and 10). Moreover, we consider assigning the last two class samples a random label within [9, 10] (denoted as $K$+$V_{rdm}$). The training methods for ID data and OOD data are Standard Training (ST) and Adversarial Training (AT), respectively, which align with the training paradigms commonly used for robust OOD detection.

As shown in the upper part of \tabref{tab:adv-id-OOD-training}, $U$ and $K$+$1$ effectively defend against the weak PGD attack but are significantly compromised by the stronger APGD attack. In contrast, $K$+$V_{org},$ which utilizes true labels for auxiliary OOD samples, successfully counters the APGD attack. However, when pseudo-labels for outliers are randomly assigned within the [9, 10] range, $K$+$V_{rdm}$ is significantly defeated by the APGD attack, similar to $U$ and $K$+$1$. These results validate our assertion that using pseudo-labels that can effectively distinguish OOD data is critical for the success of adversarial training on OOD data. 

\begin{figure*}[!tbp]
  \centering
  \includegraphics[width=0.7\textwidth]{DaD-v2.pdf}
  \caption{An intuition example of DaD. The dashed node represents the `OOD' class node, and well-labeled pseudo labels refer to labels that can well distinguish different OOD samples.}
  \label{fig:dad-intition-example}
\end{figure*}

\subsection{Training Adversarial ID Samples as OOD Samples Benefits Robustness Only When OOD Samples Are Well Distinguished.}
\label{sec:at-id-2-detection}

Recent work, RobDet \cite{zhou2023fixing}, reveals that AT on ID data significantly degrades clean OOD detection performance (since AT's min-max framework significantly hurts the prediction confidence for ID examples) and advocates treating adversarial ID samples as near OOD samples to mitigate this issue. We further investigate whether training adversarial ID samples as near OOD samples (denoted as AT') can effectively enhance the robustness of OOD detection. As shown in the lower part of \tabref{tab:adv-id-OOD-training}, aside from the robustness of $K$+$V_{org}$, the APGD robustness of the other methods does not show significant enhancement after further training adversarial ID samples as OOD samples. These results demonstrate that mining adversarial ID samples as OOD samples can be beneficial for improving OOD detection robustness only if the OOD data use pseudo-labels that can distinguish them well. Next, we present DaD. 

\section{Proposed DaD}
\label{sec:proposed-dad}

% We first present how to perform detection based on additional `OOD' classes in DaD in Section 4.1. Then we present the key step setup of DaD in Section 4.2: how to generate multi-class labels for unlabelled auxiliary OOD data that are useful for distinguishing them. Finally, we present the training objectives of DaD in detail in Section 4.3.

We first provide an overall example of DaD in \secref{sec:dad-intition-example}, followed by the scoring function, pseudo-labeling methods and training objective of DaD in \secref{sec:dad-sc}, \secref{sec:pseudo-label-method}, and \secref{sec:training-obj}, respectively.

\subsection{An Overall Example}
\label{sec:dad-intition-example}

DaD ingeniously uses the defense derived from adversarial training on the additional `OOD' classes as detection to enhance the robustness of DNNs against OOD attacks. \figref{fig:dad-intition-example} illustrates an overall example of DaD, where the last two dashed nodes represent the `OOD' classes $y^o_1$ and $y^o_2$, respectively. Assuming two different auxiliary OOD samples $x^o_1$ and $x^o_2$, DaD uses $y^o_1$ and $y^o_2$ to distinguish between them (\secref{sec:pseudo-label-method}) and performs adversarial training for their adversarial samples $x^o_1 + \delta^o_1$ and $x^o_2 + \delta^o_2$ on the last two `OOD' class nodes (\secref{sec:training-obj}). This approach makes the last `OOD' class nodes compete with each other to extract robust OOD discriminative features, achieving a robust defense against adversarial perturbations. Additionally, DaD trains adversarial ID samples $x^{in}_1 + \delta^{in}_1$ and $x^{in}_2 + \delta^{in}_2$ as near-OOD samples by assigning them pseudo-labels $y^{in}_1$ and $y^{in}_2$ (\secref{sec:pseudo-label-method}), respectively, to encourage the DNN to output high confidence for them on the last two `OOD' class nodes. Consequently, DaD uses the defense on the last two `OOD' class nodes as detection to robustly capture attacks, achieving end-to-end robust detection. 

\subsection{Scoring Function}
\label{sec:dad-sc}

DaD utilizes additional `OOD' classes to represent OOD samples, including adversarial ID samples, clean OOD samples, and adversarial OOD samples, to achieve end-to-end robust detection. The scoring function used to distinguish between ID and non-ID samples in DaD is the Sum of Softmax Probabilities of `OOD' classes (SSP$_{O}$): 
\begin{equation}
  D(x)=
  \begin{cases}
    \mathrm{OOD}, & \text{if } \sum_{v=K+1}^{K+V}f_{\theta}(x)_{v} > \tau \\
    \mathrm{ID}, & \text{otherwise}
  \end{cases}
\label{eq:dad-scoring-func}
\end{equation}
where $K$ and $V$ are the numbers of ID and `OOD' classes, respectively, $f_{\theta}(x)_{v}$ is the softmax probability over the $v$-th `OOD' class, and $\tau$ is the scoring threshold determined in the test phase. \equaref{eq:dad-scoring-func} uses SSP$_{O}$ to conduct end-to-end detection without introducing any significant computational cost. For an ID sample, DaD uses the class with the Maximum Softmax Probability (MSP) within the first $K$ ID classes, i.e., $\arg\max(f_{\theta}(x)_{[1:K]})$, as the prediction result.  

\subsection{Pseudo-labeling Methods}
\label{sec:pseudo-label-method}

As demonstrated in \secref{sec:properly-settings-on-at-OOD}, using multi-class labels that can effectively distinguish different auxiliary OOD samples is crucial for the success of adversarial training on OOD data. However, in practice, most auxiliary OOD samples may be unlabeled, and manually labeling them would consume a significant, even unacceptable, amount of manpower. Consequently, generating pseudo-labels for distinguishing different auxiliary OOD samples becomes particularly important for robust OOD learning. An intuitive method may be to use an additional pre-trained classifier or even other clustering algorithms to classify or cluster auxiliary OOD samples for obtaining pseudo-labels. However, this complicates the training process and is inconvenient for training from scratch. Inspired by self-learning methods \cite{golan2018deep, mohseni2020self}, we propose the following pseudo-labeling method for auxiliary OOD samples:
\begin{equation}
  y^o=K + \arg\max(f_{\theta}(x^{o}))\%V
\label{eq:dad-self-labelling-ood}
\end{equation}
where $\arg\max(f_{\theta}(x^{o}))$ represents the class with the MSP among all classes (including additional `OOD' classes) for the clean OOD sample $x^{o}$, and \% is the modulo symbol. In \equaref{eq:dad-self-labelling-ood}, if the DNN predicts the class of $x^{o}$ to be an `OOD' class, the prediction is directly used as the pseudo-label for $x^{o}$. However, if the predicted class is an ID class, a modulo operation on $V$ is applied to obtain the pseudo-label. This modulo operation facilitates the stable differentiation of OOD samples and eliminates the adverse effects of random factors on training. 

For adversarial ID samples, we consider the following pseudo-labeling method:
\begin{equation}
  y^o=
  \begin{cases}
      \arg\max(f_{\theta}(x^{in})), & \text{if } \arg\max(f_{\theta}(x^{in})) > K \\
      \mbox{Random}[K, K+V], & \text{otherwise}
  \end{cases}
\label{eq:dad-self-labelling-advid}
\end{equation}
In \equaref{eq:dad-self-labelling-advid}, if the class with the MSP of $x^{in}$ is greater than $K$, that predicted `OOD' class is used as its pseudo-label; otherwise, a random integer is selected from $[K, K+V]$ as its pseudo-label. Note that, whether in \equaref{eq:dad-self-labelling-ood} or \equaref{eq:dad-self-labelling-advid}, we always use the clean counterparts of adversarial OOD or ID samples to obtain their pseudo-labels.

\subsection{Training Objective}
\label{sec:training-obj}

% &\mbox{where } \delta_{i}^{in*} = {\arg\max}_{||\delta_{i}^{in}||\leq
% \epsilon} -\log(x_{i}^{in}+\delta_{i}^{in})_{\widehat{y}_{i}^{in}}  \mbox{ and}\\
% &\delta_j^{o*}={\arg\max}_{||\delta_{j}^{o}|| \leq \epsilon} -\log(\max(f_{\theta}(x_j^o+\delta_j^o)_{[K+1:K+V]})) \\

% DaD performs adversarial training using auxiliary OOD data on the additional `OOD' class nodes and exploits adversarial ID samples as auxiliary near-OOD samples to enhance the robustness of DNNs in detecting attacks. Formally, its training objective is:
% \begin{equation}
%   \begin{aligned}
%   \arg\min_{\theta}
%   \frac{1}{2N} &\sum_{i=1}^{N} [\ell(f_{\theta}(x_{i}^{in}), y_{i}^{in}) +\ell(f_{\theta}(x_{i}^{in}+\delta_{i}^{in*}), y_{i}^{o})] \\ + \beta \cdot \frac{1}{M} [&\sum_{c=1}^{\frac{M}{2}}\ell(f_{\theta}(x_{c}^{o}), y_{c}^{o}) + \sum_{r=\frac{M}{2}+1}^{M}\ell(f_{\theta}(x_{r}^{o}+\delta_{r}^{o*}), y_{r}^{o})]\\
%   \end{aligned}
%   \label{eq:dad-training-obj}
% \end{equation}
% where $N$ and $M$ are the numbers of ID and auxiliary OOD training samples, respectively, $\beta$ is the balance weight between ID and OOD samples, $x_{i}^{in}$ and $x_{c}^{o}$ are the $i$-th ID sample with label $y_{i}^{in}$ and $c$-th OOD sample with pseudo label $y_{c}^{o}$, respectively, and $x_{i}^{in}+\delta_{i}^{in*}$ and $x_{r}^{o}+\delta_{r}^{o*}$ are adversarial ID and adversarial OOD samples corresponded to the $i$-th ID and $r$-th clean OOD samples, respectively. The pseudo labels $y_{i}^{o}$ and $y_{c}^{o}$ of $x_{i}^{in}+\delta_{i}^{in*}$ and $x_{r}^{o}+\delta_{r}^{o*}$ are generated according to \equaref{eq:dad-self-labelling-advid} adn \equaref{eq:dad-self-labelling-ood}, respectively. During training, the optimal perturbation $\delta_{i}^{in*}$ is optimized by maximizing the CE loss on $x_i^{in}$ and its ground-truth label $y_i^{in}$: $\delta_{i}^{in*} = {\arg\max}_{||\delta_{i}^{in}||\leq \epsilon} -\log(x_{i}^{in}+\delta_{i}^{in})_{y_{i}^{in}}$, and the optimal perturbation $\delta_{r}^{o*}$ is solved by maximizing the CE loss on $x_r^{o}$ and the `OOD' class with the maximum probability: $\delta_r^{o*}={\arg\max}_{||\delta_{r}^{o}|| \leq \epsilon} -\log(\max(f_{\theta}(x_r^o+\delta_r^o)_{[K+1:K+V]}))$. 

% With pseudo-labels that can well distinguish different OOD samples, the second term of \equaref{eq:dad-training-obj} makes each `OOD' class node competes with each other to extract their class-specific, robust OOD discriminative features, utilizing the robust defense over `OOD' class nodes as robust detection. Moreover, the first term of \equaref{eq:dad-training-obj} assigns adversarial ID samples a label within the `OOD' classes, further enhancing the DNN's capability to output high confidence on adversarially perturbed samples in the low-density boundaries of the in-distribution. It is worth noting that the first term does not force the DNN to ignore perturbation features on ID samples, thus significantly mitigating the impact on DNN accuracy. By combining these two terms, the `OOD' class nodes can robustly output high confidence against OOD attacks. 

DaD performs adversarial training using auxiliary OOD data on the additional `OOD' class nodes and exploits adversarial ID samples as auxiliary near-OOD samples to enhance the robustness of DNNs in detecting attacks. Formally, its training objective is:
\begin{equation}
  \begin{aligned}
  \arg\min_{\theta}
  \frac{1}{2N} &\sum_{i=1}^{N} [\ell(f_{\theta}(x_{i}^{in}), y_{i}^{in}) +\ell(f_{\theta}(x_{i}^{in}+\delta_{i}^{in*}), y_{i}^{o})] \\ + \beta \cdot \frac{1}{M} [&\sum_{c=1}^{\frac{M}{2}}\ell(f_{\theta}(x_{c}^{o}), y_{c}^{o}) + \sum_{r=\frac{M}{2}+1}^{M}\ell(f_{\theta}(x_{r}^{o}+\delta_{r}^{o*}), y_{r}^{o})]\\
  \end{aligned}
  \label{eq:dad-training-obj}
\end{equation}
where $N$ and $M$ are the numbers of ID and auxiliary OOD training samples, respectively, $\beta$ is the balance weight between ID and OOD samples, $x_{i}^{in}$ is the $i$-th ID sample with ground-truth label $y_{i}^{in}$, $x_{c}^{o}$ and $x_{r}^{o}$ are the $c$-th and $r$-th OOD samples with pseudo labels $y_{c}^{o}$ and $y_{r}^{o}$, respectively, and $x_{i}^{in}+\delta_{i}^{in*}$ and $x_{r}^{o}+\delta_{r}^{o*}$ are the adversarial ID and adversarial OOD samples corresponding to the $i$-th ID and $r$-th clean OOD samples, respectively. The pseudo label $y_{i}^{o}$ is generated according to \equaref{eq:dad-self-labelling-advid}, while $y_{c}^{o}$ and $y_{r}^{o}$ are generated according to \equaref{eq:dad-self-labelling-ood}. During training, the optimal perturbation $\delta_{i}^{in*}$ is optimized by maximizing the CE loss on $x_i^{in}$ and its ground-truth label $y_i^{in}$: $\delta_{i}^{in*} = {\arg\max}_{||\delta_{i}^{in}||\leq \epsilon} -\log(f_{\theta}(x_{i}^{in}+\delta_{i}^{in})_{y_{i}^{in}})$, and the optimal perturbation $\delta_{r}^{o*}$ is solved by maximizing the CE loss on $x_r^{o}$ and the `OOD' class that has the maximum probability: $\delta_r^{o*}={\arg\max}_{||\delta_{r}^{o}|| \leq \epsilon} -\log(\max(f_{\theta}(x_r^o+\delta_r^o)_{[K+1:K+V]}))$. 

With pseudo-labels that can well distinguish different OOD samples, the second term of \equaref{eq:dad-training-obj} makes each `OOD' class node compete with each other to extract their class-specific, robust OOD discriminative features, utilizing the robust defense over `OOD' class nodes as robust detection. Moreover, the first term of \equaref{eq:dad-training-obj} assigns adversarial ID samples a label within the `OOD' classes, further enhancing the DNN's capability to output high confidence on adversarially perturbed samples in the low-density boundaries of the in-distribution. It is worth noting that the first term does not force the DNN to ignore the perturbation features of ID samples, thus significantly mitigating the impact on DNN accuracy. By combining these two terms, the `OOD' class nodes can robustly produce high confidence outputs against OOD attacks. 

\section{Experiments}
\label{sec:expm}

We present experimental setup, main results and ablation studies in \secref{sec:exp-setup}, \secref{sec:main-res}, and \secref{sec:ablation-studies}, respectively. 

\subsection{Experimental Setup}
\label{sec:exp-setup}

In the robustness research field, when validating the robustness of a method (model), the maximum perturbation radius for training and testing is typically set to be the same unless otherwise specified. This paper follows the mainstream settings in the field of robustness research, considering the $L_{\infty}$ threat model with a maximum perturbation radius of 8/255. Next, we detail datasets (\secref{sec:datasets}), training settings (\secref{sec:training-settings}), evaluation metrics (\secref{sec:eval-metrics}) and attacks for evaluation (\secref{sec:reliable-attacks}). 

\subsubsection{Datasets} 
\label{sec:datasets} 

\noindent\textbf{In-distribution dataset}. 
Covering the majority of ID datasets considered in robust OOD detection studies, we select four specific datasets: SVHN \cite{netzer2011reading}, CIFAR10, CIFAR100 \cite{krizhevsky2009learning}, and Tiny-ImageNet-200\footnote{http://cs231n.stanford.edu/tiny-imagenet-200.zip}. The SVHN dataset features 73,257 training images and 26,032 test images, each representing a house number from 0 to 9 in a 32x32 pixel format. CIFAR10 consists of 10 classes, providing 50,000 training images and 10,000 test images. In contrast, CIFAR100 includes 100 classes, with the same number of training and test samples as CIFAR10. Tiny-ImageNet-200, a subset of ImageNet-1K \cite{deng2009imagenet, russakovsky2015imagenet}, encompasses 200 classes, each containing 500 images. The images in Tiny-ImageNet-200 are resized to 64x64 pixels, making this dataset more suitable for computational tasks such as adversarial training. \\
\textbf{Auxiliary Out-of-Distribution Training Set}. For datasets with a resolution of 32x32, we primarily use the downsampled ImageNet-RC (32x32) \cite{chrabaszcz2017downsampled} as our auxiliary OOD training set. To ensure there is no overlap with CIFAR class labels, we utilize the NLTK library to filter out samples from ImageNet-RC that exhibit a path similarity greater than 0.35 to CIFAR labels. Additionally, we consider the 300K Random Images (300K)\footnote{https://github.com/hendrycks/outlier-exposure} as an alternative auxiliary OOD training set, removing any overlapping samples based on the method outlined in \cite{hendrycks2018deep}. For Tiny-ImageNet-200, we use downsampled 64x64 samples from the remaining 800 classes of ImageNet-RC as the auxiliary OOD training set.\\
\textbf{Out-of-Distribution Test Set}. For 32x32 resolution datasets, we employ five datasets for OOD testing: Textures \cite{cimpoi2014describing}, iSUN \cite{xu2015turkergaze}, Places365 \cite{zhou2017places}, LSUN (crop), and LSUN (resize) \cite{yu2015lsun}. If the ID training set is SVHN, we include the test sets of CIFAR10 and CIFAR100 as OOD test sets. Conversely, if the ID training set is CIFAR10 or CIFAR100, we treat the SVHN test set as an OOD test set. These individual OOD test sets are combined to create a comprehensive OOD test set for default result reporting. For Tiny-ImageNet-200, we use Places365 and Textures as OOD test sets, resizing them to 64x64 resolution. Similar to the 32x32 datasets, we combine these two datasets and report results based on the mixture by default.\\

\subsubsection{Training Settings}
\label{sec:training-settings}

We train ResNet-18 \cite{he2016deep} on SVHN for 50 epochs and WRN-40-4 \cite{zagoruyko2016wide} on CIFAR10, CIFAR100, and Tiny-ImageNet-200 for 200 epochs. The training utilizes the SGD optimizer with a momentum of 0.9, a weight decay of 0.0005, and an initial learning rate (LR) of 0.1. We set the parameter $\beta$ in \equaref{eq:dad-training-obj} to 1 for SVHN, CIFAR10 and CIFAR100 but set it to 0.5 for Tiny-ImageNet-200. For SVHN, the batch sizes for ID and OOD data are 128 and 256, respectively, with the LR reduced at 25 and 40 epochs. However, for CIFAR10, CIFAR100, and Tiny-ImageNet-200, we use the same settings as SVHN, but decay the LR at 150 and 180 epochs. Standard data augmentation techniques, including random cropping with padding (4 pixels for 32x32 resolution datasets and 8 pixels for 64x64 resolution datasets) and random flipping, are applied to all datasets. During training, PGD-10 and PGD-20 attacks with an attack radius of 8/255 and an attack step of 2/255 are applied to all ID and auxiliary OOD training sets, except for the SVHN training set, where the attack step is 1/255. Additionally, we implement a warm-up strategy for training adversarial ID samples as well as clean and adversarial OOD samples. Specifically, adversarial training on OOD data and the training of adversarial ID samples are conducted after the 100th epoch for all ID datasets. For Tiny-ImageNet-200, training on clean OOD data begins only after the 10th epoch. After training, a TAPGD$^{io}_{ce}$ attack (described in \secref{sec:reliable-attacks}) is applied to a mini-validation set, which randomly selects 256 test OOD samples from each OOD test set, in order to determine the optimal checkpoint. 

% % Please add the following required packages to your document preamble:
% % \usepackage{multirow}
% % \usepackage{graphicx}
% \begin{table*}[!htbp]
%   \centering
%   \caption{Robust performance in detecting ID attacks. Models of each method evaluated are the same ones as those in \tabref{tab:results-mixed-OOD}.}
%   \resizebox{0.8\textwidth}{!}{%
%   \setlength{\tabcolsep}{2.2pt}{
%   \begin{tabular}{c|c|cc|cc|cc|cc|cc|cc}
%   \hline
%    \multicolumn{2}{l|}{} 
%    & \multicolumn{12}{c}{ AUC ($\uparrow$) / TPR-$N$ ($\uparrow$)} \\ \hline
%   \multicolumn{1}{c|}{D$^{in}$} 
%     & \multicolumn{1}{c|}{Method} 
%       % & \multicolumn{1}{c|}{Acc} 
%         & \multicolumn{2}{c|}{PGD$^{io}_{ce}$ } 
%         & \multicolumn{2}{c|}{PGD$^{io}_{lgt}$ } 
%         & \multicolumn{2}{c|}{APGD$^{io}_{ce}$ } 
%         & \multicolumn{2}{c|}{APGD$^{io}_{lgt}$ } 
%         & \multicolumn{2}{c|}{TAPGD$^{io}_{ce}$ } 
%         & \multicolumn{2}{c}{TAPGD$^{io}_{lgt}$ } \\ \hline
%   \multicolumn{1}{l|}{\multirow{6}{*}{ \rotatebox{270}{SVHN} }} 
%     % & \multicolumn{1}{l|}{ BKG }  % & 00.-- 
%     %   & 99.99   & 100.0 
%     %   & 99.99   & 100.0  
%     %   & 99.79   & 99.79 
%     %   & 12.60   & 1.08 
%     %   & 12.77   & 0.26  
%     %   & 1.77   & 0.06   \\
%     & \multicolumn{1}{l|}{ACET}  % & 95.93 
%       & 99.99   & 100.0 
%       & 99.99   & 100.0 
%       & 64.93   & 52.82 
%       & 34.60   & 23.82 
%       & 32.48   & 21.08  
%       & 31.37   & 19.38   \\
%     & \multicolumn{1}{l|}{ATOM}   % & 95.98  
%       & 99.99   & 100.0 
%       & 99.99   & 100.0 
%       & 99.96   & 99.70
%       & 21.60   & 12.93 
%       & 17.84   & 9.83  
%       & 16.43   & 7.35   \\
%     & \multicolumn{1}{l|}{RobDet}  
%       & 0.--   & 0.-- 
%       & 0.--   & 0.-- 
%       & 0.--   & 0.-- 
%       & 0.--   & 0.-- 
%       & 0.--   & 0.-- 
%       & 0.--   & 0.--   \\
%     & \multicolumn{1}{l|}{DaD$^l$}  %  & 95.86 
%       & 99.99   & 100.0 
%       & 99.99   & 100.0 
%       & 98.94   & 98.07 
%       & 98.92   & 98.07 
%       & 98.89   & 97.61  
%       & 98.88   & 96.89   \\
%     & \multicolumn{1}{l|}{DaD$^{ul}$}  %  & 00.-- 
%       & 99.99   & 100.0 
%       & 99.99   & 100.0 
%       & 99.04   & 98.17 
%       & \textbf{ 98.92 }   & \textbf{ 98.31 } 
%       & \textbf{ 98.87 }   & \textbf{ 98.29 }  
%       & \textbf{ 98.91 }   & \textbf{ 98.31 }  \\ \hline
%   \multicolumn{1}{l|}{\multirow{6}{*}{ \rotatebox{270}{CIFAR10} }} 
%     % & \multicolumn{1}{l|}{ BKG }  % & 00.-- 
%     %   & 99.99   & 100.0 
%     %   & 99.99   & 100.0 
%     %   & 98.92   & 98.28 
%     %   & 2.23     & 0.04 
%     %   & 1.15     & 0.04   
%     %   & 1.12     & 0.04    \\
%     & \multicolumn{1}{l|}{ACET}  % & 95.41 
%       & 98.98   & 100.0 
%       & 98.82   & 100.0 
%       & 45.73   & 32.43 
%       & 42.92   & 28.26 
%       & 26.53   & 14.21  
%       & 24.52   & 10.94   \\
%     & \multicolumn{1}{l|}{ATOM}  % & 95.93  
%       & 99.99   & 100.0 
%       & 99.99   & 100.0 
%       & 99.99   & 100.0  
%       & 0.00   & 0.00 
%       & 0.01   & 0.00  
%       & 0.00   & 0.00   \\
%     & \multicolumn{1}{l|}{RobDet}  
%       & 0.--   & 0.-- 
%       & 0.--   & 0.-- 
%       & 0.--   & 0.-- 
%       & 0.--   & 0.-- 
%       & 0.--   & 0.-- 
%       & 0.--   & 0.--   \\
%     & \multicolumn{1}{l|}{DaD$^l$}  % & 95.42  
%       & 98.97   & 100.0 
%       & 98.97   & 100.0 
%       & 98.94   & 95.83 
%       & 97.85   & 92.21 
%       & 97.20   & 91.94  
%       & 97.18   & 91.73   \\
%     & \multicolumn{1}{l|}{DaD$^{ul}$}  % & 00.-- 
%       & 99.97   & 100.0 
%       & 99.97   & 100.0 
%       & 99.97   & 100.0   
%       & \textbf{ 99.86 }   & \textbf{ 99.91 }  
%       & \textbf{ 98.16 }   & \textbf{ 96.80 } 
%       & \textbf{ 98.82 }   & \textbf{ 96.80 }    \\ \hline
%   \multicolumn{1}{l|}{\multirow{6}{*}{\rotatebox{270}{CIFAR100} }} 
%     % & \multicolumn{1}{l|}{ BKG }  % & 00.-- 
%     %   & 91.97   & 96.99 
%     %   & 91.96   & 96.96  
%     %   & 90.76   & 94.75 
%     %   & 1.34   & 0.04 
%     %   & 1.01   & 0.04  
%     %   & 0.97   & 0.04   \\
%     & \multicolumn{1}{l|}{ACET}   % & 77.96 
%       & 93.72   & 99.99 
%       & 93.72   & 99.96  
%       & 28.69   & 14.26
%       & 28.43   & 14.26
%       & 13.92   & 9.68  
%       & 13.12   & 9.67     \\
%     & \multicolumn{1}{l|}{ATOM}  %  & 79.28 
%       & 93.89   & 99.99 
%       & 93.81   & 98.89  
%       & 93.80   & 98.90 
%       & 1.92   & 0.06 
%       & 1.72   & 0.06  
%       & 1.77  & 0.06   \\
%     & \multicolumn{1}{l|}{RobDet}  
%       & 0.--   & 0.-- 
%       & 0.--   & 0.-- 
%       & 0.--   & 0.-- 
%       & 0.--   & 0.-- 
%       & 0.--   & 0.-- 
%       & 0.--   & 0.--   \\
%     & \multicolumn{1}{l|}{DaD$^l$}  % & 74.01
%       & 99.95   & 100.0  
%       & 99.95   & 100.0  
%       & 99.95   & 100.0 
%       & 86.15   & 60.44
%       & 86.10   & 60.39  
%       & 86.12   & 60.41   \\
%     & \multicolumn{1}{l|}{DaD$^{ul}$}  % & 00.-- 
%       & 99.61    & 100.0  
%       & 99.61    & 100.0   
%       & 99.61    & 100.0   
%       & \textbf{ 99.12 }   & \textbf{ 99.51} 
%       & \textbf{ 97.74 }   & \textbf{ 97.99 }   
%       & \textbf{ 97.76 }   & \textbf{ 97.99 }  \\ \hline
%   \end{tabular}%
%   }
%   }
%   \label{tab:results-advid}
%   \end{table*}

\subsubsection{Evaluation Metrics}
\label{sec:eval-metrics}

In the fields of adversarial detection and out-of-distribution detection, adversarial ID samples, along with both clean and adversarial OOD samples, are classified as positive samples, whereas clean ID samples are treated as negative samples. We consider the following metrics: \\
\textbf{Area Under the ROC Curve (AUC)}. This metric evaluates the overall detection performance across various scoring thresholds $\tau$. Usually, a higher AUC value indicates a better detection performance.\\
\textbf{True Positive Rate at True Negative Rate N (TPR-N)}. This metric quantifies the correct detection rate of true OOD samples when N\% of clean ID samples are accurately identified. An increased TPR-$N$ signifies enhanced detection performance. It is crucial to note that for TPR-$N$, the threshold $\tau$ is determined based on the clean ID test samples and remains independent of the OOD samples being processed. In this work, we conventionally use TPR-95 for the simpler datasets SVHN and CIFAR10, while opting for TPR-90 for the more complex datasets CIFAR100 and Tiny-ImageNet-200. 

\subsubsection{Attacks for Reliable Evaluation}
\label{sec:reliable-attacks}

In this work, we focus on the end-to-end robustness of DNNs against OOD attacks when utilizing class probabilities. Thus, the OOD attacks aimed at bypassing detection are somewhat similar to traditional ID attacks that mislead DNN classifiers, with the difference that OOD attacks only need to obtain high prediction probabilities on arbitrary ID classes. However, for detections that use additional reject classes, OOD attacks also need to simultaneously minimize the softmax prediction probabilities on these OOD classes to evade detection. To ensure the attacks are sufficiently effective for reliable evaluation, we utilize not only the standard PGD search algorithm but also focus on the robust APGD \cite{croce2020reliable} search algorithm to solve all adversarial losses.

The attacks that manipulate the softmax probabilities using cross-entropy loss can be unified as follows: 
\begin{equation}
  \begin{split}
    \log(f_{\theta}(x+\delta)_{t^{in}}) - \log(\max(f_{\theta}(x+\delta)_{[K+1:K+V]}))
  \end{split}
\label{eq:attacks-ce}
\end{equation}
In \equaref{eq:attacks-ce}, $t^{in}=\arg\max(f_{\theta}(x+\delta)_{[1:K]})$ is the class with the Maximum Softmax Probability (MSP) among the ID classes, and $\max(f_{\theta}(x+\delta)_{[K+1:K+V]})$ is the MSP among the `OOD' classes. We denote the attacks from \equaref{eq:attacks-ce} solved by PGD and APGD as PGD$^{io}_{ce}$ and APGD$^{io}_{ce}$, respectively. 

Similar to CW loss, we also consider directly maximizing the maximum logit of the ID class while minimizing the maximum logit of the OOD classes to facilitate evasion:
\begin{equation}
  z_{\theta}(x+\delta)_{t^{in}} - \max(z_{\theta}(x+\delta)_{[K+1:K+V]})
\label{eq:attacks-logit}
\end{equation}
where $t^{in}$ indexes the maximum logit of the ID classes, and $\max(z_{\theta}(x+\delta)_{[K+1:K+V]})$ represents the maximum logit of the `OOD' classes. For clarity, we denote the attacks from \equaref{eq:attacks-logit} solved by PGD and APGD as PGD$^{io}_{lgt}$ and APGD$^{io}_{lgt}$, respectively. For detections that do not use any additional OOD classes, e.g., OE \cite{hendrycks2018deep}, ACET \cite{hein2019relu}, and ACE \cite{chen2022robust,augustin2020adversarial}, we set the last terms of \equaref{eq:attacks-ce} and \equaref{eq:attacks-logit} to zero.

Moreover, we consider the multi-targeted surrogate loss \cite{gowal2019alternative} solved by the APGD search algorithm to more thoroughly defeat detection. Specifically, we set $t^{in}$ in the first terms of \equaref{eq:attacks-ce} and \equaref{eq:attacks-logit} to each ID class in turn and then find the worst-case OOD samples that bypass detection with the highest scores. For convenience, we denote these multi-targeted version attacks derived from \equaref{eq:attacks-ce} and \equaref{eq:attacks-logit} as TAPGD$^{io}_{ce}$ and TAPGD$^{io}_{lgt}$, respectively. For all non-targeted (multi-targeted) attacks, we set the iterations and restarts to 100 with 5 (1), respectively, following the default settings of \cite{croce2020reliable}.

Overall, we investigate the following questions: \\
\textbf{RQ1}: \emph{Can DaD effectively improve the end-to-end robustness of DNNs in detecting OOD attacks without compromising clean OOD detection performance?} \\
\textbf{RQ2}: \emph{Is DaD sensitive to the auxiliary dataset, and does it maintain its effectiveness in enhancing OOD detection robustness as the dataset complexity increases?} \\
\textbf{RQ3}: \emph{Does the number of `OOD' classes have a significant impact on the detection robustness of the DaD model, and how to select the optimal number of `OOD' classes?} \\
% \textbf{RQ4}: \emph{How does using the MSP of the 'OOD' classes instead of their confidence sum as a scoring function affect detection robustness?} \\
% \textbf{RQ5}: \emph{How does the two-stage training algorithm of DaD$^{self}$ influence the detection robustness of DNNs?} 

% Please add the following required packages to your document preamble:
% \usepackage{multirow}
% \usepackage{graphicx}
\begin{table*}[!htbp]
  \centering
  \caption{Robust OOD detection performance on the mixed OOD test set. The auxiliary OOD training set for SVHN, CIFAR-10, and CIFAR-100 is ImageNet-RC (32x32), while for Tiny-ImageNet-200 (Tiny-200), it is Tiny-ImageNet-800. TPR-$N$ refers to TPR-95 for SVHN and CIFAR10 but TPR-90 for CIFAR100 and Tiny-200. $\uparrow$ indicates higher is better. The best results are highlighted in bold, while undesirable results are marked in red. All results under the PGD attacks are presented in gray due to their weak effectiveness.}
  \resizebox{0.8\textwidth}{!}{%
  \setlength{\tabcolsep}{2.2pt}{
  \begin{tabular}{c|c|c|cc|cc|cc|cc|cc|cc|cc}
  \hline
   \multicolumn{3}{l|}{} 
   & \multicolumn{14}{c}{ AUC ($\uparrow$) / TPR-$N$ ($\uparrow$)} \\ \hline
  \multicolumn{1}{c|}{D$^{in}$} 
    & \multicolumn{1}{c|}{Method} 
      & \multicolumn{1}{c|}{Acc} 
        & \multicolumn{2}{c|}{ Clean } 
        & \multicolumn{2}{c|}{ PGD$^{io}_{ce}$ } 
        & \multicolumn{2}{c|}{ PGD$^{io}_{lgt}$ } 
        & \multicolumn{2}{c|}{ APGD$^{io}_{ce}$ } 
        & \multicolumn{2}{c|}{ APGD$^{io}_{lgt}$ } 
        & \multicolumn{2}{c|}{ TAPGD$^{io}_{ce}$ } 
        & \multicolumn{2}{c}{ TAPGD$^{io}_{lgt}$ } \\ \hline
  \multicolumn{1}{l|}{\multirow{7}{*}{ \rotatebox{270}{SVHN} }} 
    & \multicolumn{1}{l|}{OE}  & 95.87
      & 99.48 & 99.62 
      & \textcolor[rgb]{0.4, 0.4, 0.4}{54.97} & \textcolor[rgb]{0.4, 0.4, 0.4}{36.08} 
      & \textcolor[rgb]{0.4, 0.4, 0.4}{55.64} & \textcolor[rgb]{0.4, 0.4, 0.4}{55.89}
      & \textcolor[rgb]{1,0,0}{36.64} & \textcolor[rgb]{1,0,0}{16.76} 
      & \textcolor[rgb]{1,0,0}{36.75} & \textcolor[rgb]{1,0,0}{16.46}
      & \textcolor[rgb]{1,0,0}{30.74} & \textcolor[rgb]{1,0,0}{16.67} 
      & \textcolor[rgb]{1,0,0}{30.75} & \textcolor[rgb]{1,0,0}{16.67}  \\ 
    & \multicolumn{1}{l|}{SOFL}  & 95.98
      & 99.75 & 99.86  
      & \textcolor[rgb]{0.4, 0.4, 0.4}{46.81} & \textcolor[rgb]{0.4, 0.4, 0.4}{22.19}
      & \textcolor[rgb]{0.4, 0.4, 0.4}{46.76} & \textcolor[rgb]{0.4, 0.4, 0.4}{22.07}
      & \textcolor[rgb]{1,0,0}{31.84} & \textcolor[rgb]{1,0,0}{ 16.28 }
      & \textcolor[rgb]{1,0,0}{31.82} & \textcolor[rgb]{1,0,0}{ 16.28 }
      & \textcolor[rgb]{1,0,0}{28.15} & \textcolor[rgb]{1,0,0}{ 14.82 }
      & \textcolor[rgb]{1,0,0}{28.24} & \textcolor[rgb]{1,0,0}{ 14.69 }  \\ 
    & \multicolumn{1}{l|}{ACET}  & 96.07
      & 97.54   & 90.15   
      & 97.37   & 89.84
      & 97.26   & 89.12
      & 53.83   & 37.34 
      & 53.71   & 36.73  
      & 52.69  & 36.15
      & 52.61  & 36.04  \\
    & \multicolumn{1}{l|}{ATOM}  & 95.79  
      & 98.26   & 93.59   
      & 98.26   & 93.59 
      & 98.25   & 93.59 
      & 69.00   & 61.40 
      & 67.19   & 60.04   
      & 66.48  & 58.29
      & 66.17  & 58.01  \\
    & \multicolumn{1}{l|}{RCE}  & 93.38
      & 98.94   & 96.75   
      & 95.49   & 78.43
      & 95.58   & 78.59
      & 95.25   & 77.73 
      & 95.30   & 77.53
      & 94.67   & 76.32
      & 94.73   & 76.44  \\
    & \multicolumn{1}{l|}{RobDet}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\
    & \multicolumn{1}{l|}{DaD}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\  \hline
  \multicolumn{1}{l|}{\multirow{7}{*}{ \rotatebox{270}{CIFAR10} }} 
    & \multicolumn{1}{l|}{OE}  & 95.29
      & 97.28 & 96.67 
      & \textcolor[rgb]{0.4, 0.4, 0.4}{53.87 } & \textcolor[rgb]{0.4, 0.4, 0.4}{9.98}
      & \textcolor[rgb]{0.4, 0.4, 0.4}{51.08 } & \textcolor[rgb]{0.4, 0.4, 0.4}{9.98} 
      & \textcolor[rgb]{1,0,0}{7.61 } & \textcolor[rgb]{1,0,0}{1.87}
      & \textcolor[rgb]{1,0,0}{7.59 }  & \textcolor[rgb]{1,0,0}{1.87}
      & \textcolor[rgb]{1,0,0}{2.99 }  & \textcolor[rgb]{1,0,0}{0.97} 
      & \textcolor[rgb]{1,0,0}{2.96 }  & \textcolor[rgb]{1,0,0}{0.97}  \\ 
    & \multicolumn{1}{l|}{SOFL}  & 93.96
      & 98.83 & 96.89 
      & \textcolor[rgb]{0.4, 0.4, 0.4}{ 31.70 }  & \textcolor[rgb]{0.4, 0.4, 0.4}{3.21}
      & \textcolor[rgb]{0.4, 0.4, 0.4}{ 31.72 }  & \textcolor[rgb]{0.4, 0.4, 0.4}{3.21}
      & \textcolor[rgb]{1,0,0}{ 8.83 }   & \textcolor[rgb]{1,0,0}{1.06} 
      & \textcolor[rgb]{1,0,0}{ 8.55 }  & \textcolor[rgb]{1,0,0}{ 1.06 }
      & \textcolor[rgb]{1,0,0}{ 2.64 }  & \textcolor[rgb]{1,0,0}{ 0.76 } 
      & \textcolor[rgb]{1,0,0}{ 2.70 }  & \textcolor[rgb]{1,0,0}{ 0.76 }   \\  
    & \multicolumn{1}{l|}{ACET}   & 95.25 
      & 90.94   & 53.43   
      & 90.94   & 53.43
      & 90.94   & 53.43
      & 1.85   & 0.97
      & 2.54   & 1.36  
      & 0.52   & 0.07
      & 0.46   & 0.11  \\
    & \multicolumn{1}{l|}{ATOM}   & 94.81  
      & 92.68   & 74.96   
      & 92.68   & 74.96
      & 92.68   & 74.96
      & 0.79   & 0.07
      & 0.90   & 0.07  
      & 1.33   & 0.27
      & 1.24   & 0.23 \\
    & \multicolumn{1}{l|}{RCE}   & 84.73  
      & 95.88   & 79.88
      & 86.47   & 48.90 
      & 86.53   & 48.90
      & 86.12   & 47.69 
      & 86.18   & 47.85  
      & 84.81  & 44.88
      & 84.85  & 45.00  \\
    & \multicolumn{1}{l|}{RobDet}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\
    & \multicolumn{1}{l|}{DaD}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\  \hline
  \multicolumn{1}{l|}{\multirow{7}{*}{\rotatebox{270}{CIFAR100} }} 
    & \multicolumn{1}{l|}{OE}  & 78.44
      & 90.36 & 50.72
      & \textcolor[rgb]{0.4, 0.4, 0.4}{40.36 } & \textcolor[rgb]{0.4, 0.4, 0.4}{1.01}
      & \textcolor[rgb]{0.4, 0.4, 0.4}{27.33 } & \textcolor[rgb]{0.4, 0.4, 0.4}{0.61 } 
      & \textcolor[rgb]{1,0,0}{1.88 } & \textcolor[rgb]{1,0,0}{0.11}
      & \textcolor[rgb]{1,0,0}{1.82 } & \textcolor[rgb]{1,0,0}{0.10} 
      & \textcolor[rgb]{1,0,0}{0.30 } & \textcolor[rgb]{1,0,0}{0.07}  
      & \textcolor[rgb]{1,0,0}{0.31 } & \textcolor[rgb]{1,0,0}{0.07} \\ 
    & \multicolumn{1}{l|}{SOFL}  & 71.48
      & 94.68 & 62.85
      & \textcolor[rgb]{0.4, 0.4, 0.4}{6.27 }  & \textcolor[rgb]{0.4, 0.4, 0.4}{0.91}
      & \textcolor[rgb]{0.4, 0.4, 0.4}{6.25 }  & \textcolor[rgb]{0.4, 0.4, 0.4}{0.91} 
      & \textcolor[rgb]{1,0,0}{0.46 } & \textcolor[rgb]{1,0,0}{0.00}
      & \textcolor[rgb]{1,0,0}{0.50 } & \textcolor[rgb]{1,0,0}{0.00}
      & \textcolor[rgb]{1,0,0}{0.34 } & \textcolor[rgb]{1,0,0}{0.00} 
      & \textcolor[rgb]{1,0,0}{0.35 } & \textcolor[rgb]{1,0,0}{0.00}   \\ 
    & \multicolumn{1}{l|}{ACET}   & 78.75  
      & 77.06   & 21.75  
      & 77.06   & 21.75 
      & 77.06   & 21.75
      & 0.14   & 0.07
      & 0.16   & 0.07 
      & 0.09  & 0.00
      & 0.11  & 0.00 \\
    & \multicolumn{1}{l|}{ATOM}   & 78.56  
      & 75.59   & 28.43   
      & 75.59   & 28.43 
      & 75.59   & 28.43
      & 0.29   & 0.03 
      & 0.35   & 0.03 
      & 0.10  & 0.00 
      & 0.10  & 0.00 \\
    & \multicolumn{1}{l|}{RCE}    & 60.68 
      & 83.23   & 47.38  
      & 64.24   & 26.17 
      & 64.30   & 26.32
      & 63.60   & 25.74 
      & 63.75   & 25.70  
      & 61.84  & 23.55
      & 61.93  & 23.55  \\
    & \multicolumn{1}{l|}{RobDet}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\
    & \multicolumn{1}{l|}{DaD}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\  \hline
  \multicolumn{1}{l|}{\multirow{7}{*}{\rotatebox{270}{Tiny-200} }} 
    & \multicolumn{1}{l|}{OE}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\ 
    & \multicolumn{1}{l|}{SOFL}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\ 
    & \multicolumn{1}{l|}{ACET}   & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\  
    & \multicolumn{1}{l|}{ATOM}   & 0.-- 
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\  
    & \multicolumn{1}{l|}{RCE}    & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\  
    & \multicolumn{1}{l|}{RobDet}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\
    & \multicolumn{1}{l|}{DaD}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\  \hline
  \end{tabular}%
  }
  }
  \label{tab:results-mixed-OOD}
  \end{table*}


\subsection{Main Results}
\label{sec:main-res}

\subsubsection{Results of detecting adaptive OOD attacks.}
\label{sec:results-of-detecting-id-OOD-attacks} 

DaD aims to fundamentally enhance the worst-case, end-to-end robustness of DNNs in detecting OOD attacks when using class prediction probabilities. For comparison, we select detection methods that also aim to improve the end-to-end robustness of DNNs against OOD attacks. \tabref{tab:results-mixed-OOD} shows the results, where DaD achieves significantly better OOD detection robustness under the strong APGD attacks without significantly compromising the detection performance on clean OOD examples (Clean). On more complex datasets such as CIFAR-100 and Tiny-ImageNet-200, DaD obtains even more significant improvements in OOD detection robustness. 

As expected, cleanly trained OE and SOFL are significantly defeated by weak PGD attacks. ACET and ATOM, which introduce adversarial training to auxiliary OOD data while using a uniform representation, effectively thwart weak PGD attacks but are notably compromised by the strong APGD attacks we developed; in fact, they can be completely defeated on the complex CIFAR and Tiny-ImageNet-200 datasets. In contrast, RCE, which employs a uniform distribution for OOD data, demonstrates relatively effective resistance against APGD attacks. This is because, by treating all OOD data as a normal class of its own, it assigns OOD data a label distinct from those of the different ID classes. However, RCE's performance in detecting clean OOD samples is significantly lower than that of OE and SOFL, as its adversarial training on ID data considerably reduces the MSPs of clean ID samples, hindering the distinction between ID and OOD samples. 
Conversely, RobDet and DaD effectively avoid harming clean OOD detection and achieve better OOD detection robustness than RCE. Nevertheless, DaD achieves better OOD detection robustness than RobDet due to its self-learning setup in \equaref{eq:dad-self-labelling-ood}, which allows for better pseudo-labeling of different auxiliary OOD data. We emphasize again that although RobDet achieves good OOD detection robustness, it does not reveal the importance of setting pseudo-labels for OOD data that can effectively distinguish them for robust OOD learning and detection. In \secref{sec:ablation-studies}, we conduct an in-depth ablation study on the impact of pseudo-labels on the robustness of OOD detection.


% Please add the following required packages to your document preamble:
% \usepackage{multirow}
% \usepackage{graphicx}
\begin{table}[!htb]
  \centering
  \caption{Detection results on different individual OOD datasets. The model and the ID dataset are WRN-40-4 and CIFAR10.}
  \label{tab:results-indiv-datasets}
  \resizebox{0.5\textwidth}{!}{%
  \setlength{\tabcolsep}{2.0pt}{
  \begin{tabular}{l|l|cc|cc|cc|cc}
  \hline
  \multicolumn{1}{c}{} & \multicolumn{1}{c|}{} & \multicolumn{8}{c}{AUC ($\uparrow$ ) / TPR-95 ($\uparrow$)} \\ \hline
  \multicolumn{1}{l|}{$D_{o}$} 
    & Methods 
      & \multicolumn{2}{c|}{Clean } 
      & \multicolumn{2}{c|}{PGD$^{io}_{lgt}$ } 
      & \multicolumn{2}{c|}{APGD$^{io}_{lgt}$ } 
      & \multicolumn{2}{c}{TAPGD$^{io}_{lgt}$ } \\ \hline
  \multicolumn{1}{l|}{\multirow{5}{*}{ \rotatebox{270}{Places365} }} 
  & ACET 
    & 92.60 & 54.28
    & 92.58 & 54.22
    & 2.84 & 1.60 
    & 1.84 & 1.20 \\
  & ATOM 
    & 93.60 & 73.97 
    & 93.55 & 73.91 
    & 0.91 & 0.02
    & 0.91 & 0.02 \\
  & RCE
    & 90.60 & 53.49 
    & 74.49 & 13.45
    & 73.88 & 12.60
    & 71.78 & 8.65 \\
  & RobDet
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.--  \\
  & DaD
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.--  \\ \hline
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  \multicolumn{1}{l|}{\multirow{5}{*}{ \rotatebox{270}{SVHN} }} 
  & ACET 
    & 93.21 & 62.91
    & 93.21 & 62.91
    & 1.14 & 1.32
    & 1.14 & 1.32 \\
  & ATOM 
    & 94.95 & 76.82
    & 94.95 & 76.82
    & 1.66 & 0.45
    & 1.06 & 0.00 \\
  & RCE
    & 94.91 & 63.29 
    & 82.69 & 20.14 
    & 82.03 & 19.18
    & 80.07 & 14.86 \\
  & RobDet
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.--  \\
  & DaD
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.--  \\ \hline
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  \multicolumn{1}{l|}{\multirow{5}{*}{ \rotatebox{270}{ LSUN (c.) } }} 
  & ACET 
    & 96.44 & 84.26
    & 96.42 & 84.26
    & 1.17 & 1.89
    & 0.49 & 0.02 \\
  \multicolumn{1}{l|}{} & ATOM 
    & 98.68 & 97.73
    & 98.64 & 97.73
    & 1.64 & 0.73
    & 0.32 & 0.05 \\
  \multicolumn{1}{l|}{} & RCE 
    & 97.66 & \multicolumn{1}{c|}{85.56} 
    & 91.26 & \multicolumn{1}{c|}{56.39} 
    & 90.82 & \multicolumn{1}{c|}{55.00} 
    & 89.77 & 50.85 \\
  & RobDet
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.--  \\
  & DaD
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.--  \\ \hline
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  \multicolumn{1}{l|}{\multirow{5}{*}{ \rotatebox{270}{LSUN (r.) } }} 
  & ACET 
    & 93.89 & 65.32
    & 93.89 & 65.32
    & 1.02 & 1.35
    & 0.53 & 0.34 \\
  \multicolumn{1}{l|}{} & ATOM 
    & 96.65 & 74.92
    & 96.65 & 74.92
    & 0.91  & 0.43
    & 0.03  & 0.00 \\
  \multicolumn{1}{l|}{} & RCE
    & 94.27 & \multicolumn{1}{c|}{67.74} 
    & 82.16 & \multicolumn{1}{c|}{25.08} 
    & 81.79 & \multicolumn{1}{c|}{23.75} 
    & 80.53 & 18.40 \\
  & RobDet
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.--  \\ 
  & DaD
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.--  \\ \hline
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  \multicolumn{1}{l|}{\multirow{5}{*}{ \rotatebox{270}{iSUN} }} 
  & ACET 
    & 93.51 & 56.22 
    & 93.51 & 56.22 
    & 1.82 & 0.44
    & 0.45 & 0.01 \\
  \multicolumn{1}{l|}{} & ATOM 
    & 94.65 & 62.12
    & 94.65 & 62.12
    & 1.01 & 0.34
    & 0.08 & 0.00 \\
  & RCE
    & 94.27 & \multicolumn{1}{c|}{58.27} 
    & 79.16 & \multicolumn{1}{c|}{18.34} 
    & 78.75 & \multicolumn{1}{c|}{17.40} 
    & 77.47 & 12.82 \\
  & RobDet
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.--  \\
  & DaD
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.--  \\ \hline
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  \multicolumn{1}{l|}{\multirow{5}{*}{ \rotatebox{270}{Textures} }} 
  & ACET 
    & 90.73 & 59.97
    & 90.73 & 59.97
    & 2.06  & 1.52 
    & 1.14 & 1.18 \\
  & ATOM 
    & 96.57 & 79.96
    & 96.57 & 79.96
    & 1.07 & 1.45
    & 1.25 & 0.12 \\
  & RCE
    & 92.38 & \multicolumn{1}{c|}{61.06} 
    & 85.61 & \multicolumn{1}{c|}{35.30} 
    & 85.19 & \multicolumn{1}{c|}{34.41} 
    & 84.00 & 31.96 \\
  & RobDet
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.--  \\
  & DaD
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.-- 
    & 0.-- & 0.--  \\ \hline
  \end{tabular}%
  }
  }
  \end{table}

\subsubsection{Robust detection performance on each individual out-of-distribution test set.}
\label{sec:results-on-indiv-OOD}

In this subsection, we report the OOD detection robustness results on each individual out-of-distribution test set. As illustrated in \tabref{tab:results-indiv-datasets}, DaD demonstrates significantly better performance in detecting adversarial OOD examples crafted by strong APGD attacks, while also maintaining a comparable clean OOD detection performance to clean-trained methods like OE and SOFL. These results further underscore the effectiveness of DaD in enhancing the robustness of OOD detection.

\begin{shaded}
\textbf{Answer to RQ1.} DaD effectively repairs and enhances the end-to-end OOD detection robustness of DNNs, significantly outperforming other methods under strong OOD attacks. Meanwhile, DaD maintains superior detection performance in clean OOD examples.
\end{shaded}

% % Please add the following required packages to your document preamble:
% % \usepackage{multirow}
% % \usepackage{graphicx}
% \begin{table*}[!tbp]
%   \centering
%   \caption{Robust detection performance of using the auxiliary using 300K Images.}
%   \resizebox{0.95\textwidth}{!}{%
%   \setlength{\tabcolsep}{2.0pt}{
%   \begin{tabular}{c|c|c|cc|cc|cc|cc|cc|cc|cc}
%   \hline
%    \multicolumn{3}{l|}{} 
%    & \multicolumn{6}{c|}{ ID Attacks, AUC ($\uparrow$) / TPR-$N$ ($\uparrow$)}
%     & \multicolumn{8}{c}{ OOD Attacks, AUC ($\uparrow$) / TPR-$N$ ($\uparrow$)} \\ \hline
%   \multicolumn{1}{c|}{D$^{in}$} 
%     & \multicolumn{1}{c|}{Method} 
%       & \multicolumn{1}{c|}{Acc} 
%         & \multicolumn{2}{c|}{ PGD$^{io}_{lgt}$ } 
%         & \multicolumn{2}{c|}{ APGD$^{io}_{lgt}$ } 
%         & \multicolumn{2}{c|}{ TAPGD$^{io}_{lgt}$ } 
%         & \multicolumn{2}{c|}{ Clean } 
%         & \multicolumn{2}{c|}{ PGD$^{io}_{lgt}$ } 
%         & \multicolumn{2}{c|}{ APGD$^{io}_{lgt}$ } 
%         & \multicolumn{2}{c}{ TAPGD$^{io}_{lgt}$ } \\ \hline
%   \multicolumn{1}{l|}{\multirow{5}{*}{ \rotatebox{270}{SVHN} }} 
%     & \multicolumn{1}{l|}{ACET} & 96.13 
%       & 35.90   & 9.96 
%       & 19.51   & 2.53 
%       & 16.44   & 1.87  
%       & 99.11   & 98.22 
%       & 99.74   & 99.14 
%       & 88.36   & 80.60 
%       & 88.14  & 78.85   \\ 
%     & \multicolumn{1}{l|}{ATOM} & 95.98  
%       & 99.99   & 99.99   
%       & 18.29  & 10.28   
%       & 16.72  & 9.72  
%       & 99.99   & 100.0 
%       & 99.98  & 100.0 
%       & 86.17   & 79.27   
%       & 86.77   & 79.98   \\ 
%     & \multicolumn{1}{l|}{RCE} & 93.34 
%       & -   & -   
%       & -   & - 
%       & -   & - 
%       & 99.32   & 97.42 
%       & 95.22   & 77.60 
%       & 94.88  & 76.20  
%       & 94.42  & 74.13   \\ 
%     & \multicolumn{1}{l|}{DaD$^{ul}$} & 95.63
%     & 99.99   & 99.99 
%       & \textbf{ 99.82 }  & \textbf{ 98.79 }
%       & \textbf{ 99.84 }  & \textbf{ 98.80 } 
%       & 99.93   &  99.65 
%       & 99.92   &  99.65 
%       & \textbf{ 98.06 }  & \textbf{ 94.28 }  
%       & \textbf{ 97.60 }  & \textbf{ 93.06 }  \\  \hline
%   \multicolumn{1}{l|}{\multirow{4}{*}{ \rotatebox{270}{CIFAR10} }} 
%     & \multicolumn{1}{l|}{ACET} & 95.42 
%       & 98.64   & 99.99   
%       & 41.16   & 27.96 
%       & 24.72   & 11.03 
%       & 98.69   & 95.34  
%       & 95.44   & 80.84
%       & 3.27   & 2.92    
%       & 0.83  & 0.42   \\ 
%     & \multicolumn{1}{l|}{ATOM} & 95.92  
%       & 99.99   & 99.99    
%       & 0.00   & 0.00 
%       & 0.00   & 0.00 
%       & 99.11   & 98.23 
%       & 98.39   & 95.21
%       & 0.44  & 0.31   
%       & 0.41  & 0.31   \\ 
%     & \multicolumn{1}{l|}{RCE} & 85.61 
%       & -   & -  
%       & -   & -  
%       & -   & -   
%       & 93.81   & 66.82 
%       & 82.00   & 29.20
%       & 81.69   & 28.21   
%       & 80.51   & 24.53   \\  
%     & \multicolumn{1}{l|}{DaD$^{ul}$} & 94.85
%       & 98.03   & 99.99   
%       & \textbf{ 97.71 }  & \textbf{ 91.01 }
%       & \textbf{ 96.47 }  & \textbf{ 90.76 }
%       & 99.13   & 97.39 
%       & 97.61   & 97.39 
%       & \textbf{ 93.61 }  & \textbf{ 87.04 }  
%       & \textbf{ 91.84 }  & \textbf{ 81.18 }  \\  \hline
%   \multicolumn{1}{l|}{\multirow{4}{*}{\rotatebox{270}{CIFAR100} }} 
%     & \multicolumn{1}{l|}{ACET} & 77.94 
%       & 93.89   & 99.99   
%       & 28.09   & 14.29 
%       & 12.92   & 9.82 
%       & 88.80   & 68.76 
%       & 84.81   & 49.52 
%       & 1.37   & 0.03   
%       & 0.22  & 0.00   \\ 
%     & \multicolumn{1}{l|}{ATOM} & 78.17 
%       & 94.37   & 99.99    
%       & 0.00   & 0.00 
%       & 0.00   & 0.00  
%       & 91.85   & 83.06 
%       & 90.82   & 81.87 
%       & 0.36   & 0.19   
%       & 0.46   & 0.15   \\ 
%     & \multicolumn{1}{l|}{RCE} & 60.42 
%       & -   & -   
%       & -   & -   
%       & -   & -   
%       & 79.48   & 39.78 
%       & 54.85   & 6.83
%       & 54.14   & 6.29   
%       & 52.31  & 3.36  \\ 
%     & \multicolumn{1}{l|}{DaD$^{ul}$} & 76.16
%       & 93.92   & 100.0   
%       & \textbf{ 92.82 }  & \textbf{ 98.89 }
%       & \textbf{ 91.75 }  & \textbf{ 98.89 }
%       & 92.74   & 77.79 
%       & 89.52   & 77.79 
%       & \textbf{ 88.29 }  & \textbf{ 77.57 }  
%       & \textbf{ 87.06 }  & \textbf{ 76.33 }  \\  \hline
%   \end{tabular}%
%   }
%   }
%   \label{tab:results-of-using-300k-images}
%   \end{table*}

% % Please add the following required packages to your document preamble:
% % \usepackage{multirow}
% % \usepackage{graphicx}
% \begin{table*}[!tbp]
%   \centering
%   \caption{Robust detection performance on Tiny-ImageNet-200.}
%   \resizebox{0.95\textwidth}{!}{%
%   \setlength{\tabcolsep}{2.2pt}{
%   \begin{tabular}{c|c|c|cc|cc|cc|cc|cc|cc|cc}
%   \hline
%    \multicolumn{3}{l|}{} 
%    & \multicolumn{6}{c|}{ ID Attacks, AUC ($\uparrow$) / TPR-$N$ ($\uparrow$)}
%     & \multicolumn{8}{c}{ OOD Attacks, AUC ($\uparrow$) / TPR-$N$ ($\uparrow$)} \\ \hline
%   \multicolumn{1}{c|}{D$^{in}$} 
%     & \multicolumn{1}{c|}{Method} 
%       & \multicolumn{1}{c|}{Acc} 
%         & \multicolumn{2}{c|}{ PGD$^{io}_{lgt}$ } 
%         & \multicolumn{2}{c|}{ APGD$^{io}_{lgt}$ } 
%         & \multicolumn{2}{c|}{ TAPGD$^{io}_{lgt}$ } 
%         & \multicolumn{2}{c|}{ Clean } 
%         & \multicolumn{2}{c|}{ PGD$^{io}_{lgt}$ } 
%         & \multicolumn{2}{c|}{ APGD$^{io}_{lgt}$ } 
%         & \multicolumn{2}{c}{ TAPGD$^{io}_{lgt}$ } \\ \hline
%   \multicolumn{1}{l|}{\multirow{6}{*}{ \rotatebox{270}{Tiny-200} }} 
%     & \multicolumn{1}{l|}{BKG} & 64.48 
%       & 98.94   & 99.92   
%       & 2.73   & 0.62 
%       & 0.32   & 0.05 
%       & 96.42   & 93.64 
%       & 96.12   & 93.22 
%       & 1.21    & 0.62   
%       & 0.40    & 0.09    \\ 
%     & \multicolumn{1}{l|}{ACET} & 64.39 
%       & 95.65   & 94.95  
%       & 5.76    & 0.90 
%       & 4.5     & 0.68 
%       & 97.12   & 94.14 
%       & 96.63   & 93.55 
%       & 42.24   & 10.15   
%       & 8.40    & 1.75    \\ 
%     & \multicolumn{1}{l|}{ATOM} & 64.56  
%       & 98.77   & 97.07  
%       & 4.73   & 0.97 
%       & 2.32   & 0.21 
%       & 99.98   & 100.0 
%       & 90.27   & 89.06 
%       & 1.12    & 0.97   
%       & 0.34    & 0.19   \\ 
%     & \multicolumn{1}{l|}{RCE} & 47.19
%       & -   & - 
%       & -   & - 
%       & -   & -  
%       & 92.12   & 78.90  
%       & 81.34   & 47.46
%       & 81.08   & 46.87  
%       & 80.25   & 43.94    \\ 
%     & \multicolumn{1}{l|}{DaD$^l$} & 64.62
%       & 99.99   & 100.0    
%       & 90.75   & 98.04 
%       & 87.44   & 88.83  
%       & 99.87   & 99.41
%       & 99.86   & 99.41  
%       & 97.02   & 94.72   
%       & 94.31    & 89.25   \\
%     & \multicolumn{1}{l|}{DaD$^{ul}$} & 64.22
%       & 99.99   & 100.0    
%       & \textbf{ 91.77 }   & \textbf{ 98.01 } 
%       & \textbf{ 89.42 }   & \textbf{ 90.13 }
%       & 99.91   & 99.45
%       & 99.90   & 99.44  
%       & \textbf{ 98.01 }  & \textbf{95.97 }  
%       & \textbf{ 96.31 }  & \textbf{91.42 }  \\  \hline
%   \end{tabular}%
%   }
%   }
%   \label{tab:results-on-tiny-200}
%   \end{table*}


% Please add the following required packages to your document preamble:
% \usepackage{multirow}
% \usepackage{graphicx}
\begin{table*}[!htb]
  \centering
  \caption{Robust OOD detection performance of using the auxiliary 300K Images.}
  \resizebox{0.8\textwidth}{!}{%
  \setlength{\tabcolsep}{2.2pt}{
  \begin{tabular}{c|c|c|cc|cc|cc|cc|cc|cc|cc}
  \hline
   \multicolumn{3}{l|}{} 
   & \multicolumn{14}{c}{ AUC ($\uparrow$) / TPR-$N$ ($\uparrow$)} \\ \hline
  \multicolumn{1}{c|}{D$^{in}$} 
    & \multicolumn{1}{c|}{Method} 
      & \multicolumn{1}{c|}{Acc} 
        & \multicolumn{2}{c|}{ Clean } 
        & \multicolumn{2}{c|}{ PGD$^{io}_{ce}$ } 
        & \multicolumn{2}{c|}{ PGD$^{io}_{lgt}$ } 
        & \multicolumn{2}{c|}{ APGD$^{io}_{ce}$ } 
        & \multicolumn{2}{c|}{ APGD$^{io}_{lgt}$ } 
        & \multicolumn{2}{c|}{ TAPGD$^{io}_{ce}$ } 
        & \multicolumn{2}{c}{ TAPGD$^{io}_{lgt}$ } \\ \hline
  \multicolumn{1}{l|}{\multirow{7}{*}{ \rotatebox{270}{SVHN} }} 
    & \multicolumn{1}{l|}{OE}  & 95.87 
      & 99.88  & 99.62
      & 56.65  & 36.19
      & 56.23  & 35.97
      & \textcolor[rgb]{1,0,0}{42.45 }  & \textcolor[rgb]{1,0,0}{19.94 }
      & \textcolor[rgb]{1,0,0}{42.56 }  & \textcolor[rgb]{1,0,0}{19.70 }
      & \textcolor[rgb]{1,0,0}{36.75 }  & \textcolor[rgb]{1,0,0}{13.86 } 
      & \textcolor[rgb]{1,0,0}{36.77 }  & \textcolor[rgb]{1,0,0}{13.80 }  \\
    & \multicolumn{1}{l|}{SOFL}  & 95.98 
      & 99.96  & 99.83 
      & 41.22  & 25.10
      & 42.53  & 27.87
      & \textcolor[rgb]{1,0,0}{27.76 } & \textcolor[rgb]{1,0,0}{18.89 } 
      & \textcolor[rgb]{1,0,0}{29.09 } & \textcolor[rgb]{1,0,0}{21.33 } 
      & \textcolor[rgb]{1,0,0}{22.84 } & \textcolor[rgb]{1,0,0}{14.97 } 
      & \textcolor[rgb]{1,0,0}{22.80 } &  \textcolor[rgb]{1,0,0}{14.83} \\ 
    & \multicolumn{1}{l|}{ACET}  & 96.16 
      & 99.94 & 99.75 
      & 97.59 & 91.23 
      & 97.55 & 91.16 
      & 95.90 & 88.20 
      & 95.91 & 88.16 
      & 95.63 & 87.61 
      & 95.63 &  87.67 \\
    & \multicolumn{1}{l|}{ATOM}  & 96.07 
      & 99.78 & 99.24 
      & 95.55 & 88.49 
      & 95.11 & 88.47 
      & 92.70 & 85.15 
      & 98.03 & 94.03 
      & 92.22 & 84.50 
      & 96.72 & 91.76 \\ 
    & \multicolumn{1}{l|}{RCE}  & 92.97
      & 99.79 & 98.95  
      & 96.59 & 82.67
      & 96.66 & 82.74
      & 94.93 & 78.54
      & 95.04 & 78.56 
      & 94.22 & 76.83
      & 94.33 & 76.92  \\ 
    & \multicolumn{1}{l|}{RobDet}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\
    & \multicolumn{1}{l|}{DaD}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\  \hline
  \multicolumn{1}{l|}{\multirow{7}{*}{ \rotatebox{270}{CIFAR10} }} 
    & \multicolumn{1}{l|}{OE}  & 95.64 
      & 98.39  & 96.91 
      & 8.42   & 1.99 
      & 7.85   & 1.79
      & \textcolor[rgb]{1,0,0}{0.32 }  & \textcolor[rgb]{1,0,0}{0.19 }
      & \textcolor[rgb]{1,0,0}{0.36 } & \textcolor[rgb]{1,0,0}{0.27 }
      & \textcolor[rgb]{1,0,0}{0.13 }  & \textcolor[rgb]{1,0,0}{0.07}
      & \textcolor[rgb]{1,0,0}{0.13 } & \textcolor[rgb]{1,0,0}{0.07 }  \\
    & \multicolumn{1}{l|}{SOFL} & 95.24 
      & 99.14 & 97.15 
      & 55.38 & 8.19 
      & 1.16 & 0.20 
      & \textcolor[rgb]{1,0,0}{0.04} & \textcolor[rgb]{1,0,0}{0.00} 
      & \textcolor[rgb]{1,0,0}{0.06} & \textcolor[rgb]{1,0,0}{0.01} 
      & \textcolor[rgb]{1,0,0}{0.01} & \textcolor[rgb]{1,0,0}{0.00} 
      & \textcolor[rgb]{1,0,0}{0.01} & \textcolor[rgb]{1,0,0}{0.00} \\
    & \multicolumn{1}{l|}{ACET}   & 95.41 
      & 98.61 & 95.93 
      & 97.81 & 95.75 
      & 97.81 & 95.80 
      & \textcolor[rgb]{1,0,0}{17.20} & \textcolor[rgb]{1,0,0}{10.45} 
      & \textcolor[rgb]{1,0,0}{16.48} & \textcolor[rgb]{1,0,0}{10.09} 
      & \textcolor[rgb]{1,0,0}{8.42} & \textcolor[rgb]{1,0,0}{3.01} 
      & \textcolor[rgb]{1,0,0}{6.90} &  \textcolor[rgb]{1,0,0}{2.00} \\
    & \multicolumn{1}{l|}{ATOM}   & 95.93 
      & 99.25 & 98.38 
      & 98.78 & 98.30 
      & 99.07 & 98.28 
      & \textcolor[rgb]{1,0,0}{6.00} & \textcolor[rgb]{1,0,0}{1.56} 
      & \textcolor[rgb]{1,0,0}{8.01} & \textcolor[rgb]{1,0,0}{3.72} 
      & \textcolor[rgb]{1,0,0}{4.61} & \textcolor[rgb]{1,0,0}{0.73} 
      & \textcolor[rgb]{1,0,0}{0.42} &  \textcolor[rgb]{1,0,0}{0.14} \\ 
    & \multicolumn{1}{l|}{RCE}  & \textcolor[rgb]{1,0,0}{ 85.80 } 
      & \textcolor[rgb]{1,0,0}{94.02} & 65.34 
      & 82.32 & 27.75 
      & 82.39 & 27.73 
      & 81.85 & 26.68 
      & 81.90 & 26.66 
      & 80.35 & 22.40 
      & 80.39 & 22.40 \\ 
    & \multicolumn{1}{l|}{RobDet}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\
    & \multicolumn{1}{l|}{DaD}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\  \hline
  \multicolumn{1}{l|}{\multirow{7}{*}{\rotatebox{270}{CIFAR100} }} 
    & \multicolumn{1}{l|}{OE}  & 77.66 
      & 91.13  & 53.43 
      & 56.07  & 1.56
      & 0.85   & 0.0 
      & \textcolor[rgb]{1,0,0}{0.01}  & \textcolor[rgb]{1,0,0}{0.0 }
      & \textcolor[rgb]{1,0,0}{0.01}  & \textcolor[rgb]{1,0,0}{0.0 }
      & \textcolor[rgb]{1,0,0}{0.00}  & \textcolor[rgb]{1,0,0}{0.0 } 
      & \textcolor[rgb]{1,0,0}{0.00}  & \textcolor[rgb]{1,0,0}{0.0 } \\
    & \multicolumn{1}{l|}{SOFL}  & 77.30 
      & 92.51 & 61.59 
      & 31.24 & 0.68 
      & 4.42 & 0.12 
      & \textcolor[rgb]{1,0,0}{0.01} & \textcolor[rgb]{1,0,0}{0.00} 
      & \textcolor[rgb]{1,0,0}{0.57} & \textcolor[rgb]{1,0,0}{0.00} 
      & \textcolor[rgb]{1,0,0}{0.00} & \textcolor[rgb]{1,0,0}{0.00} 
      & \textcolor[rgb]{1,0,0}{0.00} &  \textcolor[rgb]{1,0,0}{0.00} \\
    & \multicolumn{1}{l|}{ACET}   & 78.75  
      & 77.06   & 21.75  
      & 77.06   & 21.75 
      & 77.06   & 21.75
      & 0.14   & 0.07
      & 0.16   & 0.07 
      & 0.09  & 0.00
      & 0.11  & 0.00 \\
    & \multicolumn{1}{l|}{ATOM}   & 78.56  
      & 75.59   & 28.43   
      & 75.59   & 28.43 
      & 75.59   & 28.43
      & 0.29   & 0.03 
      & 0.35   & 0.03 
      & 0.10  & 0.00 
      & 0.10  & 0.00 \\
    & \multicolumn{1}{l|}{RCE}   &  60.42 
      & \textcolor[rgb]{1,0,0}{80.85} & \textcolor[rgb]{1,0,0}{27.93} 
      & 55.03 & 4.57 
      & 55.11 & 4.58 
      & 54.17 & \textcolor[rgb]{1,0,0}{4.33} 
      & 54.25 & \textcolor[rgb]{1,0,0}{4.32} 
      & 51.75 & \textcolor[rgb]{1,0,0}{3.66} 
      & 51.63 & \textcolor[rgb]{1,0,0}{3.65} \\
    & \multicolumn{1}{l|}{RobDet}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\
    & \multicolumn{1}{l|}{DaD}  & 0.--
      & 0.--   & 0.--  
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.-- 
      & 0.--   & 0.--   \\  \hline
  \end{tabular}%
  }
  }
  \label{tab:ood-robustness-of-using-300k-images}
  \end{table*}

\subsubsection{Robust OOD detection performance of using the auxiliary 300K Images.}
\label{sec:results-of-using-300k}

In this section, we further report the robustness of different methods in detecting ID attacks and OOD attacks when using the 300K Images dataset. Unlike ImageNet-RC, where each sample is manually labeled, samples in 300K Images do not carry manually annotated labels, so we exclude DaD$^{l}$ and only present the results for DaD$^{self}$. As shown in \tabref{tab:ood-robustness-of-using-300k-images}, DaD$^{self}$ achieves results similar to those obtained with the auxiliary ImageNet-RC dataset discussed in \secref{sec:results-of-detecting-id-OOD-attacks}. In the task of detecting adversarial OOD samples, DaD$^{self}$ significantly outperforms the currently most successful RCE in robustness and maintains an overwhelming robustness advantage over ACET and ATOM (under strong APGD attacks); in the task of detecting adversarial ID samples, DaD$^{self}$ once again surpasses the other methods with overwhelming superiority (under strong APGD attacks). Besides, these experimental results also demonstrate that DaD$^{self}$ is insensitive to the choice of auxiliary dataset, enabling continual enhancement of DNNs' true robustness in detecting ID attacks and OOD attacks. 


\subsection{Ablation studies}
\label{sec:ablation-studies}

We study the impact of different numbers of `OOD' classes on the robustness of DaD models in \secref{sec:different-od-classes} and the effect of the two-stage training algorithm for DaD$^{self}$ on robustness in \secref{sec:romoving-2stage}.


\subsubsection{Effect of using the true labels for auxiliary OOD samples.}
\label{sec:using-true-labels}

pass

% Please add the following required packages to your document preamble:
% \usepackage{multirow}
% \usepackage{graphicx}
\begin{table*}[!htbp]
  \centering
  \caption{Impact of varying the number (\#) of `OOD' classes on the robustness of detection.}
  \resizebox{0.9\textwidth}{!}{%
  \setlength{\tabcolsep}{2.2pt}{
  \begin{tabular}{c|c|c|cc|cc|cc|cc|cc|cc|cc}
  \hline
   \multicolumn{3}{l|}{} 
   & \multicolumn{6}{c|}{ ID Attacks, AUC ($\uparrow$) / TPR-$N$ ($\uparrow$)}
    & \multicolumn{8}{c}{ OOD Attacks, AUC ($\uparrow$) / TPR-$N$ ($\uparrow$)} \\ \hline
  \multicolumn{1}{c|}{Method} 
    & \multicolumn{1}{c|}{\#} 
      & \multicolumn{1}{c|}{Acc} 
        & \multicolumn{2}{c|}{ PGD$^{io}_{lgt}$ } 
        & \multicolumn{2}{c|}{ APGD$^{io}_{lgt}$ } 
        & \multicolumn{2}{c|}{ TAPGD$^{io}_{lgt}$ } 
        & \multicolumn{2}{c|}{ Clean } 
        & \multicolumn{2}{c|}{ PGD$^{io}_{lgt}$ } 
        & \multicolumn{2}{c|}{ APGD$^{io}_{lgt}$ } 
        & \multicolumn{2}{c}{ TAPGD$^{io}_{lgt}$ } \\ \hline
  \multicolumn{1}{l|}{\multirow{4}{*}{ DaD$^{l}$ }} 
    & \multicolumn{1}{l|}{3} & 95.93 
      & 98.46   & 99.98 
      & 97.45   & 92.11 
      & 97.07   & 91.43
      & 98.86   & 99.44 
      & 98.83   & 99.27 
      & 89.51   & 83.22   
      & 88.80   & 82.14   \\ 
    & \multicolumn{1}{l|}{4} & 95.48  
      & 98.97   & 100.0  
      & 97.85   & 92.21 
      & 97.18   & 91.73
      & 99.09   & 97.00 
      & 99.08   & 97.00 
      & 89.48   & 83.26   
      & 88.38   & 82.87   \\ 
    & \multicolumn{1}{l|}{5} & 95.24 
      & 98.89   & 100.0  
      & 97.78   & 92.17 
      & 97.13   & 91.66
      & 99.10   & 96.67 
      & 99.10   & 96.67  
      & 89.31   & 83.04   
      & 88.65   & 82.01   \\ 
    & \multicolumn{1}{l|}{10} & 95.53 
      & 99.19   & 100.0  
      & 98.04   & 92.35 
      & 97.38   & 91.82
      & 99.17   & 96.67 
      & 99.17   & 96.67  
      & 91.13   & 85.09   
      & 90.72   & 84.87   \\  \hline
  \multicolumn{1}{l|}{\multirow{4}{*}{ DaD$^{self}$ }} 
    & \multicolumn{1}{l|}{3} & 93.39 
      & 99.94   & 100.0   
      & 99.83   & 99.89 
      & 98.80   & 96.78
      & 99.10   & 98.89 
      & 99.06   & 98.89
      & 98.84   & 98.63   
      & 98.49   & 98.11   \\ 
    & \multicolumn{1}{l|}{4} & 93.96  
      & 99.97   & 100.0   
      & 99.86   & 99.91 
      & 98.82   & 96.80
      & 99.12   & 99.71 
      & 99.09   & 93.71 
      & 98.68   & 98.35   
      & 98.59   & 98.08   \\ 
    & \multicolumn{1}{l|}{5} & 93.39 
      & 99.87   & 99.99  
      & 99.79   & 99.85 
      & 98.76   & 96.71
      & 99.17   & 99.28 
      & 99.12   & 99.28 
      & 98.50   & 98.63   
      & 97.83   & 97.72   \\ 
    & \multicolumn{1}{l|}{10} & 92.37 
      & 99.80   & 99.99  
      & 97.82   & 95.24 
      & 97.67   & 94.98
      & 98.87   & 98.43 
      & 98.83   & 98.43 
      & 98.02   & 97.46   
      & 95.14   & 92.70   \\ \hline
  \end{tabular}%
  }
  }
  \label{tab:results-of-diff-od-classes}
  \end{table*}

\subsubsection{Effect of varying the number of `OOD' classes on detection robustness.}
\label{sec:different-od-classes}


DaD leverages the adversarial training defense constructed on multiple `OOD' classes as detection to fix the robustness of DNNs in end-to-end detection of ID and OOD attacks. The key setting is to properly use/generate pseudo-labels for auxiliary OOD data that can well differentiate them. In this subsection, we investigate the impact of varying the number of `OOD' classes on detection robustness. \tabref{tab:results-of-diff-od-classes} shows the corresponding experimental results, where the selected model and ID dataset are WRN-40-4 and CIFAR10, respectively. We observe that, as the number of `OOD' classes varies, the change in the robustness of DaD$^{l}$ and DaD$^{self}$ under ID and OOD attacks is not significant, demonstrating the practicality of DaD's ease of training, which is not sensitive to the number of `OOD' classes. The reason behind this is that DNNs do not necessarily require high robust accuracy in classifying adversarial OOD samples, but rather need to ensure that the sum of predicted probabilities over all `OOD' classes remains high to achieve good detection performance. 

\begin{shaded}\textbf{Answer to RQ3.} 
The number of `OOD' classes does not significantly affect the robustness of the DaD model in detecting ID attacks and OOD attacks. Empirically, we recommend simply setting the number of `OOD' classes to be around 30\%-40\% of the total real ID class count.
\end{shaded}


\begin{table*}[]
  \centering
  \caption{Effect of using the MSP of `OOD' classes on detection robustness. AUC$_{MSP_{O}}$ (AUC$_{SSP_{O}}$) represents AUC under using MSP$_{O}$ (SSP$_{O}$) as the scoring function.}
  \resizebox{0.7\textwidth}{!}{%
  \setlength{\tabcolsep}{2.2pt}{
  \begin{tabular}{c|c|c|c}
  \hline
      & \multicolumn{3}{c}{AUC$_{MSP_{O}}$ / AUC$_{SSP_{O}}$ / MMSP$_{O}$ / MSSP$_{O}$}   \\ \hline
  Method 
    & ID 
    & ID TAPGD$^{io}_{lgt}$
    & OOD TAPGD$^{io}_{lgt}$  \\ \hline
  DaD$^{l}$ 
    & - / - / 0.0029 / 0.0034 
    & 96.27 / \textbf{97.18} / 0.1892 / 0.5181 
    & 86.56 / \textbf{88.38} / 0.2451 / 0.5110 \\ \hline
  DaD$^{self}$                      
    & - / - / 0.0028 / 0.0039 
    & 95.81/ \textbf{98.82} / 0.2872 / 0.6206
    & 96.22/ \textbf{98.59} / 0.2891 / 0.6208 \\ \hline
  \end{tabular}
  }}
  \label{tab:using-od-classes-msp}
  \end{table*}


\subsubsection{Effect of using the MSP of `OOD' classes on detection robustness.}
\label{sec:using-od-classes-msp}

The internal maximization for solving $\delta^{o*}$ in the second term of \equaref{eq:dad-training-obj} can cause the Maximum Softmax Probability of the `OOD' class (MSP$_{O}$) of OOD samples to decrease. This is not conducive to distinguishing between ID samples and (adversarial) OOD samples using MSP$_{O}$ \footnote{Usually, ID samples' MSP$_{O}$ is significantly lower than that of OOD samples}. Considering that adversarial training on `OOD' classes does not significantly decrease the Sum of Softmax Probabilities of `OOD' classes (SSP$_{O}$), DaD uses SSP$_{O}$ to differentiate between ID and OOD samples. 
In this subsection, we report on the impact of using MSP$_{O}$ as a scoring function on detection robustness. As shown in \tabref{tab:using-od-classes-msp}, both the ID and OOD detection robustness decrease when using MSP$_{O}$ to distinguish ID and non-ID samples. The gap between the MSP$_{O}$ mean (MMSP$_{O}$) for ID samples and the MMSP$_{O}$ for non-ID samples is significantly smaller than the gap between the SSP$_{O}$ mean (MSSP$_{O}$) for ID samples and the MSSP$_{O}$ for non-ID samples. These results suggest that DaD effectively mitigates the adverse effect of compromised confidence estimates on the `OOD' classes by using the sum of softmax probabilities over the `OOD' classes.

\begin{shaded}\textbf{Answer to RQ4.} 
Using the MSP of the `OOD' classes is not conducive to detection robustness, and
DaD effectively mitigates this adverse effect by using the sum of softmax probabilities over the `OOD' classes.
\end{shaded}

\subsection{Results of detecting adaptive ID attacks.}
\label{sec:results-of-detecting-id-attacks}

pass

\section{Threats to validity}
\label{sec:threats}

In this paper, we propose DaD to fix the end-to-end robustness of DNNs in detecting both ID and OOD attacks using class probabilities. The computational cost of DaD is similar to that of RCE, since the cost of generating pseudo-labels for OOD data is negligible compared to the training itself. We also develop a series of strong adaptive attacks to validate the true robustness of detection. However, similar to the majority of works in the adversarial detection field, we do not validate DaD on transformer-based vision models \cite{vaswani2017attention,
dosovitskiy2020image, han2021transformer}, as these models only exhibit performance advantages on extremely large-scale datasets. Nonetheless, DaD only requires adding a certain number of `OOD' classes in the final layer, decoupling it from the DNN's architecture, and thus it should also be effective for other architectures. 
% Additionally, the baselines chosen in this paper are all retraining
% methods within the detection domain that fundamentally address the
% robustness issues of DNNs. We did not compare them to methods that
% apply additional measures to pre-trained DNN models, as these
% "peripheral" methods have generally been rendered non-robust to
% later strong attacks. Moreover, these methods can be complementary
% rather than adversarial to our approach, as the models used in these
% methods can be directly replaced with models developed using our
% method to further enhance robustness. 

\section{Conclusion}
\label{sec:conclusion}

% The decision logic of DNNs is learned from the training set, making the development of secure DNN software more challenging. DNNs can be vulnerable to both in- and out-of-distribution attacks. While a lot of research has been done to detect adversarial attacks, few methods can effectively detect both in- and out-of-distribution attacks, and most are not thoroughly validated under strong adaptive attacks. 
We propose `Defense as Detection' (DaD), a novel semi-supervised approach to fundamentally repair the end-to-end detection robustness of DNNs against in- and out-of-distribution attacks. The key insight of DaD is to generate pseudo-labels for auxiliary OOD data that effectively differentiate them, enhancing the robustness of adversarial training over the multiple additionally added `OOD' classes against attacks. To reliably validate detection robustness, we also develop a series of strong adaptive attacks (integrating a multi-targeted proxy loss and the Auto-PGD search algorithm). Our extensive experiments demonstrate that DaD significantly enhances the true robustness of DNNs in detecting both in- and out-of-distribution strong attacks while maintaining advanced accuracy on ID samples in the main task and detection performance on clean OOD samples. Additionally, our experiments also reveal that widely used adaptive attacks solved by PGD (even with the CW loss) are ineffective at exposing false security (especially at identifying falsely secure OOD detection) compared to our strong adaptive attacks, highlighting the importance of avoiding overly optimistic evaluations in future works.


% \bibliographystyle{ieee_fullname}
% \bibliography{egbib}


\bibliographystyle{IEEEtran}
\bibliography{egbib}


\newpage

\section{Biography Section}
If you have an EPS/PDF photo (graphicx package needed), extra braces are
 needed around the contents of the optional argument to biography to prevent
 the LaTeX parser from getting confused when it sees the complicated
 $\backslash${\tt{includegraphics}} command within an optional argument. (You can create
 your own custom macro containing the $\backslash${\tt{includegraphics}} command to make things
 simpler here.)
 
\vspace{11pt}

\textbf{If you include a photo:}\vspace{-33pt}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{fig1}}]{Michael Shell}
Use $\backslash${\tt{begin\{IEEEbiography\}}} and then for the 1st argument use $\backslash${\tt{includegraphics}} to declare and link the author photo.
Use the author name as the 3rd argument followed by the biography text.
\end{IEEEbiography}

\vspace{11pt}

\textbf{If you will not include a photo:}\vspace{-33pt}
\begin{IEEEbiographynophoto}{John Doe}
Use $\backslash${\tt{begin\{IEEEbiographynophoto\}}} and the author name as the argument followed by the biography text.
\end{IEEEbiographynophoto}


\vfill

\end{document}


