Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- 20241004/2112.05965v5.json +318 -0
- 20241004/2208.10570v2.json +0 -0
- 20241004/2304.01484v3.json +0 -0
- 20241004/2304.10207v3.json +233 -0
- 20241004/2307.07191v2.json +0 -0
- 20241004/2311.07237v3.json +0 -0
- 20241004/2311.07693v2.json +131 -0
- 20241004/2311.09756v3.json +0 -0
- 20241004/2311.10040v2.json +545 -0
- 20241004/2312.08064v3.json +0 -0
- 20241004/2312.10577v2.json +30 -0
- 20241004/2401.04660v2.json +425 -0
- 20241004/2401.07867v3.json +0 -0
- 20241004/2402.04732v2.json +0 -0
- 20241004/2402.09272v2.json +548 -0
- 20241004/2402.17512v4.json +0 -0
- 20241004/2403.07721v7.json +0 -0
- 20241004/2403.12025v2.json +0 -0
- 20241004/2403.15744v6.json +0 -0
- 20241004/2404.13477v2.json +0 -0
- 20241004/2404.17451v2.json +0 -0
- 20241004/2404.18533v3.json +0 -0
- 20241004/2405.07649v2.json +77 -0
- 20241004/2405.15067v3.json +0 -0
- 20241004/2406.06449v2.json +132 -0
- 20241004/2406.13444v3.json +0 -0
- 20241004/2406.17600v2.json +0 -0
- 20241004/2407.01687v2.json +549 -0
- 20241004/2407.07087v2.json +0 -0
- 20241004/2407.08495v2.json +0 -0
- 20241004/2407.11041v4.json +0 -0
- 20241004/2407.18215v2.json +76 -0
- 20241004/2407.19000v2.json +0 -0
- 20241004/2408.04226v3.json +0 -0
- 20241004/2408.04391v2.json +135 -0
- 20241004/2408.06520v2.json +141 -0
- 20241004/2408.10902v3.json +0 -0
- 20241004/2409.04005v2.json +608 -0
- 20241004/2409.07272v3.json +247 -0
- 20241004/2409.14247v2.json +0 -0
- 20241004/2409.16728v2.json +96 -0
- 20241004/2409.18256v2.json +11 -0
- 20241004/2409.18881v2.json +128 -0
- 20241004/2409.19992v2.json +134 -0
- 20241004/2410.00822v2.json +450 -0
- 20241004/2410.01457v2.json +553 -0
- 20241004/2410.02279v2.json +555 -0
- 20241004/2410.02458v2.json +0 -0
- 20241004/2410.03054v1.json +0 -0
- 20241004/2410.03069v1.json +0 -0
20241004/2112.05965v5.json
ADDED
@@ -0,0 +1,318 @@
| 1 |
+
{
|
| 2 |
+
"title": "Parallelized Robust Distributed Model Predictive Control in the Presence of Coupled State Constraints",
|
| 3 |
+
"abstract": "In this paper, we present a robust distributed model predictive control (DMPC) scheme for dynamically decoupled nonlinear systems which are subject to state constraints, coupled state constraints and input constraints. In the proposed control scheme, all subsystems solve their local optimization problem in parallel and neighbor-to-neighbor communication suffices. The approach relies on consistency constraints which define a neighborhood around each subsystem\u2019s reference trajectory where the state of the subsystem is guaranteed to stay in. Contrary to related approaches, the reference trajectories are improved consecutively. In order to ensure the controller\u2019s robustness against bounded uncertainties, we employ tubes. The presented approach can be considered as a time-efficient alternative to the well-established sequential DMPC. In the end, we briefly comment on an iterative extension. The effectiveness of the proposed DMPC scheme is demonstrated with simulations, and its performance is compared to other DMPC schemes.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "INTRODUCTION",
|
| 9 |
+
"text": "Model predictive control (MPC) algorithms are successfully employed in a wide range of applications and there exists a broad body of literature. The fact that hard constraints on states and inputs can be directly incorporated into the controller and a performance criterion can be taken into account by means of solving an optimization problem significantly contributes to their popularity. A basic approach for proving recursive feasibility and convergence of the optimization problems has been developed in [1 ###reference_b1###] for continuous-time nonlinear systems, and is the basis for numerous MPC algorithms proposed since then. Similar results are derived for discrete-time systems in [2 ###reference_b2###], and an overview can be found in [3 ###reference_b3###, 4 ###reference_b4###, 5 ###reference_b5###]."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "1.1",
|
| 13 |
+
"parent_section_id": "1",
|
| 14 |
+
"section_name": "Distributed MPC",
|
| 15 |
+
"text": "Since the presentation of initial distributed model predictive control (DMPC) schemes [6 ###reference_b6###], their development became a thriving branch in the research on MPC. The motivation behind the development of DMPC is that centralized MPC [4 ###reference_b4###] becomes computationally intractable for large-scale systems, and a reliable communication with a central control-unit is difficult to realize in the case of spatially distributed systems [7 ###reference_b7###, 8 ###reference_b8###].\nIn [9 ###reference_b9###], the methods by which distributed MPC algorithms compute control input trajectories are classified into four groups: iterative methods, sequential methods, methods employing consistency constraints, and approaches based on robustness considerations. In iterative methods, the local controllers exchange the solutions to their local optimization problems several times among each other until they converge. In sequential approaches, local optimization problems of neighboring subsystems are not evaluated in parallel but one after another. In algorithms based on consistency constraints, neighboring subsystems exchange reference trajectories and guarantee to stay in their neighborhood. Other DMPC algorithms consider the neighbors\u2019 control decisions as a disturbance. Examples can be found in [9 ###reference_b9###]. As remarked in [10 ###reference_b10###], the task of distributing MPC algorithms is too complex in order to solve it with one single approach. Instead, for various types of centralized MPC problems, distributed controllers have been taylored. A broad collection of notable DMPC algorithms can be found in [11 ###reference_b11###].\nEspecially the distribution of MPC problems subject to coupled state constraints turned out to be complicated [12 ###reference_b12###], and most available DMPC schemes that are capable of handling them cannot avoid a sequential scheme [13 ###reference_b13###, 14 ###reference_b14###, 15 ###reference_b15###, 16 ###reference_b16###]. However, sequential schemes have the drawback that the computation of the control input for all subsystems becomes very time-consuming for highly connected networks. A notable exception that does not rely on a sequential scheme can be found in [17 ###reference_b17###], where a consistency constraint approach is used instead. This admits that even in the presence of coupled state constraints all subsystems can solve their local optimization problem in parallel and still retain recursive feasibility. In [18 ###reference_b18###], this approach is transferred to a continuous-time setup, and in [19 ###reference_b19###, 20 ###reference_b20###, 21 ###reference_b21###], it is extended to a so-called plug-and-play MPC algorithm. However, [17 ###reference_b17###, 18 ###reference_b18###] employ fixed reference trajectories, i.e., reference trajectories that may not be modified once defined. This is limiting and restricts the possibility to optimize the system\u2019s performance significantly. In this paper, we overcome this limitation for dynamically decoupled systems subject to coupled state constraints."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "1.2",
|
| 19 |
+
"parent_section_id": "1",
|
| 20 |
+
"section_name": "Contributions",
|
| 21 |
+
"text": "We develop a generic DMPC scheme based on consistency constraints that allows for the parallelized evaluation of the local optimization problems in the presence of coupled state constraints. In contrast to the consistency constraint based approaches [17 ###reference_b17###, 18 ###reference_b18###] with fixed reference trajectories, we show that in the case of dynamically decoupled systems this limitation can be overcome. In particular, we show that recursive feasibility and asymptotic stability can be still obtained when invoking a less restrictive assumption on reference trajectories and consistency constraints. It allows the reference trajectories and consistency constraints to be updated at every time-step. As nominal MPC subject to constraints is sensitive to model uncertainties and disturbances, we formulate the proposed DMPC scheme via a constraint-tightening approach using robust tubes.\nThe proposed DMPC scheme can be considered as a time-efficient and scalable alternative to sequential DMPC [15 ###reference_b15###, 22 ###reference_b22###], and a more performant and less restrictive version of the consistency constraint based DMPC in [17 ###reference_b17###, 18 ###reference_b18###] for dynamically decoupled systems. The preliminary version of the proposed DMPC scheme as presented in [23 ###reference_b23###] is restricted to nominal dynamics and the pairwise coupling of states through constraints which are limitations that we overcome in this paper.\nThe remainder is structured as follows. In Sec. 2 ###reference_###, we present the partitioned system and the control objective. In Sec. 3 ###reference_###, we define the local optimization problems (Sec. 3.1 ###reference_###), present assumptions that allow for their parallelized evaluation and ensure robust asymptotic stability (Sec. 3.2 ###reference_###), and derive guarantees of the closed-loop system (Sec. 3.3 ###reference_###). A brief discussion concludes the section. In Sec. 4 ###reference_###, we provide details on the initialization, the computation of reference trajectories and summarize the overall DMPC algorithm. In Sec. 5 ###reference_###, the algorithm\u2019s effectiveness is demonstrated, and in Sec. 6 ###reference_### some conclusions are drawn."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "1.3",
|
| 25 |
+
"parent_section_id": "1",
|
| 26 |
+
"section_name": "Notation",
|
| 27 |
+
"text": "A continuous function is a class function if it is strictly increasing and . If the domain of a trajectory with , , is clear from the context, we also write . By , we denote a trajectory that is computed at time step . The short-hand is equivalent to where is a set of indices. Let . Their Minkowski sum is , the Pontryagin difference , and their Cartesian product. , denote the repeated evaluation of the respective operations over sets of indices. The Hausdorff distance is defined as the minimal distance of a point to a set , i.e., ; denotes the Euclidean norm. Let be a matrix, and a scalar. Then, and . Finally, denote vectors of all zeros or ones, an identity matrix, and for and positive-definite, we define the weighted norm ."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "Preliminaries",
|
| 33 |
+
"text": "In this section, we introduce the system dynamics of a partitioned system and its constraints, define the network topology, review robustness related concepts, and finally state the control objective."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "2.1",
|
| 37 |
+
"parent_section_id": "2",
|
| 38 |
+
"section_name": "System Dynamics and Constraints",
|
| 39 |
+
"text": "Consider a distributed system consisting of subsystems which are dynamically decoupled and behave according to the discrete-time dynamics\nwhere , denote the actual state and input of subsystem , respectively, and a bounded uncertainty.\nIn stack-vector form, the dynamics of the overall system are correspondingly given by\nwhere with , , , stack vectors , , , and .\nAll subsystems may be subject to coupled and non-coupled state constraints. If there exists a coupled state constraint of subsystem that depends on state of subsystem , then we call subsystem a neighbor of subsystem and we write where is the set of all neighboring subsystems of . Let subsystem be subject to coupled state constraints. Then, we define the non-coupled and coupled state constraints of subsystem , respectively, via inequalities as\nwhere and are some continuous functions, and specifies those neighbors of subsystem whose states are coupled with subsystem via coupled state constraint ; are some constants.\nThe corresponding state constraint sets are defined as\nwhere is a set valued function. Moreover, all subsystems are subject to input constraints\nFrom the actual subsystem dynamics (1 ###reference_###), we distinguish the nominal dynamics of the undisturbed system which are given by\nwhere , denote the nominal state and nominal input of subsystem , respectively. The nominal dynamics of the overall system are denoted by"
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "2.2",
|
| 43 |
+
"parent_section_id": "2",
|
| 44 |
+
"section_name": "Network Topology",
|
| 45 |
+
"text": "The coupled state constraints define a graph structure on the distributed system under consideration. The graph is given as where defines the communication links among the subsystems in . Hence neighboring subsystems can communicate with each other. Throughout the paper, we assume that graph is undirected.111A graph is undirected if implies .\nMoreover, to avoid that subsystems can behave in an adversarial way and \u201cforce\u201d neighboring subsystems to infeasible states, we assume that neighboring subsystems have those constraints that couple their states in common. This is formally stated next.\nLet , , be any coupled state constraint of a subsystem . Then all neighbors are subject to a constraint\nfor some where and where denotes the equivalence of the functions."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "2.3",
|
| 49 |
+
"parent_section_id": "2",
|
| 50 |
+
"section_name": "Robust Stability and Tubes",
|
| 51 |
+
"text": "In order to handle uncertainties in the dynamics (1 ###reference_###), we resort to a tube-based approach [24 ###reference_b24###, 25 ###reference_b25###]. Here, an auxiliary controller is employed to bound the deviation of the system\u2019s actual state to the predicted state into an invariant set.\nTo this end, we formally define the deviation of an actual state and nominal state as , and the corresponding dynamics of are\nwith . Considering a control signal\nconsisting of a nominal input and an auxiliary controller , we assume that for (7 ###reference_###) controlled by (8 ###reference_###) there exists a robust positively invariant (RPI) set .\nLet the dynamics of in (7 ###reference_###) be controlled by (8 ###reference_###). For all , assume that there exists a neighborhood of the origin such that for all , , and . Such is called an RPI set.\nFor the construction of RPI sets , we refer to the rich literature on robust tube-based MPC, see [26 ###reference_b26###, 25 ###reference_b25###, 27 ###reference_b27###, 28 ###reference_b28###, 29 ###reference_b29###, 30 ###reference_b30###, 31 ###reference_b31###, 32 ###reference_b32###]. For a general nonlinear system, the dynamics of in (7 ###reference_###) cannot be written as a function , i.e., . Therefore, it is not always straightforward to find RPI sets . Instead, most works on the construction of RPI sets focus on particular classes of systems. In [25 ###reference_b25###], linear systems of the form are considered where are real matrices of respective sizes and the pair is assumed to be controllable. In [27 ###reference_b27###], this is extended to systems with matched nonlinearities of the form\nwhere is assumed to be invertible for all . For both dynamics, auxiliary controllers can be found such that the dynamics of take the form . In contrast, the construction in [29 ###reference_b29###] does not require that the dynamics of take this form. Other recent approaches improve uncertainty bounds online [31 ###reference_b31###], or employ the high-gain idea from funnel control [32 ###reference_b32###] (the latter however is confined to continuous-time systems). Our focus in this paper is on integrating into the proposed consistency constraint based DMPC scheme irrespective of the particular robust MPC method.\nAs a consequence of Ass. 2 ###reference_2###, the actual state stays in a neighborhood of the nominal state for all times since , or equivalently\nThen, we can determine the set of all inputs that possibly takes and define it as\nThe resulting tightened constraint sets are given as , , and .\nBy suitably choosing a nominal control input , we aim at guaranteeing the robust asymptotic stabilization of the overall uncertain dynamics (2 ###reference_###).\nLet be an RPI set for the autonomous discrete-time system with an equilibrium point and , . The equilibrium is said to be robustly stable if for each , there exists a such that for all and arbitrary disturbances ; denotes the Hausdorff distance. The equilibrium is robustly asymptotically stable if is robustly stable and .\nAt last, the following lemma provides useful relations for set-operations, especially in the context of uncertainties.\nLet . Then , and ."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "2.4",
|
| 55 |
+
"parent_section_id": "2",
|
| 56 |
+
"section_name": "Control Objective",
|
| 57 |
+
"text": "Let be a steady state of the nominal dynamics (5 ###reference_###) of subsystem for a constant nominal input . Moreover, denote the stack vector of all steady states by . For all subsystems , the control objective is to robustly asymptotically stabilize desired states where\nsatisfies all nominal state constraints (coupled and uncoupled) and nominal input constraints. Note that the tightened constraint sets as previously defined in Sec. 2.3 ###reference_### ensure that none of the actual constraints (3 ###reference_###) and (4 ###reference_###) are violated in the desired formation due to the uncertainties."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "3",
|
| 61 |
+
"parent_section_id": null,
|
| 62 |
+
"section_name": "Distributed MPC Problems",
|
| 63 |
+
"text": "For each subsystem, we now formulate local optimization problems that can be solved in parallel while guaranteeing the satisfaction of all constraints, most notably that of coupled state constraints. In this section, we provide the theoretic foundations of our approach. Implementation details and the overall DMPC algorithm are presented in the subsequent Sec. 4 ###reference_###."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "3.1",
|
| 67 |
+
"parent_section_id": "3",
|
| 68 |
+
"section_name": "Local Optimization Problems",
|
| 69 |
+
"text": "At every time-step , a subsystem predicts a nominal state trajectory for and a corresponding nominal input trajectory for in accordance with the nominal dynamics (5 ###reference_###). We call the prediction horizon.\nAs the actual state is uncertain, and it is only known that according to (9 ###reference_###), the nominal state trajectories are determined such that the initial state satisfies\nwhere (cf. notation in Sec. 1.3 ###reference_###).\nIn order to allow for parallelization, we introduce the notion of consistency constraints. A consistency constraint guarantees that the nominal state trajectory stays in a -neighborhood of a given reference trajectory , namely\nwhere is some compact neighborhood of the origin which we call consistency constraint set, and a given reference trajectory at time . Both and are assumed to be known to all neighbors of subsystem , and are further specified later. While a consistency constraint allows to optimize the nominal state trajectory in a neighborhood of a reference trajectory, it makes the nominal state trajectory predictable to neighbors. Thereby, consistency constraints give rise to parallelization.\nThroughout the paper, we employ reference trajectories as an inherent ingredient to consistency constraint based DMPC which is aligned with the terminology used in [17 ###reference_b17###, 18 ###reference_b18###]. This should not be mixed up with reference trajectories in the tracking MPC literature.\nIf Ass. 2 ###reference_2### and thereby (9 ###reference_###) hold, then consistency constraint (13 ###reference_###) also implies\nFigure 1 ###reference_### illustrates the concept of consistency constraints.\n###figure_1### ###figure_2### ###figure_3### Given and , we define the local optimization problem of subsystem at time-step as\nwhere\nwith stage-cost-function , a positive-definite terminal cost-function , and positive-definite matrices . The local optimality criterion (15 ###reference_###) is subject to\nfor and some terminal set to be further specified below.\nWe denote those trajectories , that minimize (15 ###reference_###) by and .\nThen, the control law for the -th subsystem (1 ###reference_###), for all , is given by\nwhere and is the auxiliary controller rendering an RPI set (cf. Ass. 2 ###reference_2###).\nWe impose the following slightly modified standard assumptions [4 ###reference_b4###] on terminal sets and terminal cost .\nFor the terminal sets , the terminal cost function , and some state-feedback controller , , we assume:\nfor all , ,\n, and\nfor all , it holds for all and for all ,\n(terminal states satisfy uncoupled (i) and coupled state constraints (ii) in a -neighborhood);\nfor all (input constraint satisfaction);\nfor all (set invariance);\nfor all (terminal cost function is a local Lyapunov function)."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "3.2",
|
| 73 |
+
"parent_section_id": "3",
|
| 74 |
+
"section_name": "Reference Trajectories and Consistency Constraints",
|
| 75 |
+
"text": "The role of the consistency constraints is twofold: on the one hand, consistency constraints shall ensure the satisfaction of all state constraints (both coupled and uncoupled) by bounding the predicted nominal state trajectory in the neighborhood of its reference trajectory . On the other hand, consistency constraints shall only restrict the evolution of nominal state trajectories in so far such that the feasibility of the local optimization problem (15 ###reference_###)-(16 ###reference_###) is preserved. To this end, we assume the following properties of reference trajectories , , for given consistency constraint sets , .\nFor , , and reference states , ,\nit holds that\nand for all\nand additionally, if , it holds that\nThe first part of Ass. 8 ###reference_8### states that consistency constraints imply the satisfaction of state constraints (3 ###reference_###). The second part of Ass. 8 ###reference_8### leads to recursive feasibility. In Sec. 4.2 ###reference_###, we propose a method for the determination of reference trajectories that satisfy Ass. 8 ###reference_8###.\nAt time , we call a set of reference trajectories , , , and the corresponding input trajectories , , initially feasible if\nwhere satisfies Ass. 7 ###reference_7###."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "3.3",
|
| 79 |
+
"parent_section_id": "3",
|
| 80 |
+
"section_name": "Guarantees of the Distributed MPC Problems",
|
| 81 |
+
"text": "Now, we show that the local optimization problems always remain feasible given that there exist initially feasible trajectories, and that the closed-loop system is robust asymptotically stable with respect to , . The dynamics of the closed-loop system resulting from the application of defined in (17 ###reference_###) are given as\nLet Ass. 2 ###reference_2###, 7 ###reference_7### and 8 ###reference_8### hold, and let there be a set of initially feasible reference trajectories , . Then for each subsystem , the local optimization problems (15 ###reference_###) subject to (16 ###reference_###) are recursively feasible with respect to the closed-loop dynamics (22 ###reference_###) for any . Moreover, the closed-loop dynamics (22 ###reference_###) are robustly asymptotically stable with respect to , .\nIn a first step, we prove recursive feasibility. Thereafter robust asymptotic stability is proven.\nRecursive Feasibility: We have to show that there exist feasible solutions to the local optimization problems (15 ###reference_###) subject to (16 ###reference_###) for given that there exist initially feasible reference trajectories at . To this end, we recursively construct candidate trajectories for and for that satisfy constraints (16 ###reference_###).\nFirstly, consider and choose candidate trajectories\nfor all where denotes an initially feasible reference trajectory and its corresponding input trajectory which together satisfy (21 ###reference_###). Now, we show that and also satisfy (16 ###reference_###).\nSince satisfy (21a ###reference_.1###), it holds that\nand it follows that (16a ###reference_.1###) and (16b ###reference_.2###) are satisfied by for all .\nBecause and is a closed neighborhood of the origin, it follows that (16c ###reference_.3###) is also satisfied by . The satisfaction of (16d ###reference_.4###)-(16e ###reference_.5###) trivially follows from (21d ###reference_.4###)-(21e ###reference_.5###). Thereby, we have shown that there exist feasible solutions to the local optimization problem (15 ###reference_###)-(16 ###reference_###) at .\nIn a next step, we show that there exist feasible solutions for using induction. Assume that for all at time-step , there exist predicted nominal state and input trajectories, namely and , that solve (15 ###reference_###) subject to constraints (16 ###reference_###) (induction hypothesis).\nAs we have shown in the previous paragraph, the induction hypothesis is initially fulfilled at .\nWe now show that given the induction hypothesis holds, there also exist feasible candidate trajectories , for all at time (induction step). Therefore, we construct for time-steps , , and all subsystems candidate trajectories\nwhere denotes the auxiliary controller associated with and as specified in Ass. 7 ###reference_7###. Now, we show that and satisfy (16 ###reference_###).\nAt first, we obtain for the initial value of\nand (16a ###reference_.1###) is satisfied. As , satisfy (16b ###reference_.2###) according to the induction assumption, candidate trajectories (24 ###reference_###) satisfy (16b ###reference_.2###) as well. Due to Ass. 8.2 ###reference_i2###, the candidate trajectory , , satisfies (16c ###reference_.3###) for all .\nBesides, as , (16d ###reference_.4###) is trivially satisfied for . Moreover, holds due to Ass. 7.2 ###reference_i2### which applies because , and the satisfaction of (16d ###reference_.4###) also follows for . Finally, it follows from Ass. 
7.3 ###reference_i3### that\nThus, (16e ###reference_.5###) is satisfied as well.\nNow, as the induction hypothesis is also initially fulfilled, it inductively follows that for all there exists a feasible solution to the optimization problems (15 ###reference_###)-(16 ###reference_###), and we conclude recursive feasibility.\nRobust asymptotic stability: At first, we define the auxiliary cost function\nsubject to (which implies (16a ###reference_.1###)) and (16b ###reference_.2###)\u2013(16e ###reference_.5###); is the same as in (15 ###reference_###).\nThen, it holds\nNote that here , and hence the inequality follows from the suboptimality of with respect to the minimization. Using this preliminary result, we show in the remainder of the proof that is a Lyapunov function in terms of [34 ###reference_b34###, Thm. 1].\nBy the positive definiteness of , it holds for the stage cost function and for all , . Thus, there exists a class function such that and for all . This is because\nand we identify where denotes the smallest eigenvalue of . Moreover, for , the unique optimal solution to the local optimization problem (15 ###reference_###)-(16 ###reference_###) is , , . Therefore, for all . Thus, by the feasibility of (15 ###reference_###)-(16 ###reference_###) as shown in the recursive feasibility proof, there exists a class function such that .\nNext, we investigate the descent on for the closed-loop system. At first, we observe\nwhere the latter inequality follows from definition (25 ###reference_###) and the suboptimality of the candidate trajectories. From this, we further derive\nwhere is a class function. Note that\nwhere , which finally yields, together with (27 ###reference_###)-(28 ###reference_###),\nfor all . Thus, by [34 ###reference_b34###, Thm. 1], we have shown that is a Lyapunov function. We further conclude the asymptotic stability of under closed-loop dynamics (22 ###reference_###), and equivalently the robust asymptotic stability of for all . .\nNext, we show that state and input trajectories of the closed-loop system (22 ###reference_###) satisfy all constraints.\nLet the same premises hold as in Thm. 9 ###reference_9###, i.e., let Ass. 2 ###reference_2###, 7 ###reference_7### and 8 ###reference_8### hold, and let there be a set of initially feasible reference trajectories . Then for all , the state trajectory , , of the closed-loop system (22 ###reference_###) and the corresponding input trajectory , , satisfy state constraints (3 ###reference_###) and input constraints (4 ###reference_###).\nFrom Thm. 9 ###reference_9###, it follows that the local optimization problems are recursively feasible and hence state and input trajectories and of the closed-loop dynamics (22 ###reference_###) are well-defined for . According to (17 ###reference_###), the input trajectory is given by where is such that (16d ###reference_.4###) holds, and by definition (10 ###reference_###), we have . Then, it follows that\nand input constraint (4 ###reference_###) holds. Next, due to consistency constraint (16c ###reference_.3###) and (cf. (9 ###reference_###)), it follows that for all\nThen due to Ass. 8 ###reference_8###, the satisfaction of state constraints (3a ###reference_1###) and coupled state constraints (3b ###reference_2###) follows from (18 ###reference_###)-(19 ###reference_###). ."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "3.4",
|
| 85 |
+
"parent_section_id": "3",
|
| 86 |
+
"section_name": "Discussion",
|
| 87 |
+
"text": "If reference trajectories and consistency constraint sets satisfy Ass. 8.1 ###reference_i1###, then state constraint satisfaction is implied via consistency constraint (16d ###reference_.4###). Therefore, no further state constraints apart from the consistency constraint need to be considered in the local optimization problem (15 ###reference_###)-(16 ###reference_###). Hence, the local optimization problems are subject to fewer constraints compared to other DMPC schemes allowing for coupled state constraints, see [14 ###reference_b14###, 15 ###reference_b15###, 16 ###reference_b16###, 17 ###reference_b17###].\nAdditionally, if the consistency constraint sets, namely , are chosen to be convex, then the local optimization problems (15 ###reference_###)-(16 ###reference_###) may be convex even if the original state constraints (3 ###reference_###) are non-convex. In particular, this is the case if dynamics (1 ###reference_###) are linear and the input constraint sets convex; note that linear dynamics give rise to convex and . However, even if dynamics (1 ###reference_###) are nonlinear, the local optimization problems (15 ###reference_###)-(16 ###reference_###) approximately constitute a convex problem for sufficiently small . This is because consistency constraint (16c ###reference_.3###) restricts the optimal solutions to a neighborhood of a reference trajectory in which the system dynamics are approximately linear.\nMoreover, we do not require that reference trajectories satisfy any dynamics (cf. Ass. 8 ###reference_8###) after initialization, i.e., for . This is in contrast to other DMPC approaches employing consistency constraints [35 ###reference_b35###, 18 ###reference_b18###, 17 ###reference_b17###]. Moreover, we allow that reference trajectories can be updated at every time step which allows for enhanced performance compared to algorithms with fixed reference trajectories (see performance comparison Sec. 5.3 ###reference_###). We detail the reference trajectory update in the next section."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "4",
|
| 91 |
+
"parent_section_id": null,
|
| 92 |
+
"section_name": "Parallelized DMPC Algorithm",
|
| 93 |
+
"text": "In the previous section, we have formulated local optimization problems for all subsystems and assumptions that allow for their parallelized evaluation while ensuring recursive feasibility. While the satisfaction of Ass. 1 ###reference_1###, 2 ###reference_2###, 7 ###reference_7### needs to be ensured during the initialization, Ass. 8 ###reference_8### is the only assumption that needs to be taken into account online. In this section, we detail the initialization procedure, present a recursive algorithm to update reference trajectories such that Ass. 8 ###reference_8### is satisfied, and state the overall DMPC algorithm."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "4.1",
|
| 97 |
+
"parent_section_id": "4",
|
| 98 |
+
"section_name": "Initialization",
|
| 99 |
+
"text": "In order to solve the local optimization problems, some parameters need to be chosen offline. Therefore, we suggest the following initialization procedure:\nStep 1 (Problem Formulation): Formulate the control problem such that neighboring agents have those constraints that couple their states in common (Ass. 1 ###reference_1###). This can be always achieved by adding coupled state constraints to neighboring subsystems.\nStep 2 (RPI sets): For each subsystem , determine an RPI set and the corresponding state-feedback controller . For each subsystem, the computations are independent of the other subsystems due to the decoupled dynamics. Available methods are reviewed in Remark 3 ###reference_3###. The existence of and , , is assumed in Ass. 2 ###reference_2###.\nStep 3 (Terminal constraints, stage cost function, and consistency constraint set): For each subsystem , choose symmetric positive-definite matrices and a stage cost function . Thereafter, determine terminal sets and corresponding terminal cost functions such that Ass. 7 ###reference_7### holds. To this end, we first construct auxilliary sets , , that satisfy Ass. 7.2 ###reference_i2###-7.4 ###reference_i4###. These can be computed independently for each subsystem due to the decoupled dynamics. For systems whose linearization is stabilizable, an approach using the discrete-time algebraic Riccati equation can be chosen. Let\nfor some matrices , with respective dimensions and . By solving the discrete-time algebraic Riccati equation\na positive definite matrix is obtained. Choose and with a scalar . Then, there exists a sufficiently small scalar such that Ass. 7.2 ###reference_i2###, 7.4 ###reference_i4### hold for all [3 ###reference_b3###, Remark 5.15]. At last, choose as some neighborhood of the origin and set (this choice might need to be refined later). Then select in a centralized way222Also a distributed computation of is possible. For example, each subsystem incrementally increases and checks at each step constraint satisfaction. For each subsystem, the largest is chosen that still satisfies the constraints. Alternatively, distributed iterative optimization algorithms can be considered [36 ###reference_b36###]. with a sufficiently small scalar such that the constraints in Ass. 7.1 ###reference_i1### hold for all and all subsystems . Note that Ass. 7.3 ###reference_i3### is satisfied due to the choice of as a super-level set of .\nStep 4 (Initially feasible reference trajectories): Determine initially feasible reference trajectories , , that satisfy (21 ###reference_###), e.g. by solving a centralized optimization problem subject to (21 ###reference_###). If no such trajectories could be found, choose a smaller set . \nNote that (21b ###reference_.2###)-(21c ###reference_.3###) can often be simplified. If are linear and are polytopes, then (21b ###reference_.2###)-(21c ###reference_.3###) can be expressed as linear algebraic inequalities. Libraries for computations with polytopes are MPT3 (Matlab) [37 ###reference_b37###], Polyhedra (Julia) [38 ###reference_b38###] and Polytope (Python) [39 ###reference_b39###]. Alternatively, if are nonlinear but still Lipschitz continuous, then there exist scalars and such that333For simplicity, and are assumed to be scalar. In the case of vectors, subsequent calculations hold for row-wise evaluation.\nwhere and . 
Define the maximal distance of any point in some compact set to some point as \nThen, there exist scalars and , and it holds\nThus, we can replace (21b ###reference_.2###)-(21c ###reference_.3###) by"
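Returning to Step 3 above, the terminal ingredients for a stabilizable linearization (A_i, B_i) can be obtained from the discrete-time algebraic Riccati equation. A minimal SciPy sketch; the scaling of the terminal cost and the level of the terminal set are assumptions that still have to be verified against Ass. 7:

```python
import numpy as np
from scipy.linalg import solve_discrete_are

def terminal_ingredients(A, B, Q, R, scale=1.5):
    """Terminal cost V_f(x) = x' Pf x and LQR terminal controller u = K x
    from the DARE of the linearized subsystem dynamics; Pf = scale * P
    with scale >= 1 as in Step 3."""
    P = solve_discrete_are(A, B, Q, R)
    K = -np.linalg.solve(R + B.T @ P @ B, B.T @ P @ A)
    Pf = scale * P
    return Pf, K

def in_terminal_set(x, x_des, Pf, alpha):
    """Terminal set as a sublevel set {x : (x - x_des)' Pf (x - x_des) <= alpha};
    alpha must be shrunk until Ass. 7.1, 7.2 and 7.4 hold."""
    e = x - x_des
    return float(e @ Pf @ e) <= alpha
```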
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "4.2",
|
| 103 |
+
"parent_section_id": "4",
|
| 104 |
+
"section_name": "Online Determination of Reference Trajectories",
|
| 105 |
+
"text": "In Ass. 8 ###reference_8###, general conditions are stated that allow for the parallelized evaluation of the local optimization problems (15 ###reference_###) subject to (16 ###reference_###) while preserving recursive feasibility (cf. Thm. 9 ###reference_9###) and ensuring state constraint satisfaction (cf. Prop. 10 ###reference_10###). Algorithms that ensure the satisfaction of Ass. 8 ###reference_8### are essential to the proposed DMPC scheme. The generality of Ass. 8 ###reference_8### allows for various such algorithms. In this section, we propose such an algorithm that updates a previous reference trajectory for each subsystem . The algorithm is distributed.\nIn particular, let each subsystem be initialized with a set of initially feasible reference trajectories . Then for , each subsystem checks for all separately if\nand\nfor all .\nFrom a practical point of view, (29 ###reference_###)-(30 ###reference_###) can be efficiently evaluated analogously to Step 4 in the previous section. Following the same reasoning, if , are linear and are polytopes, then (29 ###reference_###)-(30 ###reference_###) can be expressed as linear algebraic inequalities. Alternatively, if are only Lipschitz continuous, then (29 ###reference_###)-(30 ###reference_###) can be replaced by\nwhere , , and are as before. In other cases, the evaluation is more elaborate.\nBased on conditions (29 ###reference_###)-(30 ###reference_###), the updated reference trajectories are defined for each subsystem and as\nIntuitively, each subsystem attempts to change its previous reference state to the predicted state for any . The reference states, however, are only changed if no state within a -neighborhood of the predicted state violates any of the state constraints (3 ###reference_###) which is checked by conditions (29 ###reference_###) and (30 ###reference_###). In condition (30 ###reference_###) on the coupled state constraints, both the predicted states and the previous reference states of the neighboring subsystems are considered. This allows for the parallelized computation of reference trajectories. The reference trajectory update of subsystem for is summarized in Algorithm 1 ###reference_###. It guarantees the satisfaction of Ass. 8 ###reference_8### as stated by the following proposition.\nInput , , , \nOutput\nLet Ass. 1 ###reference_1### and Ass. 7.1 ###reference_i1### be satisfied, and let there be a set of initially feasible reference trajectories , . Then for all , reference trajectories and , , recursively defined in (31 ###reference_###), satisfy Ass. 8 ###reference_8### for all times .\nSatisfaction of Ass. 8.1 ###reference_i1###: At first, we show by induction that satisfies Ass. 8.1 ###reference_i1### for all . Note that initially feasible reference trajectories satisfy Ass. 8.1 ###reference_i1### for by definition.\nNext, assuming that , , satisfies Ass. 8.1 ###reference_i1###, we show that also , , satisfies Ass. 8.1 ###reference_i1###. We split the proof in two parts: at first, we show that , , as defined in (31a ###reference_.1###) satisfies Ass. 8.1 ###reference_i1###. Thereafter, we show that also as defined in (31b ###reference_.2###) satisfies Ass. 8.1 ###reference_i1### for . From this, we can conclude, that overall satisfies Ass. 8.1 ###reference_i1### at time .\nPart 1: Consider , , as defined in (31a ###reference_.1###). Note that for all . We consider two cases:\nCase 1: Let conditions (29 ###reference_###)-(30 ###reference_###) be satisfied. 
Then observe that (29 ###reference_###) is equivalent to (18 ###reference_###). Moreover, as for any neighbor , it holds that , (30 ###reference_###) implies (19 ###reference_###). We conclude the satisfaction of Ass. 8.1 ###reference_i1### in case 1. \nCase 2: If conditions (29 ###reference_###)-(30 ###reference_###) are not satisfied for , then . As satisfies (18 ###reference_###) at time-step , it does so at since (18 ###reference_###) is time-invariant and does not depend on other subsystems.\nIn order to determine the satisfaction of (19 ###reference_###), we assume at first that conditions (29 ###reference_###)-(30 ###reference_###) are not satisfied at and any of the neighbors . Then for all , and the satisfaction of (19 ###reference_###) trivially follows from the previous time step when satisfies (19 ###reference_###) by assumption. Next, assume that for all , where is an arbitrary subset of , conditions (29 ###reference_###)-(30 ###reference_###) are satisfied. Then, for all . Let be any and consider the coupled state constraint . By Ass. 1 ###reference_1###, there exist constraints , , for all neighbors of subsystem such that\nFor these , the satisfaction of (30 ###reference_###) implies\nDue to (32 ###reference_###) and since , we can rewrite the latter equation as\nwhere . This is equal to (19 ###reference_###) at time . Thus, we have shown that even in the case that conditions (29 ###reference_###)-(30 ###reference_###) are satisfied for some of the neighbors of subsystem , namely , (19 ###reference_###) still holds. Thereby, Ass. 8.1 ###reference_i1### is also satisfied in case 2.\nPart 2: Consider at where by (31b ###reference_.2###). Since according to terminal constraint (16e ###reference_.5###), the choice satisfies Ass. 8.1 ###reference_i1### due to Ass. 7.1 ###reference_i1###.\nSatisfaction of Ass. 8.2 ###reference_i2###: At last, we show the satisfaction of Ass. 8.2 ###reference_i2###. To this end, we consider the two cases for , , from part 1 again. In case 1, if conditions (29 ###reference_###)-(30 ###reference_###) are satisfied for , then which trivially implies (20 ###reference_###). In case 2, if conditions (29 ###reference_###)-(30 ###reference_###) are not satisfied, we have\nwhich is equivalent to (20 ###reference_###). For at , (20 ###reference_###) is trivially satisfied. Altogether, we also conclude the satisfaction of Ass. 8.2 ###reference_i2###.\n.\nOther strategies to update the reference trajectories are also possible; e.g., sets could be varied in addition."
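A compact sketch of the reference update (31): each subsystem keeps a previous reference state unless the corresponding predicted state, inflated by the consistency set, still satisfies all state constraints, where coupled constraints are checked against the neighbors' previous references. The checks are reduced to Lipschitz-margin tests as in the simplification discussed above; the handling of the appended terminal step in (31b) is omitted and all helper names are ours:

```python
def update_reference(x_pred, ref_prev, ref_nbrs_prev, margin_local, margin_coupled,
                     h_local, h_coupled):
    """One reference update for subsystem i (cf. Algorithm 1).
    x_pred[k], ref_prev[k]: predicted / previous reference states, k = 0..N.
    h_local(x) <= 0 and h_coupled[j](x, xj) <= 0 are the constraints;
    margin_local ~ L_h * eps_i and margin_coupled ~ L_c * (eps_i + eps_j)
    are Lipschitz margins covering the consistency sets (assumed given)."""
    N = len(x_pred) - 1
    ref_new = [None] * (N + 1)
    for k in range(N + 1):
        ok = all(h(x_pred[k]) <= -margin_local for h in h_local)                    # condition (29)
        for j, h_list in h_coupled.items():
            ok &= all(h(x_pred[k], ref_nbrs_prev[j][k]) <= -margin_coupled
                      for h in h_list)                                              # condition (30)
        ref_new[k] = x_pred[k] if ok else ref_prev[k]
    return ref_new
```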
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "4.3",
|
| 109 |
+
"parent_section_id": "4",
|
| 110 |
+
"section_name": "Distributed MPC Algorithm",
|
| 111 |
+
"text": "The distributed MPC algorithm is initialized as detailed in Sec. 4.1 ###reference_###. Then, the distributed MPC algorithms as given in Algorithm 2 ###reference_### are executed in parallel by all subsystems . Note that Algorithm 1 ###reference_### can be replaced by any other algorithm that ensures the satisfaction of Ass. 8 ###reference_8### for all .\nInput\nIf Algorithm 2 ###reference_### is initialized with suboptimal initially feasible reference trajectories, then an iterative version of the proposed DMPC scheme can lead to an improved performance. Therefore, Algorithm 2 ###reference_### is modified as follows: After solving local optimization problem (15 ###reference_###)-(16 ###reference_###), a new reference trajectory , , is computed as\nThen, the local optimization problems are repeatedly solved. Depending on the available computation time, this procedure can be repeated multiple times, before applying the last computed control input . An example is presented in Sec. 5 ###reference_###.\nThe proposed approach is not directly generalizable to systems with dynamic couplings. The main challenges are twofold. Firstly, a parallelized non-iterative DMPC scheme presumably only yields approximate asymptotic convergence to a neighborhood of the desired states, which [35 ###reference_b35###] alleges. Secondly, the heterogeneity in the subsystems\u2019 constraints hardens the update of the reference trajectories (to circumvent this, [17 ###reference_b17###, 18 ###reference_b18###] employ fixed reference trajectories). It is expected that further conditions that account for this heterogeneity in the case of systems with bounded dynamic couplings need to be invoked to ensure recursive feasability. This however is a non-trivial problem on its own and left for future research. For systems with bounded dynamic couplings, however, the proposed DMPC scheme can be extended (though conservatively) along the lines of [40 ###reference_b40###, Sec. 4.6]."
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"section_id": "5",
|
| 115 |
+
"parent_section_id": null,
|
| 116 |
+
"section_name": "Simulation",
|
| 117 |
+
"text": "In this section, we investigate the performance of the proposed algorithm with respect to computation time and optimality. To this end, we consider mobile robots subject to connectivity and collision avoidance constraints, which are often considered coupled state constraints in the literature [41 ###reference_b41###, 42 ###reference_b42###, 14 ###reference_b14###].\nIn particular, we consider the kinematic model of three-wheeled omni-directional robots. The state of robot is given as where , denote the position coordinates and the orientation; its position is defined as . The dynamics of robot are given as\nwhere\nis the radius of the robot body, the wheel radius, the angular velocity of the wheels, and a bounded uniformly distributed disturbance. The corresponding nominal dynamics are\nwhere is the nominal state of robot and the nominal input. The nominal position is defined as ."
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"section_id": "5.1",
|
| 121 |
+
"parent_section_id": "5",
|
| 122 |
+
"section_name": "Controller Design",
|
| 123 |
+
"text": "We follow the four initialization steps in Sec. 4.1 ###reference_### as presented next in great detail.\nLet us consider three robots with nonlinear dynamics (33 ###reference_###) which shall move from an initial formation to a target formation .\nAll robots are subject to connectivity constraints\nwith , and input constraints where denotes the maximum norm. It can be easily verified that the coupled state constraints satisfy Ass. 1 ###reference_1### by design (step 1).\nThe continuous-time nominal dynamics (34 ###reference_###) are discretized with time-step using a 4th-order Runge-Kutta integration algorithm where the nominal inputs are applied as zero-order hold. The control input applied to the robots is\nwhere , .\nNext, we construct RPI sets , (step 2). To this end, we choose the continuous-time auxiliary controller as\nwhere is Hurwitz. Substituting (37 ###reference_###) into (36 ###reference_###), (36 ###reference_###) into (33 ###reference_###), and computing yields\nWe choose for all . To numerically compute the RPI set , we exactly discretize the continuous-time deviation dynamics (38 ###reference_###) and obtain\nwith , and . We assume that , or equivalently, . Then, we can compute as in [24 ###reference_b24###]. The resulting RPI set is a box given as . An outer approximation of is computed in accordance with (10 ###reference_###). Then, Ass. 2 ###reference_2### is satisfied. For required set computations, we use the MPT3 toolbox [37 ###reference_b37###] for Matlab and YALMIP [43 ###reference_b43###].\nNow, we choose the performance matrices as , , , , and consistency constraint sets for all where . Matrices and are chosen such that subsystem 1 tends faster to its desired state than the other subsystems. Thereby, subsystem 1 attempts to violate the connectivity constraint, which is of interest in the performance analysis conducted later (Sec. 5.3 ###reference_###). The terminal cost functions and terminal sets are computed via the discrete-time algebraic Riccati equation as outlined in Sec. 4.1 ###reference_###, step 3, and Ass. 7.2 ###reference_i2###-7.4 ###reference_i4### are satisfied. By choosing terminal sets sufficiently small, also Ass. 7.1 ###reference_i1### is satisfied.\nAt last, we determine initially feasible reference trajectories (step 4). Therefore, observe that is Lipschitz continuous with for all . Then following the discussion on Lipschitz continuous constraints in Sec. 4.1 ###reference_###, step 4, we can rewrite (21c ###reference_.3###) for all more conservatively as\nwhere with . By solving an optimization subject to (21 ###reference_###), we compute initially feasible reference trajectories , , where (21c ###reference_.3###) is implemented as (40 ###reference_###).\nTo conclude the controller design, we implement the local optimization problems (15 ###reference_###)-(16 ###reference_###) where consistency constraint (16c ###reference_.3###) is implemented as a box constraint using the inner approximation of . Reference trajectories are updated at every time-step by Algorithm 1 ###reference_### which ensures the satisfaction of Ass. 8 ###reference_8### (cf. Prop. 12 ###reference_12###). Thereby, the satisfaction of all assumptions is ensured by the controller design."
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"section_id": "5.2",
|
| 127 |
+
"parent_section_id": "5",
|
| 128 |
+
"section_name": "Simulation Results",
|
| 129 |
+
"text": "The three mobile robots start in the initial formation , , , and move to the target formation , , where . Observe that for increasing , the inter-robot distances in the target formation increase. The prediction time is chosen as and the prediction horizon as .\n###figure_4### ###figure_5### \n###figure_6### ###figure_7### ###figure_8### ###figure_9### \n###figure_10### ###figure_11### ###figure_12### ###figure_13### ###figure_14### ###figure_15### ###figure_16### ###figure_17### ###figure_18### ###figure_19### ###figure_20### ###figure_21### ###figure_22### ###figure_23### ###figure_24### ###figure_25### ###figure_26### ###figure_27### ###figure_28### ###figure_29### ###figure_30### ###figure_31### For , the resulting trajectories are depicted in Fig. 2 ###reference_###. Fig. 2(a) ###reference_sf1### shows how the actual state trajectories oscillate around the nominal trajectories due to the disturbances. For comparison, Fig. 2(b) ###reference_sf2### shows the state trajectories in the absence of disturbances; for a detailed discussion of this case, we refer to [23 ###reference_b23###]. Fig. 3 ###reference_### shows that the actual inter-robot distances satisfy the coupled state constraint (35 ###reference_###)."
|
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"section_id": "5.3",
|
| 133 |
+
"parent_section_id": "5",
|
| 134 |
+
"section_name": "Performance Analysis",
|
| 135 |
+
"text": "In order to evaluate the performance of the proposed algorithm with respect to computation time and actual cost, we compare it with two other robust DMPC algorithms: (1) Algorithm 2 ###reference_### with fixed reference trajectories\nThis choice of reference trajectories corresponds to the choice in [17 ###reference_b17###]. (2) Sequential DMPC [14 ###reference_b14###] which is based on [22 ###reference_b22###, Sec. 2] and [15 ###reference_b15###]. The DMPC controllers are implemented using Casadi [44 ###reference_b44###], Ipopt and Matlab; simulations are performed on an Intel Core i5-10310U, 16GB RAM.\nThe simulation results for the relative actual costs and computational times are summarized in Tables 2 ###reference_### and 2 ###reference_###. For robot , the actual cost is computed over the simulated time interval as where ; the -th entry in each field of Table 2 ###reference_### is the actual cost normed with the actual cost of the proposed DMPC. The presented numbers are the average from 100 simulations.\nIn Fig. 4 ###reference_###, the nominal inter-robot distances are depicted for various . For the pair , (40 ###reference_###) can be rewritten as\nwhich is a condition on the nominal inter-robot distance. Intuitively, this means that if the graph of exceeds the dashed line in Fig. 4 ###reference_###, then (41 ###reference_###) is violated. As a consequence, the reference state at the respective time is not changed in order to prevent a potential violation of the coupled state constraints (cf. Sec. 4.2 ###reference_###).\nThis observation can be related to the relative actual costs in Table 2 ###reference_###. The closer the nominal distance gets to the dashed line or even exceeds it, the closer is the performance of the proposed DMPC to that of the DMPC with fixed reference (cf. ). However, if does not exceed the dashed line, the performance of the proposed DMPC scheme is significantly improved compared to that of the DMPC with fixed reference where the relative costs are up to 45% higher (cf. ). Compared to sequential DMPC, the performance of the proposed DMPC scheme also tends to be better in these cases. This indicates that the parallelized evaluation of the local optimization problems can be beneficial over a sequential one despite the need of a consistency constraint. In particular, the proposed DMPC computes the control inputs more than 4 times faster (Table 2 ###reference_###) than sequential DMPC. This is due to the parallel evaluation of the local optimization problems in the proposed DMPC and the reduced number of constraints. This ratio further improves in favor of the proposed DMPC if more subsystems are added.\n###figure_32### ###figure_33### \n###figure_34### ###figure_35### ###figure_36### \n###figure_37### ###figure_38### ###figure_39### \n###figure_40### ###figure_41### ###figure_42### \n###figure_43###"
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"section_id": "5.4",
|
| 139 |
+
"parent_section_id": "5",
|
| 140 |
+
"section_name": "Collision Avoidance Constraints",
|
| 141 |
+
"text": "Instead of (35 ###reference_###), we now consider the collision avoidance constraint\nwhere . Therefore, we replace (40 ###reference_###) by\nWe consider four mobile robots governed by (33 ###reference_###) as before. The initial formation is , , , ; the target formation is , , , . Everything else remains unchanged.\nWe initialize the DMPC algorithms with reference trajectories where the robots move clockwise to their target states on the opposite side of the formation as depicted in Fig. 5(a) ###reference_sf1###. Because it is generally difficult to determine optimal initially feasible reference trajectories in the presence of concave constraints, it can be beneficial to employ the iterative DMPC scheme as outlined in Rem. 13 ###reference_13###. As it can be seen from Table 3 ###reference_###, the actual cost reduces with an increasing number of iterations. Fig. 5 ###reference_### illustrates how the state trajectories of the closed-loop system improve with an increasing number of iterations. Observe that even though constraint (42 ###reference_###) is non-convex, the local optimization problems in our proposed approach are only subject to convex state constraints. That is because the satisfaction of all state constraints is ensured by the consistency constraint which is convex by choice."
|
| 142 |
+
},
|
| 143 |
+
{
|
| 144 |
+
"section_id": "6",
|
| 145 |
+
"parent_section_id": null,
|
| 146 |
+
"section_name": "Conclusion",
|
| 147 |
+
"text": "We presented a robust DMPC algorithm that allows for the parallel evaluation of the local optimization problems in the presence of coupled state constraints while it admits to alter and improve already established reference trajectories. For the case of dynamically decoupled systems subject to coupled constraints, we thereby provide a novel DMPC scheme that allows for a faster distributed control input computation compared to sequential DMPC schemes. Theoretical guarantees on recursive feasibility and robust asymptotic convergence are provided. Moreover, we briefly commented on an iterative extension of the algorithm. In the end, we demonstrated the algorithm\u2019s applicability and compared its performance to other DMPC algorithms."
|
| 148 |
+
}
|
| 149 |
+
],
|
| 150 |
+
"appendix": [],
|
| 151 |
+
"tables": {
|
| 152 |
+
"1": {
|
| 153 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T2\">\n<div class=\"ltx_flex_figure ltx_flex_table\">\n<div class=\"ltx_flex_cell ltx_flex_size_1\">\n<table class=\"ltx_tabular ltx_figure_panel ltx_guessed_headers ltx_align_middle\" id=\"S5.T2.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_column ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S5.T2.1.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.1.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.1.1.1.1\"></span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S5.T2.1.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.1.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.1.2.1.1\">Proposed DMPC</span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S5.T2.1.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.1.3.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.1.3.1.1\">DMPC with fixed reference</span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S5.T2.1.1.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.1.4.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.1.4.1.1\">Sequential DMPC <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2112.05965v5#bib.bib14\" title=\"\">14</a>]</cite></span>\n</span>\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T2.1.2.1\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S5.T2.1.2.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.2.1.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.2.1.1.1.1\">1.5</span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.1.2.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.2.1.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.2.1.2.1.1\">1.00,\u20091.00,\u20091.00</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.1.2.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.2.1.3.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.2.1.3.1.1\">0.93,\u20091.45,\u20091.45</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.1.2.1.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.2.1.4.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.2.1.4.1.1\">1.91,\u20090.93,\u20090.94</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.3.2\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S5.T2.1.3.2.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.3.2.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.3.2.1.1.1\">2.0</span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.1.3.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.3.2.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.3.2.2.1.1\">1.00,\u20091.00,\u20091.00</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.1.3.2.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.3.2.3.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.3.2.3.1.1\">1.02,\u20091.41,\u20091.37</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r 
ltx_border_t\" id=\"S5.T2.1.3.2.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.3.2.4.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.3.2.4.1.1\">1.75,\u20090.91,\u20090.90</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.4.3\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S5.T2.1.4.3.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.4.3.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.4.3.1.1.1\">2.5</span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.1.4.3.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.4.3.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.4.3.2.1.1\">1.00,\u20091.00,\u20091.00</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.1.4.3.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.4.3.3.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.4.3.3.1.1\">1.07,\u20091.12,\u20091.14</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.1.4.3.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.4.3.4.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.4.3.4.1.1\">1.37,\u20090.70,\u20090.72</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.5.4\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_b ltx_border_l ltx_border_rr ltx_border_t\" id=\"S5.T2.1.5.4.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.5.4.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.5.4.1.1.1\">3.0</span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_justify ltx_border_b ltx_border_r ltx_border_t\" id=\"S5.T2.1.5.4.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.5.4.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.5.4.2.1.1\">1.00,\u20091.00,\u20091.00</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_b ltx_border_r ltx_border_t\" id=\"S5.T2.1.5.4.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.5.4.3.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.5.4.3.1.1\">1.07,\u20091.00,\u20091.02</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_b ltx_border_r ltx_border_t\" id=\"S5.T2.1.5.4.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.1.5.4.4.1\">\n<span class=\"ltx_p\" id=\"S5.T2.1.5.4.4.1.1\">1.04,\u20090.61,\u20090.63</span>\n</span>\n</td>\n</tr>\n</tbody>\n</table>\n</div>\n</div>\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Relative actual cost for subsystems 1, 2 and 3.</figcaption><div class=\"ltx_flex_figure\">\n<div class=\"ltx_flex_break\"></div>\n<div class=\"ltx_flex_cell ltx_flex_size_1\">\n<table class=\"ltx_tabular ltx_figure_panel ltx_guessed_headers ltx_align_middle\" id=\"S5.T2.2\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S5.T2.2.1\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_column ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S5.T2.2.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.1.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.1.1.1.1\"></span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S5.T2.2.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.1.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.1.2.1.1\">Proposed DMPC</span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_column ltx_border_r ltx_border_t\" 
id=\"S5.T2.2.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.1.3.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.1.3.1.1\">DMPC with fixed reference</span>\n</span>\n</th>\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S5.T2.2.1.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.1.4.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.1.4.1.1\">Sequential DMPC <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2112.05965v5#bib.bib14\" title=\"\">14</a>]</cite></span>\n</span>\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T2.2.2.1\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S5.T2.2.2.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.2.1.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.2.1.1.1.1\">1.5</span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.2.2.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.2.1.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.2.1.2.1.1\">0.0261</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.2.2.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.2.1.3.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.2.1.3.1.1\">0.0255</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.2.2.1.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.2.1.4.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.2.1.4.1.1\">0.1116</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.2.3.2\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S5.T2.2.3.2.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.3.2.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.3.2.1.1.1\">2.0</span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.2.3.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.3.2.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.3.2.2.1.1\">0.0267</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.2.3.2.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.3.2.3.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.3.2.3.1.1\">0.0266</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.2.3.2.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.3.2.4.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.3.2.4.1.1\">0.1155</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.2.4.3\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S5.T2.2.4.3.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.4.3.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.4.3.1.1.1\">2.5</span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.2.4.3.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.4.3.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.4.3.2.1.1\">0.0254</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T2.2.4.3.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.4.3.3.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.4.3.3.1.1\">0.0253</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" 
id=\"S5.T2.2.4.3.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.4.3.4.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.4.3.4.1.1\">0.1106</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.2.5.4\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_b ltx_border_l ltx_border_rr ltx_border_t\" id=\"S5.T2.2.5.4.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.5.4.1.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.5.4.1.1.1\">3.0</span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_justify ltx_border_b ltx_border_r ltx_border_t\" id=\"S5.T2.2.5.4.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.5.4.2.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.5.4.2.1.1\">0.0265</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_b ltx_border_r ltx_border_t\" id=\"S5.T2.2.5.4.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.5.4.3.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.5.4.3.1.1\">0.0262</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_b ltx_border_r ltx_border_t\" id=\"S5.T2.2.5.4.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T2.2.5.4.4.1\">\n<span class=\"ltx_p\" id=\"S5.T2.2.5.4.4.1.1\">0.1157</span>\n</span>\n</td>\n</tr>\n</tbody>\n</table>\n</div>\n</div>\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>Average computational times for calculating control inputs in seconds.</figcaption>\n</figure>",
|
| 154 |
+
"capture": "Table 1: Relative actual cost for subsystems 1, 2 and 3."
|
| 155 |
+
},
|
| 156 |
+
"2": {
|
| 157 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T3\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S5.T3.7\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T3.7.1.1\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S5.T3.7.1.1.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.1.1.1.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.1.1.1.1.1\">iterations</span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.7.1.1.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.1.1.2.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.1.1.2.1.1\">Robot 1</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.7.1.1.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.1.1.3.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.1.1.3.1.1\">Robot 2</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.7.1.1.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.1.1.4.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.1.1.4.1.1\">Robot 3</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.7.1.1.5\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.1.1.5.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.1.1.5.1.1\">Robot 4</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.7.2.2\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S5.T3.7.2.2.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.2.2.1.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.2.2.1.1.1\">Fixed ref.</span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.7.2.2.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.2.2.2.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.2.2.2.1.1\">5.2028</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.7.2.2.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.2.2.3.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.2.2.3.1.1\">5.1523</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.7.2.2.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.2.2.4.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.2.2.4.1.1\">5.1659</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.7.2.2.5\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.2.2.5.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.2.2.5.1.1\">5.1530</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.7.3.3\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S5.T3.7.3.3.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.3.3.1.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.3.3.1.1.1\">1</span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.7.3.3.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.3.3.2.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.3.3.2.1.1\">2.4460</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.7.3.3.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.3.3.3.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.3.3.3.1.1\">2.4502</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify 
ltx_border_r ltx_border_t\" id=\"S5.T3.7.3.3.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.3.3.4.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.3.3.4.1.1\">2.4832</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.7.3.3.5\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.3.3.5.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.3.3.5.1.1\">2.4474</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.7.4.4\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_l ltx_border_rr ltx_border_t\" id=\"S5.T3.7.4.4.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.4.4.1.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.4.4.1.1.1\">4</span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.7.4.4.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.4.4.2.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.4.4.2.1.1\">1.7965</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.7.4.4.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.4.4.3.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.4.4.3.1.1\">1.7948</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.7.4.4.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.4.4.4.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.4.4.4.1.1\">1.7609</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_r ltx_border_t\" id=\"S5.T3.7.4.4.5\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.4.4.5.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.4.4.5.1.1\">1.7633</span>\n</span>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.7.5.5\">\n<th class=\"ltx_td ltx_align_justify ltx_th ltx_th_row ltx_border_b ltx_border_l ltx_border_rr ltx_border_t\" id=\"S5.T3.7.5.5.1\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.5.5.1.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.5.5.1.1.1\">6</span>\n</span>\n</th>\n<td class=\"ltx_td ltx_align_justify ltx_border_b ltx_border_r ltx_border_t\" id=\"S5.T3.7.5.5.2\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.5.5.2.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.5.5.2.1.1\">1.4752</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_b ltx_border_r ltx_border_t\" id=\"S5.T3.7.5.5.3\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.5.5.3.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.5.5.3.1.1\">1.4779</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_b ltx_border_r ltx_border_t\" id=\"S5.T3.7.5.5.4\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.5.5.4.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.5.5.4.1.1\">1.5088</span>\n</span>\n</td>\n<td class=\"ltx_td ltx_align_justify ltx_border_b ltx_border_r ltx_border_t\" id=\"S5.T3.7.5.5.5\">\n<span class=\"ltx_inline-block ltx_align_top\" id=\"S5.T3.7.5.5.5.1\">\n<span class=\"ltx_p\" id=\"S5.T3.7.5.5.5.1.1\">1.5011</span>\n</span>\n</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>Actual cost () in dependence of the number of iterations, average of 100 simulations. Obtained for .</figcaption>\n</figure>",
|
| 158 |
+
"capture": "Table 3: Actual cost () in dependence of the number of iterations, average of 100 simulations. Obtained for ."
|
| 159 |
+
}
|
| 160 |
+
},
|
| 161 |
+
"image_paths": {
|
| 162 |
+
"1(a)": {
|
| 163 |
+
"figure_path": "2112.05965v5_figure_1(a).png",
|
| 164 |
+
"caption": "Figure 1: Illustration of consistency constraints (13) (at \u03ba=k\ud835\udf05\ud835\udc58\\kappa=kitalic_\u03ba = italic_k) and (14).",
|
| 165 |
+
"url": "http://arxiv.org/html/2112.05965v5/constraint_sets.pdf"
|
| 166 |
+
},
|
| 167 |
+
"1(b)": {
|
| 168 |
+
"figure_path": "2112.05965v5_figure_1(b).png",
|
| 169 |
+
"caption": "Figure 1: Illustration of consistency constraints (13) (at \u03ba=k\ud835\udf05\ud835\udc58\\kappa=kitalic_\u03ba = italic_k) and (14).",
|
| 170 |
+
"url": "http://arxiv.org/html/2112.05965v5/constraint_sets.pdf"
|
| 171 |
+
},
|
| 172 |
+
"1(c)": {
|
| 173 |
+
"figure_path": "2112.05965v5_figure_1(c).png",
|
| 174 |
+
"caption": "Figure 1: Illustration of consistency constraints (13) (at \u03ba=k\ud835\udf05\ud835\udc58\\kappa=kitalic_\u03ba = italic_k) and (14).",
|
| 175 |
+
"url": "http://arxiv.org/html/2112.05965v5/constraint_sets.pdf"
|
| 176 |
+
},
|
| 177 |
+
"2(a)": {
|
| 178 |
+
"figure_path": "2112.05965v5_figure_2(a).png",
|
| 179 |
+
"caption": "(a) Trajectories of the disturbed system with wi\u2208\ud835\udcb2isubscript\ud835\udc64\ud835\udc56subscript\ud835\udcb2\ud835\udc56w_{i}\\in\\mathcal{W}_{i}italic_w start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT \u2208 caligraphic_W start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT. The black dashed line denotes \ud835\udc31^i\u2062[k|k]subscript^\ud835\udc31\ud835\udc56delimited-[]conditional\ud835\udc58\ud835\udc58\\hat{\\mathbf{x}}_{i}{[}k|k{]}over^ start_ARG bold_x end_ARG start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT [ italic_k | italic_k ], the colored dotted lines the actual trajectories.\nFigure 2: Trajectories of robots for \u03be11=2.5subscript\ud835\udf09112.5\\xi_{11}=2.5italic_\u03be start_POSTSUBSCRIPT 11 end_POSTSUBSCRIPT = 2.5.",
|
| 180 |
+
"url": "http://arxiv.org/html/2112.05965v5/trajectories_new.pdf"
|
| 181 |
+
},
|
| 182 |
+
"2(b)": {
|
| 183 |
+
"figure_path": "2112.05965v5_figure_2(b).png",
|
| 184 |
+
"caption": "(b) Trajectories of the undisturbed system, i.e., wi\u22610subscript\ud835\udc64\ud835\udc560w_{i}\\equiv 0italic_w start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT \u2261 0. The markers denote the robots\u2019 orientation. Nominal and actual trajectories coincide.\nFigure 2: Trajectories of robots for \u03be11=2.5subscript\ud835\udf09112.5\\xi_{11}=2.5italic_\u03be start_POSTSUBSCRIPT 11 end_POSTSUBSCRIPT = 2.5.",
|
| 185 |
+
"url": "http://arxiv.org/html/2112.05965v5/trajectories_2p5.pdf"
|
| 186 |
+
},
|
| 187 |
+
"3(a)": {
|
| 188 |
+
"figure_path": "2112.05965v5_figure_3(a).png",
|
| 189 |
+
"caption": "Figure 3: Actual inter-robot distance di\u2062j=\u2016\ud835\udc31^ipos\u2212\ud835\udc31^jpos\u2016subscript\ud835\udc51\ud835\udc56\ud835\udc57normsubscriptsuperscript^\ud835\udc31pos\ud835\udc56subscriptsuperscript^\ud835\udc31pos\ud835\udc57d_{ij}=||\\hat{\\mathbf{x}}^{\\text{pos}}_{i}-\\hat{\\mathbf{x}}^{\\text{pos}}_{j}||italic_d start_POSTSUBSCRIPT italic_i italic_j end_POSTSUBSCRIPT = | | over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT - over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT | | for \u03be11=3.0subscript\ud835\udf09113.0\\xi_{11}=3.0italic_\u03be start_POSTSUBSCRIPT 11 end_POSTSUBSCRIPT = 3.0. The black line denotes dmax=2.9superscript\ud835\udc51max2.9d^{\\text{max}}=2.9italic_d start_POSTSUPERSCRIPT max end_POSTSUPERSCRIPT = 2.9.",
|
| 190 |
+
"url": "http://arxiv.org/html/2112.05965v5/actual_distance_new.pdf"
|
| 191 |
+
},
|
| 192 |
+
"3(b)": {
|
| 193 |
+
"figure_path": "2112.05965v5_figure_3(b).png",
|
| 194 |
+
"caption": "Figure 3: Actual inter-robot distance di\u2062j=\u2016\ud835\udc31^ipos\u2212\ud835\udc31^jpos\u2016subscript\ud835\udc51\ud835\udc56\ud835\udc57normsubscriptsuperscript^\ud835\udc31pos\ud835\udc56subscriptsuperscript^\ud835\udc31pos\ud835\udc57d_{ij}=||\\hat{\\mathbf{x}}^{\\text{pos}}_{i}-\\hat{\\mathbf{x}}^{\\text{pos}}_{j}||italic_d start_POSTSUBSCRIPT italic_i italic_j end_POSTSUBSCRIPT = | | over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT - over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT | | for \u03be11=3.0subscript\ud835\udf09113.0\\xi_{11}=3.0italic_\u03be start_POSTSUBSCRIPT 11 end_POSTSUBSCRIPT = 3.0. The black line denotes dmax=2.9superscript\ud835\udc51max2.9d^{\\text{max}}=2.9italic_d start_POSTSUPERSCRIPT max end_POSTSUPERSCRIPT = 2.9.",
|
| 195 |
+
"url": "http://arxiv.org/html/2112.05965v5/actual_distance_new.pdf"
|
| 196 |
+
},
|
| 197 |
+
"3(c)": {
|
| 198 |
+
"figure_path": "2112.05965v5_figure_3(c).png",
|
| 199 |
+
"caption": "Figure 3: Actual inter-robot distance di\u2062j=\u2016\ud835\udc31^ipos\u2212\ud835\udc31^jpos\u2016subscript\ud835\udc51\ud835\udc56\ud835\udc57normsubscriptsuperscript^\ud835\udc31pos\ud835\udc56subscriptsuperscript^\ud835\udc31pos\ud835\udc57d_{ij}=||\\hat{\\mathbf{x}}^{\\text{pos}}_{i}-\\hat{\\mathbf{x}}^{\\text{pos}}_{j}||italic_d start_POSTSUBSCRIPT italic_i italic_j end_POSTSUBSCRIPT = | | over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT - over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT | | for \u03be11=3.0subscript\ud835\udf09113.0\\xi_{11}=3.0italic_\u03be start_POSTSUBSCRIPT 11 end_POSTSUBSCRIPT = 3.0. The black line denotes dmax=2.9superscript\ud835\udc51max2.9d^{\\text{max}}=2.9italic_d start_POSTSUPERSCRIPT max end_POSTSUPERSCRIPT = 2.9.",
|
| 200 |
+
"url": "http://arxiv.org/html/2112.05965v5/actual_distance_new.pdf"
|
| 201 |
+
},
|
| 202 |
+
"3(d)": {
|
| 203 |
+
"figure_path": "2112.05965v5_figure_3(d).png",
|
| 204 |
+
"caption": "Figure 3: Actual inter-robot distance di\u2062j=\u2016\ud835\udc31^ipos\u2212\ud835\udc31^jpos\u2016subscript\ud835\udc51\ud835\udc56\ud835\udc57normsubscriptsuperscript^\ud835\udc31pos\ud835\udc56subscriptsuperscript^\ud835\udc31pos\ud835\udc57d_{ij}=||\\hat{\\mathbf{x}}^{\\text{pos}}_{i}-\\hat{\\mathbf{x}}^{\\text{pos}}_{j}||italic_d start_POSTSUBSCRIPT italic_i italic_j end_POSTSUBSCRIPT = | | over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT - over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT | | for \u03be11=3.0subscript\ud835\udf09113.0\\xi_{11}=3.0italic_\u03be start_POSTSUBSCRIPT 11 end_POSTSUBSCRIPT = 3.0. The black line denotes dmax=2.9superscript\ud835\udc51max2.9d^{\\text{max}}=2.9italic_d start_POSTSUPERSCRIPT max end_POSTSUPERSCRIPT = 2.9.",
|
| 205 |
+
"url": "http://arxiv.org/html/2112.05965v5/actual_distance_new.pdf"
|
| 206 |
+
},
|
| 207 |
+
"3(e)": {
|
| 208 |
+
"figure_path": "2112.05965v5_figure_3(e).png",
|
| 209 |
+
"caption": "Figure 3: Actual inter-robot distance di\u2062j=\u2016\ud835\udc31^ipos\u2212\ud835\udc31^jpos\u2016subscript\ud835\udc51\ud835\udc56\ud835\udc57normsubscriptsuperscript^\ud835\udc31pos\ud835\udc56subscriptsuperscript^\ud835\udc31pos\ud835\udc57d_{ij}=||\\hat{\\mathbf{x}}^{\\text{pos}}_{i}-\\hat{\\mathbf{x}}^{\\text{pos}}_{j}||italic_d start_POSTSUBSCRIPT italic_i italic_j end_POSTSUBSCRIPT = | | over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT - over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT | | for \u03be11=3.0subscript\ud835\udf09113.0\\xi_{11}=3.0italic_\u03be start_POSTSUBSCRIPT 11 end_POSTSUBSCRIPT = 3.0. The black line denotes dmax=2.9superscript\ud835\udc51max2.9d^{\\text{max}}=2.9italic_d start_POSTSUPERSCRIPT max end_POSTSUPERSCRIPT = 2.9.",
|
| 210 |
+
"url": "http://arxiv.org/html/2112.05965v5/actual_distance_new.pdf"
|
| 211 |
+
},
|
| 212 |
+
"3(f)": {
|
| 213 |
+
"figure_path": "2112.05965v5_figure_3(f).png",
|
| 214 |
+
"caption": "Figure 3: Actual inter-robot distance di\u2062j=\u2016\ud835\udc31^ipos\u2212\ud835\udc31^jpos\u2016subscript\ud835\udc51\ud835\udc56\ud835\udc57normsubscriptsuperscript^\ud835\udc31pos\ud835\udc56subscriptsuperscript^\ud835\udc31pos\ud835\udc57d_{ij}=||\\hat{\\mathbf{x}}^{\\text{pos}}_{i}-\\hat{\\mathbf{x}}^{\\text{pos}}_{j}||italic_d start_POSTSUBSCRIPT italic_i italic_j end_POSTSUBSCRIPT = | | over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT - over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT | | for \u03be11=3.0subscript\ud835\udf09113.0\\xi_{11}=3.0italic_\u03be start_POSTSUBSCRIPT 11 end_POSTSUBSCRIPT = 3.0. The black line denotes dmax=2.9superscript\ud835\udc51max2.9d^{\\text{max}}=2.9italic_d start_POSTSUPERSCRIPT max end_POSTSUPERSCRIPT = 2.9.",
|
| 215 |
+
"url": "http://arxiv.org/html/2112.05965v5/actual_distance_new.pdf"
|
| 216 |
+
},
|
| 217 |
+
"4(a)": {
|
| 218 |
+
"figure_path": "2112.05965v5_figure_4(a).png",
|
| 219 |
+
"caption": "(a) \u03be11=1.5subscript\ud835\udf09111.5\\xi_{11}=1.5italic_\u03be start_POSTSUBSCRIPT 11 end_POSTSUBSCRIPT = 1.5\nFigure 4: Nominal inter-robot distances. Distance d^i\u2062j[k|k]=||\ud835\udc31^ipos[k|k]\u2212\ud835\udc31^jpos[k|k]||\\hat{d}_{ij}{[}k|k{]}=||\\hat{\\mathbf{x}}^{\\text{pos}}_{i}{[}k|k{]}-\\hat{%\n\\mathbf{x}}^{\\text{pos}}_{j}{[}k|k{]}||over^ start_ARG italic_d end_ARG start_POSTSUBSCRIPT italic_i italic_j end_POSTSUBSCRIPT [ italic_k | italic_k ] = | | over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT [ italic_k | italic_k ] - over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT [ italic_k | italic_k ] | | denotes the nominal distance between robot i\ud835\udc56iitalic_i and j\ud835\udc57jitalic_j; the dashed line indicates dmax\u2212\u03bdci\u2062jsuperscript\ud835\udc51maxsubscript\ud835\udf08subscript\ud835\udc50\ud835\udc56\ud835\udc57d^{\\text{max}}-\\nu_{c_{ij}}italic_d start_POSTSUPERSCRIPT max end_POSTSUPERSCRIPT - italic_\u03bd start_POSTSUBSCRIPT italic_c start_POSTSUBSCRIPT italic_i italic_j end_POSTSUBSCRIPT end_POSTSUBSCRIPT.",
|
| 220 |
+
"url": "http://arxiv.org/html/2112.05965v5/dist1p5_new.pdf"
|
| 221 |
+
},
|
| 222 |
+
"4(b)": {
|
| 223 |
+
"figure_path": "2112.05965v5_figure_4(b).png",
|
| 224 |
+
"caption": "(b) \u03be11=2.0subscript\ud835\udf09112.0\\xi_{11}=2.0italic_\u03be start_POSTSUBSCRIPT 11 end_POSTSUBSCRIPT = 2.0\nFigure 4: Nominal inter-robot distances. Distance d^i\u2062j[k|k]=||\ud835\udc31^ipos[k|k]\u2212\ud835\udc31^jpos[k|k]||\\hat{d}_{ij}{[}k|k{]}=||\\hat{\\mathbf{x}}^{\\text{pos}}_{i}{[}k|k{]}-\\hat{%\n\\mathbf{x}}^{\\text{pos}}_{j}{[}k|k{]}||over^ start_ARG italic_d end_ARG start_POSTSUBSCRIPT italic_i italic_j end_POSTSUBSCRIPT [ italic_k | italic_k ] = | | over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT [ italic_k | italic_k ] - over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT [ italic_k | italic_k ] | | denotes the nominal distance between robot i\ud835\udc56iitalic_i and j\ud835\udc57jitalic_j; the dashed line indicates dmax\u2212\u03bdci\u2062jsuperscript\ud835\udc51maxsubscript\ud835\udf08subscript\ud835\udc50\ud835\udc56\ud835\udc57d^{\\text{max}}-\\nu_{c_{ij}}italic_d start_POSTSUPERSCRIPT max end_POSTSUPERSCRIPT - italic_\u03bd start_POSTSUBSCRIPT italic_c start_POSTSUBSCRIPT italic_i italic_j end_POSTSUBSCRIPT end_POSTSUBSCRIPT.",
|
| 225 |
+
"url": "http://arxiv.org/html/2112.05965v5/dist2p0_new.pdf"
|
| 226 |
+
},
|
| 227 |
+
"4(c)": {
|
| 228 |
+
"figure_path": "2112.05965v5_figure_4(c).png",
|
| 229 |
+
"caption": "(c) \u03be11=2.5subscript\ud835\udf09112.5\\xi_{11}=2.5italic_\u03be start_POSTSUBSCRIPT 11 end_POSTSUBSCRIPT = 2.5\nFigure 4: Nominal inter-robot distances. Distance d^i\u2062j[k|k]=||\ud835\udc31^ipos[k|k]\u2212\ud835\udc31^jpos[k|k]||\\hat{d}_{ij}{[}k|k{]}=||\\hat{\\mathbf{x}}^{\\text{pos}}_{i}{[}k|k{]}-\\hat{%\n\\mathbf{x}}^{\\text{pos}}_{j}{[}k|k{]}||over^ start_ARG italic_d end_ARG start_POSTSUBSCRIPT italic_i italic_j end_POSTSUBSCRIPT [ italic_k | italic_k ] = | | over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT [ italic_k | italic_k ] - over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT [ italic_k | italic_k ] | | denotes the nominal distance between robot i\ud835\udc56iitalic_i and j\ud835\udc57jitalic_j; the dashed line indicates dmax\u2212\u03bdci\u2062jsuperscript\ud835\udc51maxsubscript\ud835\udf08subscript\ud835\udc50\ud835\udc56\ud835\udc57d^{\\text{max}}-\\nu_{c_{ij}}italic_d start_POSTSUPERSCRIPT max end_POSTSUPERSCRIPT - italic_\u03bd start_POSTSUBSCRIPT italic_c start_POSTSUBSCRIPT italic_i italic_j end_POSTSUBSCRIPT end_POSTSUBSCRIPT.",
|
| 230 |
+
"url": "http://arxiv.org/html/2112.05965v5/dist2p5_new.pdf"
|
| 231 |
+
},
|
| 232 |
+
"4(d)": {
|
| 233 |
+
"figure_path": "2112.05965v5_figure_4(d).png",
|
| 234 |
+
"caption": "(d) \u03be11=3.0subscript\ud835\udf09113.0\\xi_{11}=3.0italic_\u03be start_POSTSUBSCRIPT 11 end_POSTSUBSCRIPT = 3.0\nFigure 4: Nominal inter-robot distances. Distance d^i\u2062j[k|k]=||\ud835\udc31^ipos[k|k]\u2212\ud835\udc31^jpos[k|k]||\\hat{d}_{ij}{[}k|k{]}=||\\hat{\\mathbf{x}}^{\\text{pos}}_{i}{[}k|k{]}-\\hat{%\n\\mathbf{x}}^{\\text{pos}}_{j}{[}k|k{]}||over^ start_ARG italic_d end_ARG start_POSTSUBSCRIPT italic_i italic_j end_POSTSUBSCRIPT [ italic_k | italic_k ] = | | over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT [ italic_k | italic_k ] - over^ start_ARG bold_x end_ARG start_POSTSUPERSCRIPT pos end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT [ italic_k | italic_k ] | | denotes the nominal distance between robot i\ud835\udc56iitalic_i and j\ud835\udc57jitalic_j; the dashed line indicates dmax\u2212\u03bdci\u2062jsuperscript\ud835\udc51maxsubscript\ud835\udf08subscript\ud835\udc50\ud835\udc56\ud835\udc57d^{\\text{max}}-\\nu_{c_{ij}}italic_d start_POSTSUPERSCRIPT max end_POSTSUPERSCRIPT - italic_\u03bd start_POSTSUBSCRIPT italic_c start_POSTSUBSCRIPT italic_i italic_j end_POSTSUBSCRIPT end_POSTSUBSCRIPT.",
|
| 235 |
+
"url": "http://arxiv.org/html/2112.05965v5/dist3p0_new2.pdf"
|
| 236 |
+
},
|
| 237 |
+
"5(a)": {
|
| 238 |
+
"figure_path": "2112.05965v5_figure_5(a).png",
|
| 239 |
+
"caption": "(a) Initialization\nFigure 5: Formation control problem with collision avoidance constraints. Robots 1, 2, 3, 4 are denoted by blue, green, red, violet.",
|
| 240 |
+
"url": "http://arxiv.org/html/2112.05965v5/iterative_init_it4.pdf"
|
| 241 |
+
},
|
| 242 |
+
"5(b)": {
|
| 243 |
+
"figure_path": "2112.05965v5_figure_5(b).png",
|
| 244 |
+
"caption": "(b) Fixed reference trajectories.\nFigure 5: Formation control problem with collision avoidance constraints. Robots 1, 2, 3, 4 are denoted by blue, green, red, violet.",
|
| 245 |
+
"url": "http://arxiv.org/html/2112.05965v5/iterative_traj_fixed_ref_new.pdf"
|
| 246 |
+
},
|
| 247 |
+
"5(c)": {
|
| 248 |
+
"figure_path": "2112.05965v5_figure_5(c).png",
|
| 249 |
+
"caption": "(c) 1 iteration (Algorithm 2)\nFigure 5: Formation control problem with collision avoidance constraints. Robots 1, 2, 3, 4 are denoted by blue, green, red, violet.",
|
| 250 |
+
"url": "http://arxiv.org/html/2112.05965v5/iterative_traj_it1_new.pdf"
|
| 251 |
+
},
|
| 252 |
+
"5(d)": {
|
| 253 |
+
"figure_path": "2112.05965v5_figure_5(d).png",
|
| 254 |
+
"caption": "(d) 4 iterations (algorithm Rem. 13)\nFigure 5: Formation control problem with collision avoidance constraints. Robots 1, 2, 3, 4 are denoted by blue, green, red, violet.",
|
| 255 |
+
"url": "http://arxiv.org/html/2112.05965v5/iterative_traj_it4_new.pdf"
|
| 256 |
+
}
|
| 257 |
+
},
|
| 258 |
+
"validation": true,
|
| 259 |
+
"references": [
|
| 260 |
+
{
|
| 261 |
+
"1": {
|
| 262 |
+
"title": "Communications and Control Engineering, Springer, London, 1st. ed.,\n2011.",
|
| 263 |
+
"author": "L. Gr\u00fcne and J. Pannek, Nonlinear Model Predictive Control Theory and\nAlgorithms.",
|
| 264 |
+
"venue": null,
|
| 265 |
+
"url": null
|
| 266 |
+
}
|
| 267 |
+
},
|
| 268 |
+
{
|
| 269 |
+
"2": {
|
| 270 |
+
"title": "Dordrecht: Springer Netherlands, 2014 ed., 2014.",
|
| 271 |
+
"author": "R. Negenborn and J. Maestre, eds., Distributed Model Predictive Control\nMade Easy, vol. 69 of Intelligent Systems, Control and Automation:\nScience and Engineering.",
|
| 272 |
+
"venue": null,
|
| 273 |
+
"url": null
|
| 274 |
+
}
|
| 275 |
+
},
|
| 276 |
+
{
|
| 277 |
+
"3": {
|
| 278 |
+
"title": "16th IFAC World Congress.",
|
| 279 |
+
"author": "S. Rakovi\u0107 and D. Mayne, \u201cA simple tube controller for efficient robust model\npredictive control of constrained linear discrete-time systems subject to\nbounded disturbances,\u201d IFAC Proceedings Volumes, vol. 38, no. 1,\npp. 241 \u2013 246, 2005.",
|
| 280 |
+
"venue": null,
|
| 281 |
+
"url": null
|
| 282 |
+
}
|
| 283 |
+
},
|
| 284 |
+
{
|
| 285 |
+
"4": {
|
| 286 |
+
"title": "Madison, Wis.: Nob Hill Publ., 1. printing ed., 2009.",
|
| 287 |
+
"author": "J. B. Rawlings and D. Q. Mayne, Model predictive control: theory and\ndesign.",
|
| 288 |
+
"venue": null,
|
| 289 |
+
"url": null
|
| 290 |
+
}
|
| 291 |
+
},
|
| 292 |
+
{
|
| 293 |
+
"5": {
|
| 294 |
+
"title": "Encyclopedia of mathematics and its applications, 2nd ed., 2014.",
|
| 295 |
+
"author": "R. Schneider, Convex bodies : the Brunn-Minkowski theory.",
|
| 296 |
+
"venue": null,
|
| 297 |
+
"url": null
|
| 298 |
+
}
|
| 299 |
+
},
|
| 300 |
+
{
|
| 301 |
+
"6": {
|
| 302 |
+
"title": "\\urlhttp://control.ee.ethz.ch/ mpt.",
|
| 303 |
+
"author": "M. Herceg, M. Kvasnica, C. Jones, and M. Morari, \u201cMulti-Parametric Toolbox\n3.0,\u201d in Proc. of the European Control Conference, (Z\u00fcrich,\nSwitzerland), pp. 502\u2013510, 2013.",
|
| 304 |
+
"venue": null,
|
| 305 |
+
"url": null
|
| 306 |
+
}
|
| 307 |
+
},
|
| 308 |
+
{
|
| 309 |
+
"7": {
|
| 310 |
+
"title": "KTH Royal Institute of Technology, Licentiate Thesis.",
|
| 311 |
+
"author": "A. Wiltz, \u201cDistributed control for spatio-temporally constrained systems,\u201d\n2023.",
|
| 312 |
+
"venue": null,
|
| 313 |
+
"url": null
|
| 314 |
+
}
|
| 315 |
+
}
|
| 316 |
+
],
|
| 317 |
+
"url": "http://arxiv.org/html/2112.05965v5"
|
| 318 |
+
}
|
20241004/2208.10570v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2304.01484v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2304.10207v3.json
ADDED
|
@@ -0,0 +1,233 @@
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Non-destructive Fault Diagnosis of Electronic Interconnects by Learning Signal Patterns of Reflection Coefficient in the Frequency Domain",
|
| 3 |
+
"abstract": "Fault detection and diagnosis of the interconnects are crucial for prognostics and health management (PHM) of electronics. Traditional methods, which rely on electronic signals as prognostic factors, often struggle to accurately identify the root causes of defects without resorting to destructive testing. Furthermore, these methods are vulnerable to noise interference, which can result in false alarms. To address these limitations, in this paper, we propose a novel, non-destructive approach for early fault detection and accurate diagnosis of interconnect defects, with improved noise resilience. Our approach uniquely utilizes the signal patterns of the reflection coefficient across a range of frequencies, enabling both root cause identification and severity assessment. This approach departs from conventional time-series analysis and effectively transforms the signal data into a format suitable for advanced learning algorithms. Additionally, we introduce a novel severity rating ensemble learning (SREL) approach, which enhances diagnostic accuracy and robustness in noisy environments. Experimental results demonstrate that the proposed method is effective for fault detection and diagnosis and has the potential to extend to real-world industrial applications.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Despite the continuous demand for higher computing power, the performance of essential building blocks, including transistors, memories, and processors, is plateauing. Following the aggressive downscaling of advanced integrated circuits (ICs), electronic interconnects have become the bottleneck for the reliability and performance of entire electronic systems [1 ###reference_b1###]. Defects in these interconnects can lead to system failures, decreased performance, and reduced product lifespan, making their early detection and diagnosis critical. Regardless of where or when defects in the interconnects occur, they should be detected early on, and the corresponding parts replaced. However, existing diagnostic methods for electronic interconnects face significant challenges. There has been a growing interest in monitoring the ongoing health of products and systems to predict failures before a catastrophe. Consequently, prognostics and health management (PHM) technologies have been developed [2 ###reference_b2###]. PHM methods can predict failure, diagnose defects, and eventually improve system quality as well as extend system life. Determining the cause of the failure is essential to enabling forecasted maintenance and reducing downtime. Extensive studies have been conducted on the fault diagnosis of mechanical systems by processing vibration signals [3 ###reference_b3###, 4 ###reference_b4###].\nIn contrast, for electronic interconnects, there is a lack of research on non-destructive methods that can identify both the severity and root causes of defects.\nRather, existing diagnosis methods for electronics require destructive testing to determine the root causes of the defects. Furthermore, previous defect detection methods utilizing DC resistance [5 ###reference_b5###, 6 ###reference_b6###], time domain reflectometry (TDR) [7 ###reference_b7###, 8 ###reference_b8###, 9 ###reference_b9###, 10 ###reference_b10###, 11 ###reference_b11###], Scattering parameter (S-parameter) [12 ###reference_b12###, 13 ###reference_b13###, 14 ###reference_b14###, 15 ###reference_b15###], radio frequency (RF) impedance [16 ###reference_b16###, 17 ###reference_b17###, 18 ###reference_b18###], and digital techniques [19 ###reference_b19###, 20 ###reference_b20###] are vulnerable to noise, eventually leading to false alarms.\n###figure_1### These methods rely on electronic signals at designated operating frequencies (or in the case of DC resistance, at 0 Hz), which limits their effectiveness. While methods tracking the time evolution of the parameters can estimate the severity of defects effectively, their one-dimensional (1D) trends are insufficient to distinguish the root causes of the defects.\nNevertheless, previous fault detection methods for electronic packages using electronic signals have not yet utilized the signal patterns obtained in a full range of operating frequencies. Instead, often discarded were outliers and irregularities in the signals from defective interconnects. For normal interconnects, certain signal patterns of electronic signals reveal key features, serving as indicators of interconnect characteristics [21 ###reference_b21###]. This observation suggests that the full signal patterns, including those previously considered as outliers, may contain valuable information about defects. 
Similarly, learning to recognize signal patterns of defective interconnects would enable the extraction of valuable information about the defects.\nTherefore, there is a critical need for a diagnostic approach that can non-destructively detect defects, determine their root causes, and operate effectively in noisy industrial environments. The motivation behind our research is to address this need by developing a method that leverages the full frequency range of reflection coefficient signal patterns, combined with robust machine learning techniques, to provide comprehensive fault diagnosis of electronic interconnects.\nTo address these issues, we propose a novel approach based on discovering specific patterns of electronic signals that provide information about both the severity and cause of defects. We focus on the reflection coefficient signal patterns in the frequency domain, which are distinct from traditional time-series analysis methods for fault diagnosis of electronic packages. The reflection coefficient is an electronic parameter that describes how much of a voltage wave is reflected by an impedance discontinuity in the transmission medium. It is also reported that the coefficient is sensitive to defects in the interconnects[7 ###reference_b7###, 8 ###reference_b8###]. By analyzing the reflection coefficient across a full range of frequencies, we aim to capture comprehensive signal patterns that can reveal detailed characteristics of defects. In this study, we gather the reflection coefficient signal patterns of electronic interconnects with various causes and severity levels of defects. The electronic interconnects serve as representative forms of electronic interconnection [22 ###reference_b22###]. Then, by applying machine learning (ML) and deep learning (DL) techniques, we diagnose the defects only with the signal patterns of the reflection coefficient. The patterns provide an opportunity to apply the learning algorithms to root cause analysis, while the time series data of electronic signals only estimate the severity of defects.\nFurthermore, we introduce a novel severity rating ensemble learning (SREL) approach for fault detection and diagnosis of electronic interconnects, which fully utilizes characteristics of the reflection coefficient signal patterns. Although the conventional ML and DL algorithms with the reflection coefficient patterns provide effective performance in fault diagnosis of electronic interconnects, the conventional algorithms remain susceptible to noise. This vulnerability is a significant concern, particularly given the prevalence of noise in industrial environments. The proposed SREL method improves diagnostic performance and robustness to noise in real industrial environments. Combining the advantages of the reflection coefficient patterns and the SREL method, we aim to significantly advance the field of non-destructive fault diagnosis, improving the reliability and performance of electronic systems in industrial environments. In the experimental results, we demonstrate that the signal patterns of the reflection coefficient extract distinguishable features for different defect states and the proposed SREL method outperforms conventional ML and DL methods, particularly under noisy conditions. 
As shown in Figure 14 ###reference_###, our method achieves early detection, noise-robustness, and non-destructive analysis of root causes, overcoming the limitations of previous studies for fault diagnosis using electronic signals.\nThe main contributions of this study are as follows:\nWe address the critical need for non-destructive, noise-robust fault diagnosis methods by obtaining signal patterns of the reflection coefficient according to various defect states of electronic interconnects, demonstrating that these patterns enable both early and accurate diagnosis.\nWe show that the signal patterns in the frequency domain are capable of root cause analysis, whereas the previous methods using electronic signals such as DC resistance, TDR, and S-parameters at designated frequencies are not.\nWe demonstrate that the signal patterns are effective features for learning algorithms by conducting dimension reduction on the patterns and providing diagnostic results with the conventional ML and DL methods.\nWe propose a novel severity rating ensemble learning (SREL) approach that fully exploits the characteristics of the signal patterns so that we enhance the diagnostic performance and noise robustness.\nThe remainder of this paper is organized as follows. Section 2 ###reference_### presents a comprehensive review of related works in the field. Section 3 ###reference_### details the proposed approach, including the reflection coefficient patterns and the SREL approach. In Section 4 ###reference_###, we describe the experimental design used to evaluate the effectiveness of our approach. Section 5 ###reference_### presents the quantitative and qualitative experimental results. Finally, Section 6 ###reference_### concludes this paper."
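One of the contributions listed above is verified in practice by reducing the dimension of the reflection-coefficient patterns and inspecting whether different defect states separate in the resulting feature space. A minimal sketch of such a reduction with plain PCA is given below; the pattern length, number of samples, and the two synthetic "states" are purely illustrative assumptions, not the paper's measured data.

```python
import numpy as np

def pca_reduce(patterns, n_components=2):
    """Project reflection-coefficient patterns (rows = samples, columns =
    frequency points) onto their first principal components, as a quick
    check that patterns from different defect states separate in a
    low-dimensional feature space."""
    X = patterns - patterns.mean(axis=0)
    # Right singular vectors give the principal directions in frequency space.
    _, _, vt = np.linalg.svd(X, full_matrices=False)
    return X @ vt[:n_components].T

# Hypothetical dataset: 20 patterns of 1001 frequency points for two states.
rng = np.random.default_rng(1)
normal = rng.normal(0.2, 0.01, size=(10, 1001))
defect = rng.normal(0.5, 0.05, size=(10, 1001))
embedding = pca_reduce(np.vstack([normal, defect]))
print(embedding.shape)   # (20, 2)
```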
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Related Works",
|
| 15 |
+
"text": "Previous studies for fault detection and diagnosis of electronic interconnects obtained the signal values at designated operating frequencies and tracked the time evolution. This process can estimate the severity of defects effectively, but it is unable to determine root causes without the disassembly of electronic packages and destructive testing. DC resistance, S-parameter, RF impedance, digital signals, and TDR are the electronic parameters that have been utilized in that manner. The DC resistance measurement method has been widely used for reliability monitoring of electronic interconnects because of its simplicity and convenience. The DC resistance responds to a short or an open-state conductor quite well. However, it is not well-suited to indicate the evolution of defects [23 ###reference_b23###]. To overcome this limitation, studies have suggested various electronic parameters at high frequencies as indicators of defect growth. The electronic signals at the high frequencies capitalize on the skin effect to detect defects at an early stage [7 ###reference_b7###].\nS-parameters have demonstrated superior sensitivity compared to DC resistance as defects progress [12 ###reference_b12###]. Researchers, such as Putaala et al. [13 ###reference_b13###], monitored S-parameters during temperature cycling tests on Ball Grid Array (BGA) components, revealing qualitative changes in S-parameters correlating with component degradation, unlike the negligible change in DC resistance. Foley et al. [14 ###reference_b14###] proposed void detection in transmission lines by monitoring changes in the leakage conductance parameter derived from S-parameter measurements. RF impedance, sensitive to surface defects due to the skin effect, has shown promise in detecting electronic component defects [15 ###reference_b15###]. Mosavirik et al. [24 ###reference_b24###] proposed an on-chip impedance sensing approach for detecting various classes of tamper events in cryptographic devices. Digital signal degradation resulting from physical circuit damage has also been explored [19 ###reference_b19###], leading to on-chip health sensing methods for interconnect degradation detection [20 ###reference_b20###]. TDR, a time domain parameter of the reflection coefficient, has been widely applied for fault detection in wiring networks [10 ###reference_b10###, 11 ###reference_b11###]. While these methods offer valuable insights, they exhibit dependency on operating frequencies, making them susceptible to noise and false alarms. The robustness of fault diagnosis methods to noise in high-frequency parameters remains an understudied aspect.\nApplications of artificial intelligence (AI) primarily involved ML regression techniques [17 ###reference_b17###, 25 ###reference_b25###, 26 ###reference_b26###] for electronics fault detection, focusing on remaining useful life estimation. Recent works extend the application of ML and DL techniques in diverse contexts. Chien [27 ###reference_b27###] proposed a DL-based fault diagnosis framework for semiconductor backend processes, demonstrating its effectiveness in predicting maintenance needs. W\u00e4chter et al. [28 ###reference_b28###] used ML for anomaly detection on a system-on-chip under gamma radiation. Bhatti et al. [29 ###reference_b29###] presented a neural network-based signal integrity assessment model for on-chip interconnects in integrated circuits. Fang et al. 
[30 ###reference_b30###] introduced a prior knowledge-guided teacher-student model for self-supervised intermittent fault detection in analog circuits. Also, the idea of defect identification using frequency or phase measurement and machine learning methods is established in optical metrology [31 ###reference_b31###, 32 ###reference_b32###, 33 ###reference_b33###, 34 ###reference_b34###].\nDespite these advancements, the application of classification algorithms with DL methods has been limited by the constraints of 1D data, often necessitating disassembly and destructive testing for the root cause analysis. Also, the noise robustness of high-frequency parameter-based methods remains inadequately explored, in spite of their vulnerability to noise. To address these gaps, this study proposes a fault detection and diagnosis method that leverages signal patterns in the full range of operating frequencies to reduce dependency on a single frequency, enhancing robustness to noise and providing richer information for learning algorithms. Our focus is on the reflection coefficient, known for its sensitivity to faults in electronic systems."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Proposed Method",
|
| 21 |
+
"text": "###figure_2### ###figure_3###"
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "Acquisition of Reflection Coefficient Signal Patterns",
|
| 27 |
+
"text": "As depicted in Figure 2 ###reference_###, the interconnects are placed inside the electronic packages. Thus, in order to investigate the defects that occur in the interconnects, disassembly of the packages has been required so far. However, our proposed method utilizes electronic signals, namely the reflection coefficient, obtained outside the packages to implement non-destructive diagnosis. As shown in Figure 3 ###reference_###, the reflection coefficient of an interconnect can be measured by a vector network analyzer at contact pads that are fabricated to be interfaces between the interconnect and the network analyzer. The reflection coefficient, , is expressed as follows [35 ###reference_b35###].\nwhere , , , are incident, reflected voltage waves and load, characteristic impedance respectively. The reflection coefficient is used to define the reflected wave with respect to the incident wave. It can be obtained in time or frequency domain, and we compared both in this study. Also, as implied by the equation, the reflection coefficient is a direct indication of the impedance discontinuity. Basically, the reflection coefficient is obtained in the frequency domain. The coefficient captures the stimulus-response waveforms that contain the behavioral models of the interconnects, encompassing resistance, capacitance, inductance, and changes in electrical properties resulting from physical damage. However, the signal patterns of the electrical parameters, along with their relationship to defect states, have not been thoroughly investigated while the time domain characteristics of the parameters have been widely utilized in fault detection and diagnosis [7 ###reference_b7###, 8 ###reference_b8###, 9 ###reference_b9###, 10 ###reference_b10###, 11 ###reference_b11###]. Instead, RF engineers have usually discarded irregularities in signal patterns caused by the defects. In this study, we leverage the reflection coefficient in the frequency domain, which provides valuable two-dimensional (2D) pattern information beyond single-point measurement at a designated time and operating frequency. By identifying specific patterns that correspond to different defect causes and severity levels, we can develop ML and DL algorithms capable of simultaneously detecting defects and providing information on root causes. The signal patterns are fully exploited throughout this study for reliability assessment on electronic interconnects. To the best of our knowledge, this is the first investigation into root cause analysis using electronic signals in this context."
|
| 28 |
+
},
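A minimal numerical sketch of the reflection-coefficient relation used in Section 3.1, Gamma = (Z_L - Z_0)/(Z_L + Z_0); the 50-ohm characteristic impedance, the 0-14 GHz sweep, and the crack-like series impedance are illustrative assumptions, not values taken from the paper.

import numpy as np

# Characteristic impedance of the line (50 ohm assumed for illustration).
Z0 = 50.0

# Hypothetical frequency sweep covering the 0-14 GHz measurement band.
freq_hz = np.linspace(1e6, 14e9, 1001)

# Hypothetical load impedance of a defective interconnect: a small series
# resistance and inductance standing in for a crack (illustrative values only).
R_defect, L_defect = 0.5, 1e-10
Z_load = Z0 + R_defect + 1j * 2 * np.pi * freq_hz * L_defect

# Reflection coefficient and its magnitude in dB, i.e. the magnitude-frequency
# "signal pattern" that is fed to the learning algorithms.
gamma = (Z_load - Z0) / (Z_load + Z0)
s11_db = 20 * np.log10(np.abs(gamma) + 1e-12)
print(s11_db[::200])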
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "Severity Rating Ensemble Learning (SREL) Approach for Fault Diagnosis of Electronic Interconnects",
|
| 33 |
+
"text": "###figure_4### To accurately and robustly extract information regarding the defect from the signal patterns of the reflection coefficient, we propose a severity rating ensemble learning (SREL) approach to determine both the root causes and severity levels. Note that it is also the first research work to apply even the conventional AI algorithms to the electronic signal patterns in terms of fault diagnosis for the interconnects. The SREL is specialized in learning the signal patterns that vary distinctly according to root causes and then subtly to severity levels of defects. As shown in Figure 4 ###reference_###, an input pattern is fed to the proposed SREL model that is trained with signal pattern datasets gathered according to various defect states. The proposed algorithm contains a series of baseline networks that can utilize a variety of learning models. Subsequently, the baseline networks are tuned with the ordinal severity and cause labels through supervised learning. The training and test data are obtained through experiments and labeled with the causes of defects and their severity levels.\nFor example, the label A1 indicates the reflection coefficient signal pattern of an interconnect with the defect cause A and a severity level of 1. Users can adjust the number of causes and severity levels according as their applications. To train each baseline network, the entire dataset is divided into two subsets, with severity levels higher or lower than the target severity level. The final fully connected layer computes the probability that the input belongs to the target class using the logistic function. For example, the baseline network to recognize the normal state is trained with the two data subsets: the signal patterns of the normal interconnects and those of the defective ones. The binary output of this baseline network is 1 if the input signal pattern is of the normal state, and 0 if defective. We denote the output of the normal baseline network as . Regarding a baseline network targeting a certain defect state, the training process is as follows. The training dataset is divided into two subsets in reference to the cause of the defect and the severity level :\nwhere the cause of defect , severity level , and is the reflection coefficient vector with the cause and severity level . Each dataset is used to train the corresponding baseline network in the proposed SREL. Here, the dataset includes the signal pattern data for the other causes of defect except for the target class. After training, all baseline networks can output a binary decision, 0 or 1. They denote whether the input reflection coefficient vector belongs to the target class. Given the unknown reflection coefficient pattern , we use the baseline networks to make a set of binary decisions and then aggregate them to predict the severity level regarding the cause of the defect , .\nwhere is the output of the baseline network and denotes the truth-test operator, which is 1 provided that the inner condition is true, and 0 otherwise. Thus, the output of the proposed SREL model is in a form . The cause and severity of defects from the output vector can be simultaneously determined as follows:\nwhere obtains the maximum value in the bracket, and finds the position of the max value in the bracket. When the severity levels of different causes are equal, the priority lies on the cause whose baseline networks produce a higher sum of values from the logistic functions. 
The severity labels are naturally ordinal, and the signal patterns of interconnects with the same cause of defect share similar features. Compared with the softmax-based multiclass classification trained with the complete dataset, the SREL approach can maintain the relative ordinal relationship within the same defect cause group. In this study, we apply the SREL approach to datasets of normal state, mechanical, and corrosion defects of electronic interconnects. Figure 5 ###reference_### depicts the detailed process of fault detection and diagnosis of a defective interconnect by using the SREL approach in this paper. The signal pattern obtained from the interconnect is fed to the pre-trained SREL network. When the aggregated output is , the maximum value of the output is 3 at the third argument which corresponds to the corrosion defect. Then, the final diagnosis is that the interconnect is defective with corrosion of severity Lv. 3 (C3).\n###figure_5###"
|
| 34 |
+
},
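A sketch of the binary-decision aggregation described in Section 3.2, assuming hypothetical logistic outputs of the pre-trained baseline networks; the function name, the treatment of the normal-state network, and the numeric values are illustrative, not the authors' implementation.

# Hypothetical logistic outputs of the pre-trained baseline networks for one input
# pattern. Keys are (cause, severity level); each value is the probability that the
# input has at least that severity for that cause (numbers are illustrative only).
logits = {
    ("M", 1): 0.92, ("M", 2): 0.31, ("M", 3): 0.08,
    ("C", 1): 0.97, ("C", 2): 0.88, ("C", 3): 0.74,
}
p_normal = 0.05  # output of the normal-state baseline network (assumed handling)

def srel_aggregate(logits, p_normal, threshold=0.5):
    # Count positive binary decisions per cause; ties between causes with equal
    # severity are broken by the higher sum of logistic outputs, as in Section 3.2.
    causes = sorted({c for c, _ in logits})
    severity = {c: sum(int(logits[(c, k)] > threshold) for ck, k in logits if ck == c)
                for c in causes}
    score = {c: sum(v for (ck, _), v in logits.items() if ck == c) for c in causes}
    if p_normal > threshold and max(severity.values()) == 0:
        return "Normal", 0
    best = max(causes, key=lambda c: (severity[c], score[c]))
    return best, severity[best]

print(srel_aggregate(logits, p_normal))  # -> ('C', 3), i.e. corrosion of severity Lv. 3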
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Experimental Design",
|
| 39 |
+
"text": ""
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4.1",
|
| 43 |
+
"parent_section_id": "4",
|
| 44 |
+
"section_name": "Test Vehicles and Measurements",
|
| 45 |
+
"text": ""
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4.1.1",
|
| 49 |
+
"parent_section_id": "4.1",
|
| 50 |
+
"section_name": "4.1.1 Fabrication of test vehicles",
|
| 51 |
+
"text": "###figure_6### As a representative form of electronic interconnects, Cu interconnection was used for the target of our proposed fault diagnosis method. For the convenience of experiments and data acquisition, batches of Cu interconnect specimens were fabricated as shown in Figure 6 ###reference_###. Gold contact pads were deposited on Cu signal lines. In addition, gold electrodes were fabricated on both sides of the signal line to utilize ground-signal-ground probe tips for the reflection coefficient measurements. The gold pads and electrodes enabled reference points that were unaffected by the environmental stresses. 56 specimens were produced on a glass substrate to obtain as many data as possible with a single batch. In total, 15 batches were used in this study.\n###figure_7###"
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4.1.2",
|
| 55 |
+
"parent_section_id": "4.1",
|
| 56 |
+
"section_name": "4.1.2 Inducing and evaluating defects",
|
| 57 |
+
"text": "Most electronic part failures are packaging-related, and the electronic packages are susceptible to environmental factors. For instance, thermal expansion produces mechanical stresses that may cause material fatigue, thus leading to crack evolution. Humidity and aggressive chemicals can cause corrosion of the packaging materials. Among the aforementioned failure modes described above, we assumed the two representative root causes: Mechanical and corrosion defects. In addition, regarding the severity level, we classified the defects into four levels: the normal state, Lv. 1 (defective but still usable), Lv. 2 (highly recommended for replacement), and Lv. 3 (out of order). As examples of mechanical defects in the interconnect, we precisely induced 1 mm long and 10 m wide cracks in our specimens with a laser cutting machine. The specimens with 1, 3, and 5 cracks were labeled as M1, M2, and M3, respectively, which represent the mechanical defect levels of severity, as depicted in Figure 7 ###reference_### (a). To produce corrosion defects in the interconnect specimens, the batches were exposed to the environmental profile according to the MIL-STD-810G humidity method. The environmental conditions were provided by a temperature and humidity chamber (ESPEC). The specimens were photographed every 12 hours. The interconnect specimens with corrosion of (5-30) %, (30-60) %, and (60-100) % were classified as C1, C2, and C3, respectively. Figure 7 ###reference_### (b) shows the result of evaluating and labeling corrosion defects in the electronic interconnects. The corrosion defects were quantitatively evaluated using an image processing technique as described in Figure 7 ###reference_### (c). Note that images in Figure 7 ###reference_### were not utilized for ML and DL techniques, but the reflection coefficient patterns measured from the specimens were fed to the learning algorithms.\n###figure_8###"
|
| 58 |
+
},
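One plausible way to obtain the corroded-area fraction and the C1/C2/C3 labels of Section 4.1.2 with simple image processing; the brightness threshold and the synthetic image are assumptions for illustration, not the paper's exact procedure.

import numpy as np

def corrosion_fraction(rgb, mask):
    # Fraction of the masked interconnect area whose pixels look corroded
    # (darkened/discolored); the brightness threshold of 120 is an assumption.
    gray = rgb.astype(np.float32).mean(axis=2)
    corroded = (gray < 120) & mask
    return corroded.sum() / max(mask.sum(), 1)

def severity_label(fraction):
    # Map the corroded-area fraction to the class boundaries of Section 4.1.2:
    # (5-30) % -> C1, (30-60) % -> C2, (60-100) % -> C3.
    if fraction < 0.05:
        return "Normal"
    if fraction < 0.30:
        return "C1"
    if fraction < 0.60:
        return "C2"
    return "C3"

# Synthetic example: a 100x100 specimen photo with a dark patch covering ~40 % of it.
img = np.full((100, 100, 3), 200, dtype=np.uint8)
img[:40, :, :] = 60
roi = np.ones((100, 100), dtype=bool)
frac = corrosion_fraction(img, roi)
print(frac, severity_label(frac))  # ~0.40 -> 'C2'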
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.1.3",
|
| 61 |
+
"parent_section_id": "4.1",
|
| 62 |
+
"section_name": "4.1.3 Electronic signal measurements: DC resistance, TDR, S-parameters, and reflection coefficient patterns",
|
| 63 |
+
"text": "Figure 8 ###reference_### shows an experimental setup for the RF electronic signal measurements used to investigate the reflection-coefficient-pattern indication of various defects in electronic interconnects. With this setup, TDR and S-parameters can be obtained as well. The batch-type test vehicle contained 56 specimens in total. It allowed convenient measurement by using a probe station and helped the specimens be exposed uniformly to the environmental stress. As can be seen in Figure 8 ###reference_###, the test vehicle was placed on the probe station (MSTECH Model 5500), and the RF signals were measured. This ex-situ measurement setup helped avoid the degradation of RF cables and connectors, thus concentrating only on the electronic signals related to defect evolution in the interconnect. Both ends of the specimen were contacted by high-frequency probes (ground-signal-ground type, GCB Industries Inc. 40A-GSG-2540-EDP) connected to each port of a vector network analyzer (KEYSIGHT E5063A), thus comprising a two-port network. To investigate the signal-pattern indication of various causes and severity levels of defects in the interconnects, we focused on the change in the reflection coefficient that represents the signal returned to the incident port from the interconnect. The network analyzer also provided a time domain function, which mathematically transforms waveforms in the frequency domain into waveforms in the time domain. Specifically, we used the time domain transformation function of the E5063A analyzer to obtain TDR values. To acquire S-parameters at a designated frequency, we measured the S11 parameters at 8 GHz. In addition, the DC resistance between the ends of the specimen was measured by using a digital multimeter (Fluke 1587 FC). Overall, the reflection coefficient patterns were compared with the electronic signals for the previous fault detection methods such as DC resistance, TDR, S-parameters at a designated frequency. Table 1 ###reference_### summarizes the fault diagnosis methods using the electronic signals evaluated in this study."
|
| 64 |
+
},
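To make the contrast in Table 1 concrete, the sketch below extracts the single-frequency S11 value (the 1D feature of previous methods) and the full magnitude-frequency pattern (the 2D feature used in this study) from one sweep; the synthetic sweep and the normalization step are assumptions.

import numpy as np

# Hypothetical measured sweep: 0-14 GHz, 201 points, S11 magnitude in dB.
freq_hz = np.linspace(0, 14e9, 201)
s11_db = -10 + 3 * np.sin(freq_hz / 1e9)   # placeholder pattern, not measured data

# 1D feature used by earlier methods: the S11 value at the designated 8 GHz point.
idx_8ghz = np.argmin(np.abs(freq_hz - 8e9))
s11_at_8ghz = s11_db[idx_8ghz]

# 2D (magnitude-frequency) feature used in this work: the whole pattern vector,
# here standardized before being fed to the ML/DL models (normalization is assumed).
pattern = (s11_db - s11_db.mean()) / (s11_db.std() + 1e-8)
print(s11_at_8ghz, pattern.shape)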
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.2",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "Comparison Models",
|
| 69 |
+
"text": "To evaluate the proposed method with the dataset we collected, we considered nine methods for the classification of causes and severity levels of the defects. These methods can be categorized into three general methods: our SREL approach, multiclass-CNN, and ML. Three types of CNN baseline networks, EfficientNet [36 ###reference_b36###], 1D-CNN [37 ###reference_b37###, 38 ###reference_b38###, 39 ###reference_b39###] with 3 layers (1DCNN-3) and 1 layer (1DCNN-1) respectively, were tested with SREL and multiclass-CNN. The multiclass-CNN method is a conventional DL technique for classification using the softmax function. In addition, two conventional ML methods, random forest, and k-mean clustering were evaluated to contrast the performance of DL and ML methods with additive noise."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.3",
|
| 73 |
+
"parent_section_id": "4",
|
| 74 |
+
"section_name": "Data Description and Implementation Details",
|
| 75 |
+
"text": "In our experiments, the signal-pattern data of the reflection coefficient were extracted from 790 samples of electronic interconnects. The numbers of samples for each class were 175 for Normal, 90 for M1, 80 for M2, 100 for M3, 90 for C1, 95 for C2, and 160 for C3, respectively. The ratio of training, validation, and test data, applied to all classes, was 6:2:2. Table 2 ###reference_### lists the number of the training, validation, and testing data. The same set of samples with multiclass defect labels was used to train the SREL, multiclass-CNN, and ML methods. Based on this combination of data, we evaluated the diagnostic performance of our proposed method and the other compared methods.\nHyperparameters for the DL algorithms were optimized as follows. We used the binary cross entropy as the loss function to train the neural networks and minimized it using the Adam optimizer with a learning rate of 0.00005 and a batch size of 256. The number of epochs was determined by the early stopping method. For the K-means clustering model, the number of clusters was 7, and the maximum number of iterations was 300. For the random forest algorithm, the splitting criterion was Gini impurity; the maximum depth of the tree was 5; and the minimum number of samples to split was 2. In this study, the diagnostic performance was evaluated with accuracy defined as TP/(TP+FN), denoting the number of true positives, false positives, true negatives, and false negatives as TP, FP, TN, and FN, respectively. In addition, we examined the macro F1 score to comprehensively evaluate model performance.\nAll methods were implemented with an Intel\u00ae Core\u2122 i5-9600k CPU (3.70 GHz), 32 GB RAM, and NVIDIA GeForce RTX 2070 Super GPU."
|
| 76 |
+
},
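A scikit-learn sketch of the evaluation setup in Section 4.3 with the stated 6:2:2 split and the reported random forest and K-means hyperparameters; the random feature matrix and labels are placeholders, so the printed scores are only illustrative.

import numpy as np
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split

# Placeholder data: 790 reflection-coefficient patterns with 201 frequency points each.
rng = np.random.default_rng(0)
X = rng.normal(size=(790, 201))
y = rng.integers(0, 7, size=790)   # 7 classes: Normal, M1-M3, C1-C3

# 6:2:2 training/validation/test split, stratified per class as in Table 2.
X_tr, X_tmp, y_tr, y_tmp = train_test_split(X, y, test_size=0.4, stratify=y, random_state=0)
X_va, X_te, y_va, y_te = train_test_split(X_tmp, y_tmp, test_size=0.5, stratify=y_tmp, random_state=0)

# ML baselines configured with the hyperparameters reported in Section 4.3.
rf = RandomForestClassifier(criterion="gini", max_depth=5, min_samples_split=2, random_state=0)
rf.fit(X_tr, y_tr)
km = KMeans(n_clusters=7, max_iter=300, n_init=10, random_state=0).fit(X_tr)

# Accuracy and macro F1 score on the held-out test split.
pred = rf.predict(X_te)
print(accuracy_score(y_te, pred), f1_score(y_te, pred, average="macro"))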
|
| 77 |
+
{
|
| 78 |
+
"section_id": "5",
|
| 79 |
+
"parent_section_id": null,
|
| 80 |
+
"section_name": "Experimental Results and Discussion",
|
| 81 |
+
"text": ""
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "5.1",
|
| 85 |
+
"parent_section_id": "5",
|
| 86 |
+
"section_name": "Measurement Results: DC resistances, TDR, S-parameters at a Designated Frequency and Signal Patterns of Reflection Coefficient",
|
| 87 |
+
"text": "###figure_9### ###figure_10### Figure 9 ###reference_###(a) shows the DC resistances relative to different defect classes. As the number of cracks in the interconnects increased from 0 to 5, i.e., from the Normal to M3 class, the change in DC resistance was negligible. The average DC resistances for Normal, M1, M2, and M3 were 0.60, 0.62, 0.63, and 0.70 , respectively. Although the values seemed to increase, the changes remained within the range of standard deviation. Similarly, as the corrosion defects progressed, the changes in DC resistances were not significant. The average DC resistances for Normal, C1, C2, and C3 were 0.60, 0.60, 0.62, and 0.65 , respectively. This negligible change in DC resistances across defect states indicates the method\u2019s inability to detect early defects and perform root cause analysis.\nAs shown in Figure 9 ###reference_###(b), the average TDR values corresponding to defect states were 53.2, 58.9, 57.2, 48.8, 59.1, 53.8, and 53.1 for Normal, M1, M2, M3, C1, C2, and C3, respectively. The average S11 parameter values at 8 GHz, as depicted in Figure 9 ###reference_###(c), were -10.0, -6.0, -4.9, -3.7, -4.0, -5.5, and -5.4 dB for Normal, M1, M2, M3, C1, C2, and C3, respectively. There is no significant variation in TDR values, and no clear trend was observed in the S-parameters at the designated frequency with respect to different defect states. Moreover, it is unfeasible to determine the causes of defects using 1D information, such as DC resistances, S-parameters at a specific frequency, or TDR values, which have been employed by previous fault diagnosis methods for interconnects.\nMeanwhile, the signal patterns of the reflection coefficient provided the ability to distinguish the severity and cause of faults in electronic interconnects. As depicted in Figure 10 ###reference_### (a), the reflection coefficient showed identifiable patterns for the mechanical defects. Similarly, Figure 10 ###reference_### (b) shows the signal patterns according to the occurrence and evolution of corrosion defects. The graphs are averaged reflection coefficients in the frequency domain among the samples with the same defect state. As the degree of corrosion aggravated from Normal to C1, C2, and finally, to C3, the reflection coefficients of corroded Cu interconnects also showed distinguishable patterns for the corrosion. Overall, defective interconnects exhibited distinguishable reflection coefficient patterns according to the characteristics of defects, proving the capabilities of both early detection and root cause analysis. Interconnects with the same cause of the defects exhibited similar patterns (features of root causes), and the severity levels induced minor changes such as magnitude offsets and peak shifts (features of severity levels).\n###figure_11###"
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "5.2",
|
| 91 |
+
"parent_section_id": "5",
|
| 92 |
+
"section_name": "Visualization of Reflection Coefficient Patterns by t-SNE method",
|
| 93 |
+
"text": "Measured signal patterns were visualized after dimension reduction in order to examine and understand the feasibility of the signal patterns of reflection coefficient as inputs to ML and DL methods qualitatively. The dimension reduction was conducted based on the t-distributed stochastic neighbor embedding (t-SNE) method that maps high-dimension data into the low-dimensional embedded space [40 ###reference_b40###]. Using t-SNE, we were able to lower the dimension of the reflection coefficient patterns and depict them in a two-dimensional plane. t-SNE is one of the dimension reduction methods to show, herein, how effective reflection coefficient patterns are in distinguishing the root causes and severity levels of defects. As a result, Figure 11 ###reference_### contains the visualization of the signal pattern data segmented according to the labeled classes. Based on those t-SNE-based plots, the efficacy of the reflection coefficient patterns for diagnosing the cause/severity of defects could be verified. In other words, by observing the well-clustered pattern data in the reduced dimension, we were able to confirm that the reflection coefficient patterns are effective data for learning algorithms. Therefore, based on the proposed method, high diagnostic accuracy could be obtained by learning the signal patterns."
|
| 94 |
+
},
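A minimal t-SNE projection of reflection-coefficient-like patterns onto a 2D plane, as used for Figure 11; the synthetic class-dependent data and the perplexity value are assumptions.

import numpy as np
from sklearn.manifold import TSNE

# Placeholder patterns: seven classes with slightly shifted means stand in for the
# measured reflection-coefficient patterns of the seven defect states.
rng = np.random.default_rng(0)
X = np.vstack([rng.normal(loc=c, scale=0.3, size=(50, 201)) for c in range(7)])
labels = np.repeat(np.arange(7), 50)

# Project the high-dimensional patterns onto a 2D plane, as in Figure 11.
emb = TSNE(n_components=2, perplexity=30, init="pca", random_state=0).fit_transform(X)
print(emb.shape)  # (350, 2); emb can then be scatter-plotted and colored by `labels`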
|
| 95 |
+
{
|
| 96 |
+
"section_id": "5.3",
|
| 97 |
+
"parent_section_id": "5",
|
| 98 |
+
"section_name": "Diagnostic Performance",
|
| 99 |
+
"text": ""
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "5.3.1",
|
| 103 |
+
"parent_section_id": "5.3",
|
| 104 |
+
"section_name": "5.3.1 Diagnostic performance on data without noise",
|
| 105 |
+
"text": "The diagnostic performance of the proposed and the other methods on electronic interconnect data are listed in Table 3 ###reference_###. First, the SREL approach with the EfficientNet baseline network achieved a diagnostic accuracy of 99.3%, outperforming the other models. In the case of the 1D-CNN baseline network, the 3-layer network showed better performance than the 1-layer network. Second, the multiclass CNN model with the EfficientNet backbone predicted the cause/severity of defects with a diagnostic accuracy of 98.6%. The diagnostic performance of CNN-based models can be further improved by stacking the networks deeper. Finally, random forest, another ensemble technique based on ML, produced a diagnostic performance of 98.6%, indicating that the signal patterns of the interconnects were also effective features for ML techniques. Among the various methods, k-means clustering had the lowest accuracy of 81.2%. Furthermore, we presented the number of parameters and inference time of DL methods. Notably, the SREL architecture provided faster inference as well as a smaller number of parameters compared to the multiclass DL method with the same backbone network. It is also the first research work to apply even the conventional learning techniques to signal patterns in terms of fault diagnosis for electronic packages. Hence, meaningful is that the conventional ML and DL methods work well with the reflection coefficient patterns that we experimentally gathered according to the causes and severity levels of defects."
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "5.3.2",
|
| 109 |
+
"parent_section_id": "5.3",
|
| 110 |
+
"section_name": "5.3.2 Diagnostic performance on data with additive noise",
|
| 111 |
+
"text": "Fault diagnosis methods aim to enhance the reliability of electronics in real-world industrial applications [41 ###reference_b41###]. An issue in such sites is noise resulting from environmental and operational randomness including interferences of electrical sensors and devices [42 ###reference_b42###]. This industrial noise can be simulated by using white Gaussian noise [43 ###reference_b43###]. Thus, in this study, we tested the conventional AI algorithms and our proposed model with the data injected with the white noise as described in Figure 12 ###reference_###. The noise was produced using MATLAB with the white Gaussian noise function, which provides Gaussian power noise. In results, all methods showed lower diagnostic accuracy as the additional noise increased. This was because the noise caused variation in the characteristics and distributions of the signal data. Notably, the proposed SREL model outperformed the other methods at all levels of white noise tested herein, whereas the performance of the other methods rapidly degraded with the noise. The diagnostic results on the test data with additive noise are shown in Figure 13 ###reference_###. In summary, the proposed method exhibited more robust and stable performance against additive noise compared with conventional ML and the multiclass-CNN methods. ML methods, especially the random forest, showed faster inference with fewer parameters even compared to the SREL methods, but the ML methods turned out to be extremely vulnerable to noise.\n###figure_12### ###figure_13###"
|
| 112 |
+
},
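The paper injects white Gaussian noise generated in MATLAB; below is a numpy sketch of the same idea, parameterized by noise power in dBW to mirror that function. The example power level and the placeholder pattern are assumptions.

import numpy as np

def add_white_gaussian_noise(pattern, noise_power_dbw, rng=None):
    # Add zero-mean white Gaussian noise of a given power (in dBW) to a signal
    # pattern, mirroring the role of MATLAB's white Gaussian noise generator.
    rng = np.random.default_rng() if rng is None else rng
    noise_power = 10 ** (noise_power_dbw / 10)          # dBW -> linear power (variance)
    noise = rng.normal(0.0, np.sqrt(noise_power), size=pattern.shape)
    return pattern + noise

clean = np.linspace(-10, -3, 201)                       # placeholder S11 pattern in dB
noisy = add_white_gaussian_noise(clean, noise_power_dbw=-20, rng=np.random.default_rng(0))
print(np.std(noisy - clean))                            # ~0.1 for -20 dBW noise power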
|
| 113 |
+
{
|
| 114 |
+
"section_id": "5.4",
|
| 115 |
+
"parent_section_id": "5",
|
| 116 |
+
"section_name": "Discussion",
|
| 117 |
+
"text": "By gathering the reflection coefficient patterns of electronic interconnects according to causes and severity levels of defects, we found out that the signal patterns of reflection coefficients possessed the ability of fault diagnosis, especially with root cause analysis. Dimension reduction using t-SNE clearly showed that the patterns were well grouped according to the defect information, hence indicating that the patterns were effective input data for ML and DL methods. Accordingly, conventional ML and basic DL techniques performed fault diagnosis on the electronic interconnects with satisfying accuracies, which was also a novel result in fault detection and diagnosis of electronic packages because the signal patterns had not been utilized in the field. Although the conventional ML and basic DL methods showed good diagnostic results with the patterns of the reflection coefficient without noises, the industrial noises would deteriorate the diagnostic performance. The diagnostic results on data with additive noise indicated that the ML methods were more vulnerable to noise than DL, confirming that DL was more robust to industrial noises than ML [44 ###reference_b44###]. In this study, our SREL model showed better performance with or without noise, hence proving the capability of a more accurate and robust fault diagnosis method for electronic packaging. The excellent performance of SREL results from the approach where SREL divides a defect cause and severity problem into a series of binary classification sub-problems. It obtains estimates by aggregating the results of each sub-problem. At this point, the final estimation error is bound by the maximum error of binary estimators, as mathematically proven by Chen et al. [45 ###reference_b45###]. Also, it is known that the binary output aggregation outperforms the softmax-based multiclass-classification methods [45 ###reference_b45###]. In this study, the softmax-based multiclass CNN methods failed to account for the ordinal relationships between defect severity levels that the signal patterns showed. Thus, instead of multiclass-CNN methods with a softmax classifier, the proposed method is preferred for the defect cause and severity analysis.\nThe guideline for applying our method is as follows. Reflection coefficients should be obtained with regard to defect states to utilize the SREL approach in industrial applications. After setting the ground truth for the learning algorithm, the SREL model can be built and trained. Then, the trained network could be deployed to industrial fields. When users monitor the signal patterns of the reflection coefficient obtained from components of interest (irrespective of whether this step is operated regularly or not), they can feed the pattern to the network. Then, the SREL model can determine the root cause and severity of the defect based on the pre-trained network. With this information regarding the defects, users would respond to the situation early and effectively.\nThe magnitude of the S-parameter may vary slightly depending on the size of the crack or the shape of the corrosion. However, the S-parameter pattern can effectively distinguish between the causes of defects, whether mechanical damage or corrosion. In reality, these defects evolve continuously rather than discretely. 
Based on the results of this study, we plan to investigate the feasibility of using S-parameter patterns to estimate the crack size or degree of corrosion.\nIn addition, we provide a comparative evaluation of the fault diagnosis method using electrical signals, as shown in Figure 14 ###reference_###. The comparison was made from five perspectives: cost (ability to detect defects early enough to ensure sufficient remaining useful life), practicality (ease of implementation and a wide range of applications), cost (implementation expenses), noise robustness (stability of results under noise), and root cause analysis (ability to determine the root causes of defects). Compared to other methods, signal-pattern analysis using AI models offers superior noise robustness and root cause analysis capability. Analyzing the signal pattern across the full frequency range reduces dependency on specific frequencies and provides rich information simultaneously. However, the cost of implementing the method can be a challenging issue\n###figure_14###"
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"section_id": "6",
|
| 121 |
+
"parent_section_id": null,
|
| 122 |
+
"section_name": "Conclusion",
|
| 123 |
+
"text": "In this paper, we presented a novel approach for non-destructive fault diagnosis of electronic interconnects, using the reflection coefficient patterns to distinguish the causes and severity levels of defects.\nWe focused on corrosion and mechanical defects in electronic interconnects with varying severity levels. In the experimental results, we demonstrated that the reflection coefficient patterns exhibited distinguishable features for 7 states (Normal, M1, M2, M3, C1, C2, C3), enabling root cause analysis and early defect detection in a non-destructive way. On the other hand, it was unable to distinguish the defect causes by using the 1D electronic signals including DC resistance, TDR, and S-parameter at a designated frequency.\nThe signal patterns of the reflection coefficient provided effective input to both ML and DL techniques, overcoming the limitations of traditional time domain signal analysis. Utilizing existing CNN (1D-CNN, EfficientNet) and ML (RF, K-means clustering) methods, we achieved a maximum diagnostic accuracy of 98.6 %, indicating the efficacy of the reflection coefficient patterns as features for fault diagnosis.\nTo further enhance diagnostic performance and noise robustness, we introduced the SREL method, which fully utilized the unique characteristics of the signal patterns. Our proposed model achieved a maximum diagnostic accuracy of 99.3 % with our experimental data, outperforming conventional ML and multiclass-CNN approaches, particularly under increased noise level conditions. Our proposed fault diagnosis method facilitates early detection and provides a simultaneous cause and severity analysis, eliminating the need for secondary tools, all while maintaining robustness against noise.\nThe potential applications of the proposed method include fault detection and diagnosis in a wide range of electronic interconnects, such as integrated circuits, printed circuit boards, and flexible electronics. In addition, it could be utilized in quality control processes in the manufacturing line to improve product reliability and reduce downtime.\nIn the future, we plan to explore the extension of our work to different types of electronic devices and materials to further validate its applicability and robustness with various real industrial scenarios. Moreover, the integration of real-time monitoring systems and automatic fault diagnosis algorithms based on the proposed method would enable proactive maintenance and minimize potential faults and damages in electronic systems."
|
| 124 |
+
}
|
| 125 |
+
],
|
| 126 |
+
"appendix": [],
|
| 127 |
+
"tables": {
|
| 128 |
+
"1": {
|
| 129 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Comparison of fault Diagnosis methods using electronic signals in this study</figcaption>\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S4.T1.1\" style=\"width:433.6pt;height:164.5pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(-25.1pt,9.5pt) scale(0.896190402695826,0.896190402695826) ;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T1.1.1\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.1.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.1.1.1\">Methods</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.1.1.2\">Operating frequency(GHz)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.1.1.3\">Domain</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.1.1.4\">Dimension of information</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.2.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.2.2.1\">DC resistance</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.2.2.2\">-</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.2.2.3\">-</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.2.2.4\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T1.1.1.2.2.4.1\">\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.2.2.4.1.1\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.1.1.2.2.4.1.1.1\">1D</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.2.2.4.1.2\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.1.1.2.2.4.1.2.1\">(Single value)</td>\n</tr>\n</table>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.3.3\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.3.3.1\">TDR</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.3.3.2\">0 - 14</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.3.3.3\">Time</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.3.3.4\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T1.1.1.3.3.4.1\">\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.3.3.4.1.1\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.1.1.3.3.4.1.1.1\">1D</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.3.3.4.1.2\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.1.1.3.3.4.1.2.1\">(Single value)</td>\n</tr>\n</table>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.4.4\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.4.4.1\">S-parameter</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.4.4.2\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T1.1.1.4.4.2.1\">\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.4.4.2.1.1\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.1.1.4.4.2.1.1.1\">8</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.4.4.2.1.2\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.1.1.4.4.2.1.2.1\">(Designated frequency)</td>\n</tr>\n</table>\n</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.4.4.3\">Frequency</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.4.4.4\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T1.1.1.4.4.4.1\">\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.4.4.4.1.1\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.1.1.4.4.4.1.1.1\">1D</td>\n</tr>\n<tr class=\"ltx_tr\" 
id=\"S4.T1.1.1.4.4.4.1.2\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.1.1.4.4.4.1.2.1\">(Single value)</td>\n</tr>\n</table>\n</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.5.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T1.1.1.5.5.1\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T1.1.1.5.5.1.1\">\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.5.5.1.1.1\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.1.1.5.5.1.1.1.1\">Reflection coefficient pattern</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.5.5.1.1.2\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.1.1.5.5.1.1.2.1\">(Our method)</td>\n</tr>\n</table>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T1.1.1.5.5.2\">0 - 14</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T1.1.1.5.5.3\">Frequency</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T1.1.1.5.5.4\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S4.T1.1.1.5.5.4.1\">\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.5.5.4.1.1\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.1.1.5.5.4.1.1.1\">2D</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.5.5.4.1.2\">\n<td class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.1.1.5.5.4.1.2.1\">(Magnitude-frequency pattern)</td>\n</tr>\n</table>\n</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 130 |
+
"capture": "Table 1: Comparison of fault Diagnosis methods using electronic signals in this study"
|
| 131 |
+
},
|
| 132 |
+
"2": {
|
| 133 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T2\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>The number of training and test datasets</figcaption>\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S4.T2.1\" style=\"width:433.6pt;height:219.8pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(74.8pt,-37.9pt) scale(1.52667946737713,1.52667946737713) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S4.T2.1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T2.1.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_t\" id=\"S4.T2.1.1.1.1.1\">Defect Class</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.1.1.1.1.2\">Training Data</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.1.1.1.1.3\">Validation Data</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.1.1.1.1.4\">Test Data</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T2.1.1.2.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_t\" id=\"S4.T2.1.1.2.1.1\">Normal</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.1.1.2.1.2\">105</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.1.1.2.1.3\">35</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.1.1.2.1.4\">35</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.1.1.3.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row\" id=\"S4.T2.1.1.3.2.1\">M1</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.3.2.2\">54</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.3.2.3\">18</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.3.2.4\">18</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.1.1.4.3\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row\" id=\"S4.T2.1.1.4.3.1\">M2</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.4.3.2\">48</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.4.3.3\">16</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.4.3.4\">16</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.1.1.5.4\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row\" id=\"S4.T2.1.1.5.4.1\">M3</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.5.4.2\">60</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.5.4.3\">20</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.5.4.4\">20</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.1.1.6.5\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row\" id=\"S4.T2.1.1.6.5.1\">C1</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.6.5.2\">54</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.6.5.3\">18</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.6.5.4\">18</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.1.1.7.6\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row\" id=\"S4.T2.1.1.7.6.1\">C2</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.7.6.2\">57</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.7.6.3\">19</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.1.1.7.6.4\">19</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.1.1.8.7\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_b\" id=\"S4.T2.1.1.8.7.1\">C3</th>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.1.1.8.7.2\">96</td>\n<td 
class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.1.1.8.7.3\">32</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.1.1.8.7.4\">32</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 134 |
+
"capture": "Table 2: The number of training and test datasets"
|
| 135 |
+
},
|
| 136 |
+
"3": {
|
| 137 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T3\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>Diagnostic performance of comparison methods\n</figcaption>\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S5.T3.1\" style=\"width:433.6pt;height:411.9pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(65.2pt,-61.9pt) scale(1.43003846065395,1.43003846065395) ;\">\n<table class=\"ltx_tabular ltx_align_middle\" id=\"S5.T3.1.1\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.1.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.1.1.1\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.1.1.1.1\">Model</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.1.1.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.1.1.2.1\">Accuracy</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.1.1.3\">Macro</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.1.1.4\"># of</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.1.1.5\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.1.1.5.1\">Inference</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.2.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.2.2.1\">F1-Score</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.2.2.2\">parameters</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.3.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.3.3.1\">SREL</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.3.3.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.3.3.2.1\">99.3\u2009%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.3.3.3\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.3.3.3.1\">0.991</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.3.3.4\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.3.3.4.1\">63.7</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.3.3.5\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.3.3.5.1\">30.1\u2009ms</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.4.4\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.4.4.1\">(EfficientNet)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.5.5\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.5.5.1\">SREL</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.5.5.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.5.5.2.1\">97.2\u2009%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.5.5.3\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.5.5.3.1\">0.970</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.5.5.4\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.5.5.4.1\">4.7</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.5.5.5\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.5.5.5.1\">3.88\u2009ms</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.6.6\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.6.6.1\">(1DCNN-3)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.7.7\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.7.7.1\">SREL</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.7.7.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.7.7.2.1\">96.5\u2009%</span></td>\n<td class=\"ltx_td ltx_align_center\" 
id=\"S5.T3.1.1.7.7.3\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.7.7.3.1\">0.961</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.7.7.4\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.7.7.4.1\">1.2</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.7.7.5\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.7.7.5.1\">2.62\u2009ms</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.8.8\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.8.8.1\">(1DCNN-1)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.9.9\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.9.9.1\">Multiclass-CNN</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.9.9.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.9.9.2.1\">98.6\u2009%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.9.9.3\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.9.9.3.1\">0.979</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.9.9.4\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.9.9.4.1\">63.7</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.9.9.5\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.9.9.5.1\">32.7\u2009ms</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.10.10\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.10.10.1\">(EfficientNet)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.11.11\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.11.11.1\">Multiclass-CNN</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.11.11.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.11.11.2.1\">95.1\u2009%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.11.11.3\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.11.11.3.1\">0.941</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.11.11.4\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.11.11.4.1\">4.7</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.11.11.5\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.11.11.5.1\">4.26\u2009ms</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.12.12\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.12.12.1\">(1DCNN-3)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.13.13\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.13.13.1\">Multiclass-CNN</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.13.13.2\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.13.13.2.1\">88.9\u2009%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.13.13.3\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.13.13.3.1\">0.868</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.13.13.4\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.13.13.4.1\">1.2</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.13.13.5\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S5.T3.1.1.13.13.5.1\">3.53\u2009ms</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.14.14\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T3.1.1.14.14.1\">(1DCNN-1)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.15.15\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.15.15.1\">ML (RF)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.15.15.2\">98.6\u2009%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.15.15.3\">0.979</td>\n<td class=\"ltx_td ltx_align_center 
ltx_border_t\" id=\"S5.T3.1.1.15.15.4\">-</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T3.1.1.15.15.5\">7.36\u2009ms</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T3.1.1.16.16\">\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S5.T3.1.1.16.16.1\">ML (K-means)</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S5.T3.1.1.16.16.2\">81.2\u2009%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S5.T3.1.1.16.16.3\">0.821</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S5.T3.1.1.16.16.4\">-</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S5.T3.1.1.16.16.5\">2.56\u2009ms</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 138 |
+
"capture": "Table 3: Diagnostic performance of comparison methods\n"
|
| 139 |
+
}
|
| 140 |
+
},
|
| 141 |
+
"image_paths": {
|
| 142 |
+
"1": {
|
| 143 |
+
"figure_path": "2304.10207v3_figure_1.png",
|
| 144 |
+
"caption": "Figure 1: Comparison between our work and previous studies for fault detection and diagnosis of electronic packages using electrical signals",
|
| 145 |
+
"url": "http://arxiv.org/html/2304.10207v3/x1.png"
|
| 146 |
+
},
|
| 147 |
+
"2": {
|
| 148 |
+
"figure_path": "2304.10207v3_figure_2.png",
|
| 149 |
+
"caption": "Figure 2: IC packaging and electronic interconnects (Cu interconnects) placed inside the package",
|
| 150 |
+
"url": "http://arxiv.org/html/2304.10207v3/x2.png"
|
| 151 |
+
},
|
| 152 |
+
"3": {
|
| 153 |
+
"figure_path": "2304.10207v3_figure_3.png",
|
| 154 |
+
"caption": "Figure 3: Schematic of the reflection coefficient measurement",
|
| 155 |
+
"url": "http://arxiv.org/html/2304.10207v3/x3.png"
|
| 156 |
+
},
|
| 157 |
+
"4": {
|
| 158 |
+
"figure_path": "2304.10207v3_figure_4.png",
|
| 159 |
+
"caption": "Figure 4: Structure of the proposed SREL approach for fault diagnosis utilizing signal patterns of interconnects",
|
| 160 |
+
"url": "http://arxiv.org/html/2304.10207v3/x4.png"
|
| 161 |
+
},
|
| 162 |
+
"5": {
|
| 163 |
+
"figure_path": "2304.10207v3_figure_5.png",
|
| 164 |
+
"caption": "Figure 5: Working principle of the SREL approach used in our study to distinguish mechanical and corrosion defects with various severity levels",
|
| 165 |
+
"url": "http://arxiv.org/html/2304.10207v3/x5.png"
|
| 166 |
+
},
|
| 167 |
+
"6": {
|
| 168 |
+
"figure_path": "2304.10207v3_figure_6.png",
|
| 169 |
+
"caption": "Figure 6: Test batch of electronic interconnect specimens",
|
| 170 |
+
"url": "http://arxiv.org/html/2304.10207v3/x6.png"
|
| 171 |
+
},
|
| 172 |
+
"7": {
|
| 173 |
+
"figure_path": "2304.10207v3_figure_7.png",
|
| 174 |
+
"caption": "Figure 7: Labeling (a) mechanical and (b) corrosion defects in electronic interconnects. (c) Quantitative evaluation of corrosion using the image processing technique (Note: These images were not utilized for ML and DL techniques)",
|
| 175 |
+
"url": "http://arxiv.org/html/2304.10207v3/x7.png"
|
| 176 |
+
},
|
| 177 |
+
"8": {
|
| 178 |
+
"figure_path": "2304.10207v3_figure_8.png",
|
| 179 |
+
"caption": "Figure 8: Measurement setup for obtaining the reflection coefficient patterns",
|
| 180 |
+
"url": "http://arxiv.org/html/2304.10207v3/x8.png"
|
| 181 |
+
},
|
| 182 |
+
"9": {
|
| 183 |
+
"figure_path": "2304.10207v3_figure_9.png",
|
| 184 |
+
"caption": "Figure 9: (a) DC resistance, (b) TDR, (c) S-parameter values at a designated frequency according to the defect states",
|
| 185 |
+
"url": "http://arxiv.org/html/2304.10207v3/x9.png"
|
| 186 |
+
},
|
| 187 |
+
"10": {
|
| 188 |
+
"figure_path": "2304.10207v3_figure_10.png",
|
| 189 |
+
"caption": "Figure 10: Reflection coefficient patterns of electronic interconnects according to severity levels of (a) mechanical and (b) corrosion defects",
|
| 190 |
+
"url": "http://arxiv.org/html/2304.10207v3/x10.png"
|
| 191 |
+
},
|
| 192 |
+
"11": {
|
| 193 |
+
"figure_path": "2304.10207v3_figure_11.png",
|
| 194 |
+
"caption": "Figure 11: t-SNE dimension reduction results with the reflection coefficient data",
|
| 195 |
+
"url": "http://arxiv.org/html/2304.10207v3/x11.png"
|
| 196 |
+
},
|
| 197 |
+
"12": {
|
| 198 |
+
"figure_path": "2304.10207v3_figure_12.png",
|
| 199 |
+
"caption": "Figure 12: Shapes of the reflection coefficient patterns with increasing additive noise levels",
|
| 200 |
+
"url": "http://arxiv.org/html/2304.10207v3/x12.png"
|
| 201 |
+
},
|
| 202 |
+
"13": {
|
| 203 |
+
"figure_path": "2304.10207v3_figure_13.png",
|
| 204 |
+
"caption": "Figure 13: Diagnostic performance of the compared methods at increasing noise levels",
|
| 205 |
+
"url": "http://arxiv.org/html/2304.10207v3/x13.png"
|
| 206 |
+
},
|
| 207 |
+
"14": {
|
| 208 |
+
"figure_path": "2304.10207v3_figure_14.png",
|
| 209 |
+
"caption": "Figure 14: Comparison of the fault diagnosis methods using electrical signals including the signal-pattern analysis",
|
| 210 |
+
"url": "http://arxiv.org/html/2304.10207v3/x14.png"
|
| 211 |
+
}
|
| 212 |
+
},
|
| 213 |
+
"validation": true,
|
| 214 |
+
"references": [
|
| 215 |
+
{
|
| 216 |
+
"1": {
|
| 217 |
+
"title": "doi:10.1109/ICSET.2016.7811789.",
|
| 218 |
+
"author": "L. T. Hoai, A. H. Duong, Fault detection on the transmission lines using the time domain reflectometry method basing on the analysis of reflected waveform, in: 2016 IEEE International Conference on Sustainable Energy Technologies (ICSET), (2016), pp. 241\u2013245.",
|
| 219 |
+
"venue": null,
|
| 220 |
+
"url": "https://doi.org/10.1109/ICSET.2016.7811789"
|
| 221 |
+
}
|
| 222 |
+
},
|
| 223 |
+
{
|
| 224 |
+
"2": {
|
| 225 |
+
"title": "doi:https://doi.org/10.1016/B978-0-12-418663-7.00001-0.\n\nURL https://www.sciencedirect.com/science/article/pii/B9780124186637000010",
|
| 226 |
+
"author": "H. Zhang, S. Krooswyk, J. Ou, Chapter 1 - transmission line fundamentals, in: H. Zhang, S. Krooswyk, J. Ou (Eds.), High Speed Digital Design, Morgan Kaufmann, Boston, 2015, pp. 1\u201326.",
|
| 227 |
+
"venue": null,
|
| 228 |
+
"url": "https://doi.org/https://doi.org/10.1016/B978-0-12-418663-7.00001-0"
|
| 229 |
+
}
|
| 230 |
+
}
|
| 231 |
+
],
|
| 232 |
+
"url": "http://arxiv.org/html/2304.10207v3"
|
| 233 |
+
}
|
20241004/2307.07191v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2311.07237v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2311.07693v2.json
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Matching Aggregate Posteriors in the Variational Autoencoder",
|
| 3 |
+
"abstract": "The variational autoencoder (VAE) [22] is a well-studied, deep, latent-variable model (DLVM) that optimizes the variational lower bound of the log marginal data likelihood. However, the VAE\u2019s known failure to match the aggregate posterior often results unacceptable latent distribution, e.g. with pockets, holes, or clusters, that fail to adequately resemble the prior. The training of the VAE under different scenarios can also result in posterior collapse, which is associated with a loss of information in the latent space. This paper addresses these shortcomings in VAEs by reformulating the objective function to match the aggregate/marginal posterior distribution to the prior. We use kernel density estimate (KDE) to model the aggregate posterior. We propose an automated method to estimate the kernel and account for the associated kernel bias in our estimation, which enables the use of KDE in high-dimensional latent spaces. The proposed method is named the aggregate variational autoencoder (AVAE) and is built on the theoretical framework of the VAE.\nEmpirical evaluation of the proposed method on multiple benchmark datasets demonstrates the advantages of the AVAE relative to state-of-the-art (SOTA) DLVM methods. Here is the link to the code: https://github.com/Surojit-Utah/AVAE.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "The development of DLVMs is an important topic of research that is widely used for generative modeling and representation learning. The VAE [25 ###reference_b25###, 22 ###reference_b22###], a DLVM, learns a joint distribution distribution, , that captures the relationship between a set of hidden variables, , and the observed variables, . VAEs model the data distribution, , by optimizing the parameters, , typically a deep neural network known as the generative model/decoder. The VAE approximates the true posterior by a surrogate distribution, , that informs the objective function to use a latent subspace that is likely to maximize . The parameters () of the surrogate posterior is another deep neural network known as the inference model/encoder. The encoder () and decoder () parameters of the VAE are jointly optimized to maximize the evidence lower bound (ELBO).\nDespite strong theoretical foundations, the VAE fails in matching the aggregate posterior to the prior . The mismatch between distributions results in clusters or holes in the latent space, indicating regions strongly supported under the prior may have low density under aggregate posterior [13 ###reference_b13###, 21 ###reference_b21###] (and vice versa). The presence of holes increases the mismatch between the learned, , and real data distribution, , leading to the generation of low-quality samples. To alleviate this issue, methods in [14 ###reference_b14###, 24 ###reference_b24###] use a flexible prior, and the authors of [23 ###reference_b23###] match the prior in two stages. An additional regularization loss is added to the ELBO to match aggregate distributions that help in learning meaningful representations [84 ###reference_b84###] and improved disentanglement of latent factors [73 ###reference_b73###]. The estimation of is challenging, thus the methods in [84 ###reference_b84###, 73 ###reference_b73###] uses an adversarial classifier or a kernel method, e.g., MMD [6 ###reference_b6###], to match the aggregate posterior to the prior, similar to the Wasserstein autoencoders [38 ###reference_b38###].\nThe posterior distribution of the VAE might collapse to the prior for a subset or all of the latent dimensions during the training of VAEs. Under such scenarios, the representations produced by the encoder on the collapsed latent dimensions lack information that the decoder can use to reconstruct the input faithfully. This phenomenon is known as the posterior collapse or the KL vanishing effect [81 ###reference_b81###, 69 ###reference_b69###, 69 ###reference_b69###]. We expect to encounter such degenerate solutions more often with the -VAE [71 ###reference_b71###] that advocates the use of higher values for the improved disentanglement of the latent factors. The analysis in [13 ###reference_b13###] explains the minimization of the mutual information between the latent () and observed variables () for higher values. Several methods have been proposed to circumvent this issue, such as the KL annealing strategy [80 ###reference_b80###, 81 ###reference_b81###], explicit inhibition of the distribution matching [69 ###reference_b69###], use of complex priors [14 ###reference_b14###, 24 ###reference_b24###], and special training policy [82 ###reference_b82###].\nIn this work, we address the limitations of the VAE by matching the aggregate posterior to the prior in the ELBO framework derived from first principles. We use KDE in the latent space to model the aggregate posterior, . 
The use of KDE in the AVAE helps in a better estimate of differences between distributions relative to the adversarial training and kernel-based method used in [84 ###reference_b84###, 73 ###reference_b73###, 3 ###reference_b3###, 38 ###reference_b38###]. In addition to improvement in the quality of the generated samples, matching the aggregate posterior to a prior finds potential application in the meaningful interpretation of the latent generative factors [73 ###reference_b73###, 72 ###reference_b72###], outlier detection [70 ###reference_b70###], and data completion [85 ###reference_b85###, 86 ###reference_b86###]. Unlike other variants of the VAE that strive to match marginal posterior to the prior [73 ###reference_b73###, 72 ###reference_b72###, 84 ###reference_b84###], the proposed method does not require additional regularization terms or hyperparameters to the objective function. Moreover, we propose a heuristic that automatically adjusts the value during the training instead of empirically estimating the for a dataset using cross-validation. The potential benefits of using KDEs for matching distributions have been thoroughly studied in [70 ###reference_b70###]. Though KDEs are used in [70 ###reference_b70###] for matching the aggregate posterior distribution to the prior, the objective function is not derived in the general framework of DLVMs, and it is not well suited to high-dimensional latent spaces, e.g., , which restricts its application for modeling complex datasets, such as the CIFAR10 [97 ###reference_b97###]. We correct the bias in KDE bandwidth estimation that qualifies the AVAE to use KDE in high-dimensional latent spaces (dimensions > 100).\nThe main contributions of this work are summarized as follows:\nMatching the aggregate posterior distribution to the prior in the VAE objective function using KDE without any modification of the ELBO.\nAn automated method for estimating KDE bandwidth that allows using KDEs in high-dimensional latent spaces (dimensions ).\nEvaluations showing that the AVAE addresses the shortcomings in the formulation (pockets/clusters) and training (posterior collapse) of the VAE.\nThe regularization scalar is updated during training using the proposed heuristic. Thus, the AVAE is free from tuning the hyperparameter, .\nEmpirical evaluation of the proposed method using different efficacy measures on multiple benchmark datasets, producing results that compare favorably with state-of-the-art, likelihood-based, generative models."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Related Work",
|
| 15 |
+
"text": "Several extensions to the formulation of the VAE address known limitations, such as alleviating posterior collapse [68 ###reference_b68###, 69 ###reference_b69###], better matching of marginal posteriors [73 ###reference_b73###, 72 ###reference_b72###], and reducing over-regularization [14 ###reference_b14###, 24 ###reference_b24###]. Methods matching marginal posteriors are relevant to our work. These methods introduced an additional regularization term to the objective function [73 ###reference_b73###, 72 ###reference_b72###] (along with a hyperparameter) to encourage statistical independence of latent factors. An interesting analysis of the VAE objective is done in [10 ###reference_b10###] (RAE), which suggests that an autoencoder with regularized decoder parameters is as good as the VAE.\nThe generative adversarial network (GAN) is another popular generative model that implicitly matches distributions using a discriminator [20 ###reference_b20###, 32 ###reference_b32###]. GANs produce novel, realistic examples, such as images with sharp and distinct features, which are difficult for even humans to identify as generated images [4 ###reference_b4###]. Nevertheless, GANs do not produce a reliable matching form data samples into the latent space [31 ###reference_b31###], and there are significant challenges in optimizing the objective function of a GAN [62 ###reference_b62###, 64 ###reference_b64###, 61 ###reference_b61###]. GANs are very particular about the architecture of the discriminator, training strategy, and the associated hyperparameters [28 ###reference_b28###, 33 ###reference_b33###]. The adversarial autoencoder (AAE) [3 ###reference_b3###] is a likelihood-based generative model that implicitly matches the aggregate posterior in the latent space of an autoencoder to a prior with the help of a discriminator.\nWAEs [38 ###reference_b38###] is another likelihood-based generative model that explicitly matches the aggregate posterior to a prior in the latent space (unlike VAEs). In the WAE, the Wasserstein distance between the data and generated distribution is minimized by factoring the latent variable in its formulation. The regularization term in WAEs is computed using two different strategies. In one approach, a discriminator is used in the latent space, as in AAEs, and is known as the WAE-GAN. In the other approach, the maximum mean discrepancy (MMD) [6 ###reference_b6###] is used to compute the divergence between distributions in the latent space, known as the WAE-MMD."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Method",
|
| 21 |
+
"text": ""
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "Background",
|
| 27 |
+
"text": "The goal of a DLVM is to learn the joint distribution of the latent variables, , and the observed variables, , such that the resulting (generative) distribution closely approximates the true but unknown data distribution, .\nIn the DLVM, learns the mapping from the latent space to observed space using the samples generated by and model parameters . This setup is used to generate new samples not present in the observed dataset. Thus, the aim is to determine the correct setting of the parameters, , such that the probability of each observed data, , is maximized. The objective function of the DLVM is defined as follows:\nThe objective function defined in 2 ###reference_### gives a lower bound on the data log likelihood and is known as the evidence lower bound (ELBO).\nUse of as the proposal distribution in equation 1 ###reference_### gives us the objective function of the VAE [25 ###reference_b25###, 22 ###reference_b22###]. The choice of the probability distribution for is a modeling choice, and for VAEs, it is typically a Gaussian distribution [25 ###reference_b25###, 22 ###reference_b22###]. The VAE uses an inference network (also called a recognition model), , a deep neural network parameterized by that estimates the parameters of the Gaussian distribution for any input , .\nMatching the conditional distribution to in VAEs often fails to match the aggregate posterior in the latent space [21 ###reference_b21###, 13 ###reference_b13###]. The mismatch leads to, among other things, holes or pockets in the latent distribution that subsequently affects the quality of the generated samples. Increasing the strength of the regularization term in the objective function of VAEs does not help better match the aggregate posterior to the prior [71 ###reference_b71###]. Instead, it results in a scenario known as posterior collapse [65 ###reference_b65###, 66 ###reference_b66###], where the conditional distribution matches to the prior for a subset of the latent dimensions. Such degenerate solutions produce latent encodings that are no longer meaningful, and the decoder tends to ignore in the reproduced observed data, resulting in poor reconstruction. This phenomenon is related to the identity, , where the is the mutual information between the observed and latent variables. Thus, increasing the strength of the KL term would lead to better aggregate posterior matching but would lower the mutual information between the latent variables and the data. Several variants are proposed [73 ###reference_b73###, 72 ###reference_b72###, 23 ###reference_b23###] to circumvent these issues encountered in the VAE that emphasizes matching the aggregate posterior to a prior."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "Aggregate Variational Autoencoder (AVAE)",
|
| 33 |
+
"text": "Instead of parametric distribution on the conditional probability, as used in VAEs, we propose to represent the aggregate distribution using kernel density estimates (KDE). KDE used to approximate the aggregate posterior distribution is defined as:\nUsing KDEs, the probability at for the proposal distribution is estimated using KDE samples, , and the kernel, , with an associated bandwidth, . We use a random subset of the training data, , that is shuffled every epoch to produce KDE samples in the latent space, is a deep neural network parameterized by , known as the encoder. We use a deterministic encoder (ignoring the variances along the latent axes), unlike VAEs. Through multiple empirical evaluations, we show that using a deterministic encoder in the AVAE does not rob it of expressive power compared to a regular VAE or its variants.\nThe ELBO objective function using KDE-based proposal distribution is defined as follows:\nEquation 4 ###reference_### gives us the objective function of the AVAE. In comparison to the proposal distribution in VAEs, KDE-based approximation matches the aggregate posterior, , to the prior, , without any modifications to the ELBO formulation. Compared to the -TCVAE [72 ###reference_b72###], the AVAE does not have a mutual information (MI) term in its objective function. The absence of the MI in the AVAE also reduces the number of hyperparameters.\nThe random (data) variable typically exists in high dimensions, and thus the probability of is valid only for a small region in the latent space, i.e., is nonzero for a small region in the latent space. We use as an estimate to maximize in equation 4 ###reference_###. Considering this modeling choice, the objective function of the AVAE then becomes:\nWe use the multivariate Gaussian distribution or Bernoulli distribution as the conditional likelihood distribution, in 5 ###reference_###, depending on the dataset. The parameters of the chosen distribution are estimated using another neural network known as the decoder, , parameterized by . The objective function in 5 ###reference_### is optimized using the stochastic gradient descent (SGD) that jointly updates the encoder and decoder parameters, and , respectively. The first term in the objective function tries to reproduce the input as closely as possible using the corresponding latent statistics (reconstruction loss), while the KL term (matching the aggregate posterior to the prior) regularizes the model parameters.\nThe objective function of the AVAE is similar to that of WAEs, which have a reconstruction term and a divergence penalty on the aggregate distribution over latent representations. The divergence measure regulates the trade-off between the reconstruction and latent regularization loss. Similar to WAEs, the AVAE has the flexibility in choosing reconstruction cost terms by considering different distributions for . The divergence penalty in the AVAE is the KL divergence, a particular case of the WAE. Nevertheless, the AVAE has provable statistical properties of the latent space, and the proposed method has empirically demonstrated its merit over the WAE under several evaluation metrics discussed in subsequent sections."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3.2.1",
|
| 37 |
+
"parent_section_id": "3.2",
|
| 38 |
+
"section_name": "3.2.1 Training:",
|
| 39 |
+
"text": "The objective function of the AVAE defined in 5 ###reference_### has two terms: the reconstruction loss and KL-divergence-matching of the aggregate posterior to the prior. The aggregate posterior, , in the AVAE is represented using KDE. A random subset of the training data forms KDE samples that is shuffled after every epoch. Remaining samples are used for optimizing the objective function 5 ###reference_### using the SGD that updates the model parameters, and . Shuffling KDE samples in in every epoch changes the aggregate posterior, , used in the AVAE objective function. However, the evolving aggregate posterior does not impact (adversely) the training of the AVAE, and the loss curves on multiple datasets demonstrate the stable optimization of the AVAE objective function (refer to Figure 1 in the supplementary). Moreover, an update of the and in every epoch results in better performance of the AVAE (compared with a fixed ) under different metrics across datasets (refer to Table 1 in the supplementary).\nWithout any loss of generality, we use the isotropic Gaussian kernel in KDE for this work, which introduces a bandwidth parameter. There are many heuristics for estimating the kernel bandwidth used in KDE, and there is no established solution for unknown distributions. Furthermore, the estimation of KDE bandwidth is particularly challenging in high-dimensional latent spaces (dimensions > 50). We present a bandwidth estimation method in section 3.4 ###reference_### that uses the knowledge of the prior distribution, , to estimate KDE bandwidth for a given latent dimension and a given number of KDE samples. The proposed bandwidth estimation technique can even scale to higher dimensional latent spaces, i.e., dimensions > 100.\nSeveral extensions of the VAE [80 ###reference_b80###, 81 ###reference_b81###, 83 ###reference_b83###, 79 ###reference_b79###] propose automated ways to determine the hyperparameter that balances the loss terms in the objective function. In a similar vein, we propose a data-driven technique to determine that balances the loss terms in the AVAE objective function. An outline of the training of the AVAE is presented in Algorithm 1 ###reference_###.\nInput: Data , Latent dimensions , KDE samples ."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.2.2",
|
| 43 |
+
"parent_section_id": "3.2",
|
| 44 |
+
"section_name": "3.2.2 Estimation of :",
|
| 45 |
+
"text": "The objective function of the standard VAE does not introduce a hyperparameter to weigh the loss terms. However, it is a common practice to assign weights to different terms in the objective functions [38 ###reference_b38###, 10 ###reference_b10###, 3 ###reference_b3###] for various reasons, such as stability in optimization and application-specific trade-offs. Likewise, several variants of the VAE [71 ###reference_b71###, 72 ###reference_b72###] use a hyperparameter, , to control the contribution of the loss terms in the objective function. It is often challenging to decide the appropriate value of these hyperparameters for a particular model architecture, dataset, and other related settings for optimization. The widely used strategy under these circumstances is to set the hyperparameter value using cross-validation.\nTo alleviate these issues, methods proposed in [83 ###reference_b83###, 79 ###reference_b79###], among others, have devised automated strategies to determine . The method in [83 ###reference_b83###] uses a PI controller that manipulates the value of as the learning progresses. Assuming the decoder predicts the parameter of the multivariate Gaussian distribution, [79 ###reference_b79###] presents two approaches to learning the Gaussian variance, (equivalent to learning ). In the first approach, an additional parameter is trained with the encoder-decoder parameters to learn the trade-off factor, . In another approach, the maximum likelihood estimate (MLE) determines the variance analytically.\nSimilar to these approaches, the proposed AVAE optimization sets beta to weight the gradient of the regularization term relative to the reconstruction loss:\nwhere is an example in the validation set, , and is the corresponding reconstructed sample produced by the decoder. Relative to [83 ###reference_b83###], the proposed approach is simple yet effective, as demonstrated by the empirical evaluations. The update of during the training of the AVAE on multiple datasets is reported in the supplementary (Figure 1). Moreover, this formulation can be extended to any distribution chosen for the log conditional likelihood, , rather than being limited to only a Gaussian, as in [79 ###reference_b79###]."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.3",
|
| 49 |
+
"parent_section_id": "3",
|
| 50 |
+
"section_name": "Properties of the Aggregate Posterior of the AVAE",
|
| 51 |
+
"text": "Considering the standard normal distribution, , as the prior distribution, , we analytically derive the expected aggregate posterior distribution of a trained AVAE. For a trained AVAE model, we assume the gradient of the objective function (5 ###reference_###) w.r.t to latent encodings, \u2019s (refer to Algorithm 1 ###reference_###), is zero. In our analysis, we consider only the KL divergence term in the objective function. Setting the derivative of the to , we derive the same expression as in equation of [70 ###reference_b70###]. Following the steps in [70 ###reference_b70###], we prove the aggregate posterior distribution of the AVAE is , in expectation, where is KDE bandwidth. The proof is also consistent with the known properties of KDEs generally \u2014 KDEs introduce a bias that is characterized by a convolution of the kernel with the underlying distribution."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "3.4",
|
| 55 |
+
"parent_section_id": "3",
|
| 56 |
+
"section_name": "KDE Bandwidth Estimate",
|
| 57 |
+
"text": "Estimating KDE bandwidth can be challenging, and solutions in the literature are often related to particular applications. Many heuristics are proposed for bandwidth estimation under general circumstances [100 ###reference_b100###]. However, here, we use the knowledge of the prior distribution, , to our advantage for estimating KDE bandwidth used for modeling the aggregate distribution in the latent space, . We devise an objective function such that the empirical aggregate distribution, , in the latent space approaches the target distribution, , as the system converges. Thus, we set the kernel bandwidth to minimize the KL divergence between the analytical prior distribution and KDE of a finite set of samples from the prior distribution, as follows:\nwith latent dimension, , and a number of KDE samples, . In this optimization problem, we use samples from the such that the probability of the samples is maximized w.r.t the aggregate posterior, . Table 1 ###reference_### reports the optimum bandwidth, , for different scenarios. We use gradient-based optimizers, such as Adam, to learn the single parameter, in 7 ###reference_###. We observe in Table 1 ###reference_### that for higher latent dimensions with limited KDE samples (e.g., starting at with ), the optimal bandwidth is greater than the standard deviation of the prior distribution, . Given the known bias KDE introduces in the AVAE optimization, optimizing the encoder under these conditions would degenerate to samples converging at the origin (posterior collapse).\nGiven , we know from section 3.3 ###reference_### that the distribution in the latent space of the AVAE converges to , where is bias introduced by KDE. However, we could not consider as the target distribution, , for optimization of the objective function in 7 ###reference_###, as is unknown. We hypothesize that this is one of the reasons for the optimal bandwidth to be greater than the standard deviation of the prior distribution in bigger latent spaces (Table 1 ###reference_###). Thus, we must factor in the bias, , introduced by KDE to estimate the bandwidth. To this end, we propose to use a scaled version of the target distribution, , for optimization of the objective function in 7 ###reference_###, where the scaling factor is unknown. We need to estimate the optimum bandwidth for . Given as the optimum bandwidth for , the estimated bandwidth for is by linear property of the Gaussian distribution. Moreover, we know (from section 3.3 ###reference_###) that with as KDE bandwidth, the latent distribution of the AVAE would have a bias, , at convergence. We use this property to solve for the scaling factor, , where we set the variance equal to the bias, , to get the scaling that accounts for both the ideal optimal bandwidth and the bias:\nThis simple but elegant strategy of handling the bias in KDE addresses the challenge of estimating KDE bandwidth in high-dimensional latent spaces. Notice that because , we avoid mode collapse because the system only degenerates () as the number of samples goes to zero or the dimensionality goes to infinity.\nWith the bias scaling factor, , we get estimates of the bias-corrected KDE bandwidth () reported in Table 1 ###reference_###, which are the scaled versions of the optimum bandwidth (equation 7 ###reference_###). In the revised estimate, the optimal bandwidth is less than the standard deviation of the prior distribution, , for all dimensions in Table 1 ###reference_###, as expected. 
The bias-corrected bandwidth encourages the use of KDEs in bigger latent spaces (e.g., dimensions ) that makes the AVAE appropriate for modeling complex datasets (a limitation in the previous KDE-based aggregate matching [70 ###reference_b70###])."
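The following sketch illustrates the bandwidth search of equation 7 (a single log-bandwidth parameter optimized with Adam so that prior samples are maximally likely under the KDE) together with the bias correction. The closed-form correction factor gamma = 1/sqrt(1 + h*^2), obtained by forcing the converged variance gamma^2 (1 + h*^2) back to 1, is a reconstruction from the surrounding text and should be treated as an assumption, not the paper's exact expression.

```python
import math
import torch

def estimate_bandwidth(dim, n_kde, steps=2000, n_eval=1000, lr=1e-2, seed=0):
    """Bandwidth search of Eq. 7 plus the (assumed) bias correction."""
    torch.manual_seed(seed)
    kde_z = torch.randn(n_kde, dim)                  # KDE samples drawn from the prior
    log_h = torch.zeros(1, requires_grad=True)       # optimize log-bandwidth to keep h positive
    opt = torch.optim.Adam([log_h], lr=lr)
    for _ in range(steps):
        z = torch.randn(n_eval, dim)                 # fresh prior samples to evaluate
        h = log_h.exp()
        sq_dist = torch.cdist(z, kde_z) ** 2
        log_kernel = (-0.5 * sq_dist / h**2
                      - dim * torch.log(h)
                      - 0.5 * dim * math.log(2 * math.pi))
        log_q = torch.logsumexp(log_kernel, dim=1) - math.log(n_kde)
        loss = -log_q.mean()                         # maximize prior-sample likelihood under the KDE
        opt.zero_grad()
        loss.backward()
        opt.step()
    h_opt = log_h.exp().item()
    gamma = 1.0 / math.sqrt(1.0 + h_opt**2)          # bias-correction scale (assumption, see lead-in)
    return h_opt, gamma * h_opt                      # (optimal, bias-corrected) bandwidth
```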
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4",
|
| 61 |
+
"parent_section_id": null,
|
| 62 |
+
"section_name": "Experiments",
|
| 63 |
+
"text": ""
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.1",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "Experimental Setup",
|
| 69 |
+
"text": "Benchmark Methods:\nIn comparisons, we consider the conventional VAE [22 ###reference_b22###] and other variations of VAE that modify the original formulation in an attempt to match the aggregate posterior to the prior [73 ###reference_b73###, 72 ###reference_b72###]. Among others, -TCVAE [72 ###reference_b72###] is the closest to the AVAE formulation, as the objective function does not introduce any additional, ad-hoc loss terms. The RAE [10 ###reference_b10###] is chosen as one of the baseline models due to its performance on multiple benchmark datasets. Other maximum likelihood-based models such as the AAE [3 ###reference_b3###] and WAE [38 ###reference_b38###] match aggregate posterior in the latent space of a deterministic autoencoder. The AAE [3 ###reference_b3###] implicitly matches aggregate distributions using a discriminator in the latent space. We use the WAE-MMD (with IMQ kernel) in our analysis due to the stability in training. We study the VAE [22 ###reference_b22###], -TCVAE [72 ###reference_b72###], RAE [10 ###reference_b10###], AAE[3 ###reference_b3###], and WAE-MMD [38 ###reference_b38###] as competing methods to the AVAE.\nEvaluation Metrics: Ideally, the evaluation of a DLVM should include a comparison of the model\u2019s data distribution and that of the true data. Of course, this is infeasible because true data distribution is unknown. Many methods use the quality of the samples produced by the models in the observed space as a proxy for the actual distribution. In this work, we use the Fr\u00e9chet Inception Distance (FID) [11 ###reference_b11###] to quantify the quality of the samples. In addition, we evaluate the data distributions learned by different models using the precision-recall metric [75 ###reference_b75###], where the precision evaluates the quality of the generated samples, and the recall assesses whether the model data distribution captures the variations present in the original but unknown data distribution. Besides the attributes of the model data distribution, we evaluate the properties of the latent representations of the competing methods. In particular, we are interested in the presence of holes in the latent distribution, and we use entropy of the aggregate posterior distribution as an indicator of holes/clusters. We train each method times on a dataset for all empirical evaluations, initialized differently in every run.\nDatasets: We use several popular benchmark datasets, MNIST [94 ###reference_b94###], CelebA [96 ###reference_b96###], and CIFAR10 [97 ###reference_b97###] for empirical evaluation of different methods. To address the dataset\u2019s complexity, the size of the latent space, neural network architectures, model-specific hyperparameters, and other optimization parameters are altered accordingly. Details of the neural network architectures and other parameter settings for all the benchmark datasets used by the competing methods are reported in sections 2.1 and 2.2 in the supplementary material."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.2",
|
| 73 |
+
"parent_section_id": "4",
|
| 74 |
+
"section_name": "Results",
|
| 75 |
+
"text": ""
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "4.2.1",
|
| 79 |
+
"parent_section_id": "4.2",
|
| 80 |
+
"section_name": "4.2.1 Evaluation of the Model Data Distribution",
|
| 81 |
+
"text": "We quantitatively evaluate the generated samples in this experiment using the FID scores [11 ###reference_b11###] on multiple benchmark datasets. A lower FID score indicates better matching of the data distributions. Besides the FID metric, we evaluate the diversity and quality of the generated samples using the precision-recall metric [75 ###reference_b75###]. A higher precision indicates good quality of the generated samples, and a higher recall suggests that the model data distribution covers the modes present in the true data distribution. Except for the RAE, all the methods considered in this experiment use as the prior distribution. For the RAE, we approximate the distribution in the latent space by the Gaussian distribution. Parameters of the Gaussian distribution derived from the latent representations are used to generate new data samples. We know that the latent distribution for the AVAE convergences to , where h is KDE bandwidth (section 3.3 ###reference_###). Therefore, we use samples drawn from the distribution, , to evaluate the generative capability of AVAEs. For a fair comparison, we have used the hyperparameter settings suggested by the author or recommended in the literature.\nThe FID and precision-recall scores are reported in Table 2 ###reference_###. The VAE does reasonably well for the MNIST and CelebA datasets. However, its performance drops significantly for the complex CIFAR10 dataset. Despite the importance given to match the aggregate posterior in the -TCVAE ( for comparable reconstruction loss), it fails to address the shortcomings of the VAE. Furthermore, the performance of the -TCVAE is poorer than the regular VAE. These results manifest the limitations in the formulation of the VAE (objective function and modeling assumptions) to model the data distributions. Other DLVMs (AAE, WAE, and AVAE) matching the aggregate posterior to the prior using a deterministic autoencoder do better than VAEs, in general. The AAE (aka WAE-GAN) closely follows the best performing methods under different evaluation metrics. We hypothesize that the kernel-based method used in WAE (WAE-MMD) to evaluate the mismatch between distributions is possibly leading to poor performance (justified by low entropy scores in Table 3 ###reference_###), as the reconstruction error is comparable to all other methods (refer to the MSE per pixel in Table 5 of the supplementary material). The performance of the WAE gets worse for the CIFAR10 dataset using high-dimensional latent space . The performance of the RAE is promising across all datasets under different evaluation scenarios. The generative capability of the AVAE is the best among all the considered methods for all the benchmark datasets under different evaluation metrics studied in this work, except for the precision on the CIFAR10 dataset (the second best). It is important for any generative model to capture the modes present in a dataset, indicated by high recall scores. The AVAE consistently outperforms other methods under the recall metric, resulting in the best FID scores under all evaluation scenarios.\nWe investigate the poor performance of the VAE and -TCVAE on the MNIST and CIFAR10 datasets. Other than the CelebA dataset, we observe the reconstruction loss of the VAE and -TCVAE to be relatively higher than other methods (refer to Table 5 in the supplementary material). 
On further analysis, we discovered that both the VAE and -TCVAE suffer from the posterior collapse when trained on the MNIST and CIFAR10 datasets (refer to section 2.3 in the supplementary). For the MNIST dataset, and (out of ) latent dimensions collapsed for the VAE and -TCVAE, respectively. Collapsed dimensions reduce the bottleneck capacity of a DLVM, resulting in higher reconstruction loss. The posterior collapse subsequently impairs the VAE and -TCVAE to model the data distributions, leading to the worst FID scores for the -TCVAE on the MNIST and CIFAR10 datasets, followed by the VAE."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "4.2.2",
|
| 85 |
+
"parent_section_id": "4.2",
|
| 86 |
+
"section_name": "4.2.2 Entropy of the Aggregate Posterior Distribution",
|
| 87 |
+
"text": "In this experiment, we evaluate deviations of the resultant aggregate distribution, , beyond the second moment (other than the mean and covariance), as we would expect from holes or clusters in the distribution. For this, we use the entropy of the aggregate posterior distribution to quantify how close it is to Gaussian, after whitening the distribution, , to remove the effects of the second moment mismatch. Because the Gaussian distribution has the maximum entropy (for a given mean and covariance), we use the entropy of the whitened data. Entropy is defined as\nwhere is the aggregate posterior distribution over the whitened data. We use KDE (defined in 3 ###reference_###) for estimating the density for all methods because it can, in principle model the deviations we are seeking to evaluate. The bandwidth required in KDE for the latent dimensions (for different datasets) and KDE samples is derived using the strategy defined in section 3.4 ###reference_###. The entropy computation uses the held-out set of the datasets studied in this work. The entropy of the standard normal distribution (leaving out the constants) derived analytically serves as the ground truth.\n###figure_1### From the results reported in Table 3 ###reference_###, we observe that the entropy scores of the VAE and -TCVAE are far off from the ground truth for the MNIST and CIFAR10 datasets even using the whitened latent representations. Low entropy scores of the VAE and -TCVAE can be attributed to the formation of clusters as observed in Figure 1 ###reference_### (refer to section 2.3 in the supplementary for more results). Besides the posterior collapse, the entropy scores offer another perspective to explain the high FID scores of the generated samples produced by the VAE and -TCVAE for the MNIST and CIFAR10 datasets. Poor FID scores of the WAE can be related to the low entropy values across datasets. The low entropy scores of the RAE are not surprising because it does not attempt to match any prior distribution in the latent space. However, the regularization approach in the RAE is more effective than the VAE. The AAE has entropy scores comparable to the AVAE, and it also helps us comprehend the consistent FID scores of the generated samples. The best entropy score of the AVAE for all the datasets indicates the close matching of the aggregate posterior to the prior, as shown in Figure 1 ###reference_###, where we do not observe clustering of the latent representations."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "4.2.3",
|
| 91 |
+
"parent_section_id": "4.2",
|
| 92 |
+
"section_name": "4.2.3 Ablation Study",
|
| 93 |
+
"text": "In this experiment, we study the effect of the number of KDE samples on the performance of the AVAE under different evaluation metrics. The number of KDE samples used in the ablation study is , and for the MNIST and CIFAR10 datasets. We report the FID, precision-recall scores, and the reconstruction loss, i.e., the mean squared error (MSE) per pixel in Table 4 ###reference_###. The AVAE produces comparable results with a very few KDE samples, , even in high-dimensional latent space ( for the CIFAR-10 dataset). The stable optimization of the AVAE objective function with fewer KDE samples, such as for the MNIST and CIFAR10 datasets, corroborates the accuracy and robustness of the proposed KDE bandwidth estimation technique. Overall, the performance of the AVAE under multiple metrics is slightly better with higher KDE samples. Therefore, we use and for the MNIST, CelebA, and CIFAR10 datasets for all the evaluations reported in the paper."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "5",
|
| 97 |
+
"parent_section_id": null,
|
| 98 |
+
"section_name": "Conclusion",
|
| 99 |
+
"text": "We propose a novel algorithm, the aggregate VAE (AVAE), based on the framework of the VAE to match the aggregate posterior distribution to the prior using KDE. Using the known properties of the prior distribution, we devised a method to estimate KDE bandwidth in high-dimensional latent spaces (dimensions ) that allows the modeling of complex datasets using the AVAE. The dynamic adjustment of the scaling factor, , using the validation data avoids the hyperparameter tuning using cross-validation. The training of the AVAE does not suffer from the posterior collapse, as in VAEs and -TCVAEs, and we avoid such failures without the modification of the ELBO formulation [69 ###reference_b69###, 24 ###reference_b24###, 14 ###reference_b14###] and use of any complex training schedules [80 ###reference_b80###, 81 ###reference_b81###, 82 ###reference_b82###]. We demonstrate the efficacy of the proposed method on multiple datasets, and the AVAE consistently outperforms the competing methods under different evaluation metrics. Close matching of the aggregate latent distribution to the prior with comparable reconstruction loss resulted in the best FID, precision, and recall scores for the AVAE. High entropy scores for the AVAE indicate that the latent representations are close to Gaussian and have a lower chance of encountering holes/clusters in the distribution. Through extensive empirical evaluation, we demonstrate the effectiveness of KDE in matching distributions in high-dimensional latent spaces compared to other methods, such as the kernel-based method used in the WAE-MMD and the discriminator in the AAE. In the AVAE, the cardinal latent axes do not represent the generative factors, unlike the regular VAE, due to matching the aggregate posterior to isotropic Gaussian, invariant to rotation. We plan to study this issue and devise a statistical method to identify the latent explanatory vectors."
|
| 100 |
+
}
|
| 101 |
+
],
|
| 102 |
+
"appendix": [],
|
| 103 |
+
"tables": {
|
| 104 |
+
"1": {
|
| 105 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S3.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Optimal bandwidths, (estimated using the objective function defined in <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2311.07693v2#S3.E7\" title=\"In 3.4 KDE Bandwidth Estimate \u2023 3 Method \u2023 Matching Aggregate Posteriors in the Variational Autoencoder\"><span class=\"ltx_text ltx_ref_tag\">7</span></a>) and corresponding bias-corrected estimations ( scaled by the factor ) for a given latent dimension () and number of KDE samples (). The estimated bandwidth increases with increasing dimensions (vertical) and decreases with increasing sample size (horizontal). For higher latent dimensions with limited KDE samples (e.g., starting at with ), . However, the bias-corrected bandwidth .</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S3.T1.94\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S3.T1.25.5\">\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_l ltx_border_t\" id=\"S3.T1.25.5.6\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_rr ltx_border_t\" id=\"S3.T1.25.5.7\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_rr ltx_border_t\" colspan=\"3\" id=\"S3.T1.21.1.1\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.25.5.8\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.25.5.9\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_rr ltx_border_t\" colspan=\"3\" id=\"S3.T1.22.2.2\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.25.5.10\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.25.5.11\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_rr ltx_border_t\" colspan=\"3\" id=\"S3.T1.23.3.3\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.25.5.12\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.25.5.13\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_rr ltx_border_t\" colspan=\"3\" id=\"S3.T1.24.4.4\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.25.5.14\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.25.5.15\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" colspan=\"3\" id=\"S3.T1.25.5.5\"></th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.36.16\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_l\" id=\"S3.T1.26.6.1\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_rr\" id=\"S3.T1.36.16.12\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.27.7.2\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T1.36.16.13\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_rr ltx_border_t\" id=\"S3.T1.28.8.3\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.36.16.14\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.36.16.15\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.29.9.4\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T1.36.16.16\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_rr ltx_border_t\" id=\"S3.T1.30.10.5\"></th>\n<th 
class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.36.16.17\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.36.16.18\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.31.11.6\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T1.36.16.19\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_rr ltx_border_t\" id=\"S3.T1.32.12.7\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.36.16.20\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.36.16.21\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.33.13.8\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T1.36.16.22\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_rr ltx_border_t\" id=\"S3.T1.34.14.9\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.36.16.23\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.36.16.24\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S3.T1.35.15.10\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T1.36.16.25\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S3.T1.36.16.11\"></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S3.T1.46.26\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_tt\" id=\"S3.T1.46.26.11\">10</td>\n<td class=\"ltx_td ltx_border_rr ltx_border_tt\" id=\"S3.T1.46.26.12\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T1.37.17.1\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_tt\" id=\"S3.T1.46.26.13\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_tt\" id=\"S3.T1.38.18.2\"></td>\n<td class=\"ltx_td ltx_border_tt\" id=\"S3.T1.46.26.14\"></td>\n<td class=\"ltx_td ltx_border_tt\" id=\"S3.T1.46.26.15\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T1.39.19.3\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_tt\" id=\"S3.T1.46.26.16\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_tt\" id=\"S3.T1.40.20.4\"></td>\n<td class=\"ltx_td ltx_border_tt\" id=\"S3.T1.46.26.17\"></td>\n<td class=\"ltx_td ltx_border_tt\" id=\"S3.T1.46.26.18\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T1.41.21.5\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_tt\" id=\"S3.T1.46.26.19\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_tt\" id=\"S3.T1.42.22.6\"></td>\n<td class=\"ltx_td ltx_border_tt\" id=\"S3.T1.46.26.20\"></td>\n<td class=\"ltx_td ltx_border_tt\" id=\"S3.T1.46.26.21\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T1.43.23.7\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_tt\" id=\"S3.T1.46.26.22\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_tt\" id=\"S3.T1.44.24.8\"></td>\n<td class=\"ltx_td ltx_border_tt\" id=\"S3.T1.46.26.23\"></td>\n<td class=\"ltx_td ltx_border_tt\" id=\"S3.T1.46.26.24\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_tt\" id=\"S3.T1.45.25.9\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_tt\" id=\"S3.T1.46.26.25\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S3.T1.46.26.10\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.56.36\">\n<td class=\"ltx_td ltx_align_center 
ltx_border_l ltx_border_t\" id=\"S3.T1.56.36.11\">20</td>\n<td class=\"ltx_td ltx_border_rr ltx_border_t\" id=\"S3.T1.56.36.12\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.47.27.1\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.56.36.13\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.48.28.2\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.56.36.14\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.56.36.15\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.49.29.3\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.56.36.16\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.50.30.4\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.56.36.17\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.56.36.18\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.51.31.5\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.56.36.19\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.52.32.6\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.56.36.20\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.56.36.21\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.53.33.7\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.56.36.22\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.54.34.8\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.56.36.23\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.56.36.24\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.55.35.9\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.56.36.25\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.56.36.10\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.64.44\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_t\" id=\"S3.T1.64.44.9\">40</td>\n<td class=\"ltx_td ltx_border_rr ltx_border_t\" id=\"S3.T1.64.44.10\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.57.37.1\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.64.44.11\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.64.44.12\">0.72</td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.64.44.13\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.64.44.14\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.58.38.2\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.64.44.15\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.64.44.16\">0.71</td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.64.44.17\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.64.44.18\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.59.39.3\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.64.44.19\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.60.40.4\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.64.44.20\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.64.44.21\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.61.41.5\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.64.44.22\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.62.42.6\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.64.44.23\"></td>\n<td 
class=\"ltx_td ltx_border_t\" id=\"S3.T1.64.44.24\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.63.43.7\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.64.44.25\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.64.44.8\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.74.54\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_t\" id=\"S3.T1.74.54.11\">50</td>\n<td class=\"ltx_td ltx_border_rr ltx_border_t\" id=\"S3.T1.74.54.12\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.65.45.1\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.74.54.13\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.66.46.2\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.74.54.14\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.74.54.15\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.67.47.3\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.74.54.16\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.68.48.4\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.74.54.17\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.74.54.18\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.69.49.5\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.74.54.19\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.70.50.6\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.74.54.20\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.74.54.21\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.71.51.7\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.74.54.22\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.72.52.8\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.74.54.23\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.74.54.24\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.73.53.9\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.74.54.25\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.74.54.10\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.84.64\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_t\" id=\"S3.T1.84.64.11\">70</td>\n<td class=\"ltx_td ltx_border_rr ltx_border_t\" id=\"S3.T1.84.64.12\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.75.55.1\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.84.64.13\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.76.56.2\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.84.64.14\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.84.64.15\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.77.57.3\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.84.64.16\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.78.58.4\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.84.64.17\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.84.64.18\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.79.59.5\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.84.64.19\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.80.60.6\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.84.64.20\"></td>\n<td 
class=\"ltx_td ltx_border_t\" id=\"S3.T1.84.64.21\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.81.61.7\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.84.64.22\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_t\" id=\"S3.T1.82.62.8\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.84.64.23\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S3.T1.84.64.24\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T1.83.63.9\"></td>\n<td class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T1.84.64.25\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T1.84.64.10\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S3.T1.94.74\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_t\" id=\"S3.T1.94.74.11\">100</td>\n<td class=\"ltx_td ltx_border_b ltx_border_rr ltx_border_t\" id=\"S3.T1.94.74.12\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S3.T1.85.65.1\"></td>\n<td class=\"ltx_td ltx_border_b ltx_border_r ltx_border_t\" id=\"S3.T1.94.74.13\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_rr ltx_border_t\" id=\"S3.T1.86.66.2\"></td>\n<td class=\"ltx_td ltx_border_b ltx_border_t\" id=\"S3.T1.94.74.14\"></td>\n<td class=\"ltx_td ltx_border_b ltx_border_t\" id=\"S3.T1.94.74.15\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S3.T1.87.67.3\"></td>\n<td class=\"ltx_td ltx_border_b ltx_border_r ltx_border_t\" id=\"S3.T1.94.74.16\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_rr ltx_border_t\" id=\"S3.T1.88.68.4\"></td>\n<td class=\"ltx_td ltx_border_b ltx_border_t\" id=\"S3.T1.94.74.17\"></td>\n<td class=\"ltx_td ltx_border_b ltx_border_t\" id=\"S3.T1.94.74.18\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S3.T1.89.69.5\"></td>\n<td class=\"ltx_td ltx_border_b ltx_border_r ltx_border_t\" id=\"S3.T1.94.74.19\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_rr ltx_border_t\" id=\"S3.T1.90.70.6\"></td>\n<td class=\"ltx_td ltx_border_b ltx_border_t\" id=\"S3.T1.94.74.20\"></td>\n<td class=\"ltx_td ltx_border_b ltx_border_t\" id=\"S3.T1.94.74.21\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S3.T1.91.71.7\"></td>\n<td class=\"ltx_td ltx_border_b ltx_border_r ltx_border_t\" id=\"S3.T1.94.74.22\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_rr ltx_border_t\" id=\"S3.T1.92.72.8\"></td>\n<td class=\"ltx_td ltx_border_b ltx_border_t\" id=\"S3.T1.94.74.23\"></td>\n<td class=\"ltx_td ltx_border_b ltx_border_t\" id=\"S3.T1.94.74.24\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S3.T1.93.73.9\"></td>\n<td class=\"ltx_td ltx_border_b ltx_border_r ltx_border_t\" id=\"S3.T1.94.74.25\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r ltx_border_t\" id=\"S3.T1.94.74.10\"></td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 106 |
+
"capture": "Table 1: Optimal bandwidths, (estimated using the objective function defined in 7) and corresponding bias-corrected estimations ( scaled by the factor ) for a given latent dimension () and number of KDE samples (). The estimated bandwidth increases with increasing dimensions (vertical) and decreases with increasing sample size (horizontal). For higher latent dimensions with limited KDE samples (e.g., starting at with ), . However, the bias-corrected bandwidth ."
|
| 107 |
+
},
|
| 108 |
+
"2": {
|
| 109 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T2\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>FID <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2311.07693v2#bib.bib11\" title=\"\">11</a>]</cite>, and precision-recall <cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2311.07693v2#bib.bib75\" title=\"\">75</a>]</cite> scores of competing methods. The <span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.72.1\">best</span> score is in <span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.73.2\">bold</span>, and the <span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.T2.74.3\">second best</span> score is <span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.T2.75.4\">underlined</span>.</figcaption>\n<div class=\"ltx_inline-block ltx_transformed_outer\" id=\"S4.T2.67\" style=\"width:433.6pt;height:91.8pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(-123.3pt,26.1pt) scale(0.637556389959427,0.637556389959427) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S4.T2.67.67\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T2.3.3.3\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.3.3.3.4\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S4.T2.3.3.3.4.1\">\n<span class=\"ltx_inline-block ltx_parbox ltx_align_middle\" id=\"S4.T2.3.3.3.4.1.1\" style=\"width:42.7pt;\">\n<span class=\"ltx_p\" id=\"S4.T2.3.3.3.4.1.1.1\">Methods</span>\n</span></span></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.3.3.3.5\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" colspan=\"3\" id=\"S4.T2.1.1.1.1\">MNIST\u2004\u2004\n</th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.3.3.3.6\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" colspan=\"3\" id=\"S4.T2.2.2.2.2\">CelebA\u2004\u2004\n</th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.3.3.3.7\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" colspan=\"3\" id=\"S4.T2.3.3.3.3\">CIFAR10\u2004\u2004\n</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.12.12.12\">\n<th class=\"ltx_td ltx_th ltx_th_column\" id=\"S4.T2.12.12.12.10\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.4.4.4.1\">\n<span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T2.4.4.4.1.1\">FID</span> \n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.5.5.5.2\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T2.5.5.5.2.1\">Precision</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.6.6.6.3\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T2.6.6.6.3.1\">Recall</span></th>\n<th class=\"ltx_td ltx_th ltx_th_column\" id=\"S4.T2.12.12.12.11\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.7.7.7.4\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T2.7.7.7.4.1\">FID</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.8.8.8.5\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T2.8.8.8.5.1\">Precision</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.9.9.9.6\"><span class=\"ltx_text ltx_font_smallcaps\" 
id=\"S4.T2.9.9.9.6.1\">Recall</span></th>\n<th class=\"ltx_td ltx_th ltx_th_column\" id=\"S4.T2.12.12.12.12\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.10.10.10.7\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T2.10.10.10.7.1\">FID</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.11.11.11.8\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T2.11.11.11.8.1\">Precision</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.12.12.12.9\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T2.12.12.12.9.1\">Recall</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T2.21.21.21\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.21.21.21.10\">VAE</td>\n<td class=\"ltx_td ltx_border_t\" id=\"S4.T2.21.21.21.11\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.13.13.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.14.14.14.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.15.15.15.3\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S4.T2.21.21.21.12\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.16.16.16.4\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.17.17.17.5\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.18.18.18.6\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S4.T2.21.21.21.13\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.19.19.19.7\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.20.20.20.8\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.21.21.21.9\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.31.31.31\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.22.22.22.1\">\n-TCVAE</td>\n<td class=\"ltx_td\" id=\"S4.T2.31.31.31.11\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.23.23.23.2\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.24.24.24.3\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.25.25.25.4\"></td>\n<td class=\"ltx_td\" id=\"S4.T2.31.31.31.12\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.26.26.26.5\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.27.27.27.6\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.28.28.28.7\"></td>\n<td class=\"ltx_td\" id=\"S4.T2.31.31.31.13\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.29.29.29.8\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.30.30.30.9\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.31.31.31.10\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.40.40.40\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.40.40.40.10\">RAE</td>\n<td class=\"ltx_td\" id=\"S4.T2.40.40.40.11\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.32.32.32.1\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.33.33.33.2\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.34.34.34.3\"></td>\n<td class=\"ltx_td\" id=\"S4.T2.40.40.40.12\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.35.35.35.4\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.36.36.36.5\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.37.37.37.6\"></td>\n<td class=\"ltx_td\" id=\"S4.T2.40.40.40.13\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.38.38.38.7\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.39.39.39.8\"></td>\n<td class=\"ltx_td 
ltx_align_center\" id=\"S4.T2.40.40.40.9\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.49.49.49\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.49.49.49.10\">AAE</td>\n<td class=\"ltx_td\" id=\"S4.T2.49.49.49.11\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.41.41.41.1\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.42.42.42.2\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.43.43.43.3\"></td>\n<td class=\"ltx_td\" id=\"S4.T2.49.49.49.12\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.44.44.44.4\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.45.45.45.5\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.46.46.46.6\"></td>\n<td class=\"ltx_td\" id=\"S4.T2.49.49.49.13\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.47.47.47.7\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.48.48.48.8\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.49.49.49.9\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.58.58.58\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.58.58.58.10\">WAE</td>\n<td class=\"ltx_td\" id=\"S4.T2.58.58.58.11\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.50.50.50.1\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.51.51.51.2\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.52.52.52.3\"></td>\n<td class=\"ltx_td\" id=\"S4.T2.58.58.58.12\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.53.53.53.4\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.54.54.54.5\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.55.55.55.6\"></td>\n<td class=\"ltx_td\" id=\"S4.T2.58.58.58.13\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.56.56.56.7\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.57.57.57.8\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.58.58.58.9\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.67.67.67\">\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.67.67.67.10\" style=\"padding-bottom:6.45831pt;\">AVAE</td>\n<td class=\"ltx_td ltx_border_b\" id=\"S4.T2.67.67.67.11\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.59.59.59.1\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.60.60.60.2\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.61.61.61.3\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_border_b\" id=\"S4.T2.67.67.67.12\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.62.62.62.4\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.63.63.63.5\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.64.64.64.6\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_border_b\" id=\"S4.T2.67.67.67.13\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.65.65.65.7\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.66.66.66.8\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.67.67.67.9\" style=\"padding-bottom:6.45831pt;\"></td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 110 |
+
"capture": "Table 2: FID [11], and precision-recall [75] scores of competing methods. The best score is in bold, and the second best score is underlined."
|
| 111 |
+
},
|
| 112 |
+
"3": {
|
| 113 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T3\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>Mean entropy of the produced by competing methods on the benchmark datasets. The <span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.32.1\">best</span> score is in <span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.33.2\">bold</span>, and the <span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.T3.34.3\">second best</span> score is <span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.T3.35.4\">underlined</span>. The entropy of the standard normal distribution is used as the ground truth.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T3.27\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T3.5.3\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_l ltx_border_rr ltx_border_t\" id=\"S4.T3.5.3.4\" style=\"padding-bottom:2.15277pt;\">Method</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_rr ltx_border_t\" id=\"S4.T3.3.1.1\" style=\"padding-bottom:2.15277pt;\">MNIST \n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_rr ltx_border_t\" id=\"S4.T3.4.2.2\" style=\"padding-bottom:2.15277pt;\">CelebA \n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T3.5.3.3\" style=\"padding-bottom:2.15277pt;\">CIFAR10 \n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T3.8.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr ltx_border_tt\" id=\"S4.T3.8.6.4\">VAE</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_tt\" id=\"S4.T3.6.4.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr ltx_border_tt\" id=\"S4.T3.7.5.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_tt\" id=\"S4.T3.8.6.3\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.12.10\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr\" id=\"S4.T3.9.7.1\">\n-TCVAE</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T3.10.8.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T3.11.9.3\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T3.12.10.4\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.15.13\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr\" id=\"S4.T3.15.13.4\">RAE</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T3.13.11.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T3.14.12.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T3.15.13.3\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.18.16\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr\" id=\"S4.T3.18.16.4\">AAE</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T3.16.14.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T3.17.15.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T3.18.16.3\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.21.19\">\n<td class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr\" id=\"S4.T3.21.19.4\">WAE</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T3.19.17.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T3.20.18.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T3.21.19.3\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.24.22\">\n<td 
class=\"ltx_td ltx_align_center ltx_border_l ltx_border_rr\" id=\"S4.T3.24.22.4\">AVAE</td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T3.22.20.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_rr\" id=\"S4.T3.23.21.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T3.24.22.3\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.27.25\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_l ltx_border_rr\" id=\"S4.T3.27.25.4\" style=\"padding-bottom:6.45831pt;\">Standard Normal</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_rr\" id=\"S4.T3.25.23.1\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_rr\" id=\"S4.T3.26.24.2\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S4.T3.27.25.3\" style=\"padding-bottom:6.45831pt;\"></td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 114 |
+
"capture": "Table 3: Mean entropy of the produced by competing methods on the benchmark datasets. The best score is in bold, and the second best score is underlined. The entropy of the standard normal distribution is used as the ground truth."
|
| 115 |
+
},
|
| 116 |
+
"4": {
|
| 117 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T4\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 4: </span>Comparison of the performance of the AVAE with <span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.44.1\">different</span> number of KDE samples under multiple metrics for the MNIST and CAIFAR10 datasets.\n</figcaption>\n<div class=\"ltx_inline-block ltx_transformed_outer\" id=\"S4.T4.42\" style=\"width:433.6pt;height:72.7pt;vertical-align:-0.7pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(-108.5pt,18.0pt) scale(0.666525532673325,0.666525532673325) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S4.T4.42.42\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T4.2.2.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.2.2.2.3\" rowspan=\"2\"><span class=\"ltx_text\" id=\"S4.T4.2.2.2.3.1\">\n<span class=\"ltx_inline-block ltx_parbox ltx_align_middle\" id=\"S4.T4.2.2.2.3.1.1\" style=\"width:42.7pt;\">\n<span class=\"ltx_p\" id=\"S4.T4.2.2.2.3.1.1.1\">KDE samples</span>\n</span></span></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.2.2.2.4\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" colspan=\"4\" id=\"S4.T4.1.1.1.1\">MNIST\u2004\u2004\n</th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.2.2.2.5\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.2.2.2.6\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" colspan=\"4\" id=\"S4.T4.2.2.2.2\">CIFAR10\u2004\u2004\n</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.10.10.10\">\n<th class=\"ltx_td ltx_th ltx_th_column\" id=\"S4.T4.10.10.10.9\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.3.3.3.1\">\n<span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T4.3.3.3.1.1\">FID</span> \n</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.4.4.4.2\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T4.4.4.4.2.1\">Precision</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.5.5.5.3\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T4.5.5.5.3.1\">Recall</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.6.6.6.4\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T4.6.6.6.4.1\">MSE</span></th>\n<th class=\"ltx_td ltx_th ltx_th_column\" id=\"S4.T4.10.10.10.10\"></th>\n<th class=\"ltx_td ltx_th ltx_th_column\" id=\"S4.T4.10.10.10.11\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.7.7.7.5\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T4.7.7.7.5.1\">FID</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.8.8.8.6\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T4.8.8.8.6.1\">Precision</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.9.9.9.7\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T4.9.9.9.7.1\">Recall</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.10.10.10.8\"><span class=\"ltx_text ltx_font_smallcaps\" id=\"S4.T4.10.10.10.8.1\">MSE</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T4.18.18.18\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" 
id=\"S4.T4.18.18.18.9\">1000</td>\n<td class=\"ltx_td ltx_border_t\" id=\"S4.T4.18.18.18.10\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.11.11.11.1\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.12.12.2\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.13.13.13.3\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.14.14.14.4\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S4.T4.18.18.18.11\"></td>\n<td class=\"ltx_td ltx_border_t\" id=\"S4.T4.18.18.18.12\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.15.15.15.5\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.16.16.16.6\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.17.17.17.7\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.18.18.18.8\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.26.26.26\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.26.26.26.9\">2000</td>\n<td class=\"ltx_td\" id=\"S4.T4.26.26.26.10\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.19.19.19.1\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.20.20.20.2\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.21.21.21.3\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.22.22.22.4\"></td>\n<td class=\"ltx_td\" id=\"S4.T4.26.26.26.11\"></td>\n<td class=\"ltx_td\" id=\"S4.T4.26.26.26.12\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.23.23.23.5\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.24.24.24.6\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.25.25.25.7\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.26.26.26.8\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.34.34.34\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.34.34.34.9\">5000</td>\n<td class=\"ltx_td\" id=\"S4.T4.34.34.34.10\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.27.27.27.1\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.28.28.28.2\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.29.29.29.3\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.30.30.30.4\"></td>\n<td class=\"ltx_td\" id=\"S4.T4.34.34.34.11\"></td>\n<td class=\"ltx_td\" id=\"S4.T4.34.34.34.12\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.31.31.31.5\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.32.32.32.6\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.33.33.33.7\"></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.34.34.34.8\"></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.42.42.42\">\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T4.42.42.42.9\" style=\"padding-bottom:6.45831pt;\">10000</td>\n<td class=\"ltx_td ltx_border_b\" id=\"S4.T4.42.42.42.10\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T4.35.35.35.1\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T4.36.36.36.2\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T4.37.37.37.3\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T4.38.38.38.4\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_border_b\" id=\"S4.T4.42.42.42.11\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_border_b\" id=\"S4.T4.42.42.42.12\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center 
ltx_border_b\" id=\"S4.T4.39.39.39.5\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T4.40.40.40.6\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T4.41.41.41.7\" style=\"padding-bottom:6.45831pt;\"></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T4.42.42.42.8\" style=\"padding-bottom:6.45831pt;\"></td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 118 |
+
"capture": "Table 4: Comparison of the performance of the AVAE with different number of KDE samples under multiple metrics for the MNIST and CAIFAR10 datasets.\n"
|
| 119 |
+
}
|
| 120 |
+
},
|
| 121 |
+
"image_paths": {
|
| 122 |
+
"1": {
|
| 123 |
+
"figure_path": "2311.07693v2_figure_1.png",
|
| 124 |
+
"caption": "Figure 1: The metric multidimensional scaling (mMDS) [101] plot in 2D of the latent representations (\ud835\udcb5\u2208\u211d16\\mathcal{Z}\\in\\mathbb{R}{}^{16}caligraphic_Z \u2208 blackboard_R start_FLOATSUPERSCRIPT 16 end_FLOATSUPERSCRIPT) produced by the VAE [22], \u03b2\ud835\udefd\\betaitalic_\u03b2-TCVAE [72] and the AVAE (proposed method) on the MNIST dataset [94]. Samples from the target distribution, \ud835\udca9\u2062(\ud835\udfce,\ud835\udc08)\ud835\udca90\ud835\udc08\\mathcal{N}\\left(\\mathbf{0},\\mathbf{I}\\right)caligraphic_N ( bold_0 , bold_I ), are used as the ground truth. The regions of low probability and unwanted aggregation of data points in different parts of the latent space of the VAE and \u03b2\ud835\udefd\\betaitalic_\u03b2-TCVAE clearly show the mismatch with the ground truth. The AVAE closely matches the target distribution corroborated by empirical evaluations.",
|
| 125 |
+
"url": "http://arxiv.org/html/2311.07693v2/extracted/5903027/images/Distribution_matching.png"
|
| 126 |
+
}
|
| 127 |
+
},
|
| 128 |
+
"validation": true,
|
| 129 |
+
"references": [],
|
| 130 |
+
"url": "http://arxiv.org/html/2311.07693v2"
|
| 131 |
+
}
|
20241004/2311.09756v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
20241004/2311.10040v2.json
ADDED
|
@@ -0,0 +1,545 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "A characterization of efficiently compilable constraint languages",
|
| 3 |
+
"abstract": "A central task in knowledge compilation is to compile a\nCNF-SAT instance into a succinct representation\nformat that allows efficient operations such as testing satisfiability, counting, or enumerating all\nsolutions.\nUseful representation formats studied in this area range from ordered\nbinary decision diagrams (OBDDs) to circuits in decomposable negation normal\nform (DNNFs).",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "One of the main aims of knowledge compilation is to encode\nsolution sets of computational problems into a succinct but usable\nform [DarwicheM02]. Typical target formats for this compilation\nprocess are different forms of decision\ndiagrams [Wegener00] or restricted classes of Boolean\ncircuits. One of the most general representation formats are circuits in\ndecomposable negation normal form (DNNF), which have been\nintroduced in [Darwiche01] as a compilation target for Boolean functions.\nRelated notions, which also rely on the central decomposability\nproperty, have been independently considered in databases [OlteanuZ15, Olteanu16] and\ncircuit complexity [RazSY08, AlonKV20]. Besides these, DNNF circuits and related\ncompilation classes have also been proven useful in areas like probabilistic inference [BroeckS17], constraint satisfaction [KoricheLMT15, BerkholzV23, AmilhastreFNP14, MateescuD06], MSO evaluation [AmarilliBJM17], QBF\nsolving [CapelliM19], to\nname a few.\nThe DNNF representation format has become a popular\ndata structure because it has a particularly good balance\nbetween generality and usefulness [DarwicheM02].\nThere has also been a large amount of practical work on compiling solution\nsets into DNNF or its fragments, see\ne.g. [LagniezM17, Darwiche04, MuiseMBH12, ChoiD13, OztokD15, KoricheLMT15]. In\nall these works, it assumed that the solutions to be compiled are\ngiven as a system of constraints, often as a set of disjunctive\nBoolean clauses, i.\u2009e., in conjunctive normal form (CNF).\nIn this setting, however, strong lower bounds are known: it was shown that\nthere are CNF-formulas whose representation as DNNF requires\nexponential\nsize [BovaCMS14, BovaCMS16, Capelli17, AmarilliCMS20]. The\nconstraints out of which the hard instances\nin [BovaCMS14, AmarilliCMS20, Capelli17] are constructed are very\neasy\u2014they are only monotone clauses of two variables. Faced with\nthese negative results, it is natural to ask if there are any classes\nof constraints that guarantee efficient compilability into DNNF.\nWe answer this question completely and prove a tight characterization for every constraint language .\nWe first examine the combinatorial property of strong blockwise decomposability and show that if a constraint language has this\nproperty, any system of constraints over can be compiled into\na DNNF representation of linear size within polynomial time.\nOtherwise, there are systems of\nconstraints that require exponential size DNNF representations. In the\ntractable case, one can even compile to the restricted fragment of\nfree decision diagrams (FDD) that are in general known to be\nexponentially less succinct than DNNF [Wegener00, DarwicheM02].\nWe also consider the important special case of so-called structured\nDNNF [PipatsrisawatD08] which are a generalization of the\nwell-known ordered binary decision diagrams [Bryant86]. We show\nthat there is a restriction of strong blockwise decomposability that\ndetermines if systems of constraints over a set can be\ncompiled into structured DNNF in polynomial time. In the tractable case, we can in fact\nagain compile into a restricted fragment, this time ordered decision\ndiagrams (ODD).\nFurthermore, we separate both notions of constraint languages admitting\nstructured and only unstructured representations and thus give a complexity picture of\nthe tractability landscape. 
We also show that it is decidable whether\na given constraint language is strongly (uniformly) blockwise\ndecomposable (a question left open in the conference version [DBLP:conf/stacs/BerkholzMW24]).\nLet us stress that all our lower bounds provide unconditional size lower\nbounds on (structured) DNNFs and thus do not depend on unproven complexity assumptions."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Preliminaries",
|
| 15 |
+
"text": ""
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Blockwise decomposability",
|
| 21 |
+
"text": "In this section we introduce our central notion of blockwise and\nuniformly blockwise decomposable constraints and formulate our main theorems that lead to a characterization of\nefficiently representable constraint languages.\nThe first simple insight is the following. Suppose two constraints\n and with disjoint scopes are efficiently\nrepresentable, e.\u2009g., by a small ODD. Then their Cartesian product\n also has a small ODD: given an assignment , we just need\nto check independently whether and , for\nexample, by first using the ODD for and then using the\nODD for . Thus, if a constraint can be expressed as a\nCartesian product of two constraints, we only have to investigate\nwhether the two parts are easy to represent. This brings us to our\nfirst definition.\nLet be a constraint and be a partition of its scope.\nWe call decomposable w.r.t. if\n.\nA constraint is indecomposable if it is only\ndecomposable w.r.t. trivial partitions where\n or for .\nNext, we want to relax this notion to constraints that are \u201calmost\u201d\ndecomposable. Suppose we have four relations of arity \nand of arity and let be two distinct domain\nelements. Let\nThe constraint may now not be decomposable in any\nnon-trivial variable partition. However, after fixing values for and the\nremaining selection \nis\ndecomposable in for any pair . Thus, an ODD could first read values for and then use ODDs\nfor and if , ODDs for\n and if , or reject\notherwise. This requires, of course, that and\n, as well as and , have small\nODDs over the same variable order. For FDDs and DNNFs, however, we would\nnot need this requirement on the variable orders.\nTo reason about the remaining constraints after two variables have been fixed, it is helpful to\nuse the following matrix notation. Let be a\nconstraint and be two variables in its scope. The selection\nmatrix is the matrix where the rows and\ncolumns are indexed by domain elements and the entries are\nthe constraints\nLet and a constraint with\nconstraint relation\n The selection matrix in and is depicted below,\nwhere the first line and column\nare the indices from and the matrix entries contain the\nconstraint relations of the corresponding constraints :\n\u220e\nA block in the selection matrix is a subset of rows and columns . We also associate with a block the\ncorresponding constraint\n.\nA selection matrix is a proper block matrix, if there exist\npairwise disjoint and pairwise disjoint\n such that for all :\nThe selection matrix in Example 3 ###reference_### is a proper\nblock matrix with , , , .\n\u220e\nWe will make use of the following alternative characterization of\nproper block matrices.\nThe simple proof is similar to [DyerR13, Lemma 1].\nA selection matrix is a proper block matrix\nif and only if it has no -submatrix with exactly one\nempty\nentry.\nLet be a proper block matrix and , be the pairwise disjoint\nsets provided by the definition.\nA -submatrix may intersect , or blocks. If it intersects blocks, then all of the entries of are empty.\nIf intersects only one block, then either , or entries are in that block and thus non-empty. So the number of empty entries in is , or .\nIf intersect two blocks, then exactly of its entries lie in those blocks and are thus non-empty. So has empty entries in that case.\nOverall, may have , , , or empty entries, so it satisfies the claim.\nFor the other direction, we must show how to find the block\nstructure. 
Take one non-empty entry and reorder the rows to and\ncolumns to \nsuch that this entry is in the first column and first row, and the\nfirst row (column) starts with () non-empty entries followed by\nempty entries. Consider for all the -submatrix indexed\nby and . Since it does not have exactly\none empty entry, we get:\nIf and , then .\nIf and , then .\nIf and , then .\nThus, we can choose \nand proceed with the submatrix on rows and columns inductively.\n\u220e\nNow we can define our central tractability criterion for constraints\nthat have small ODDs, namely that any selection matrix is a proper\nblock matrix whose blocks are decomposable over the same variable partition\nthat separates and .\nA constraint is uniformly blockwise\ndecomposable in if is a proper block\nmatrix with partitions , and there is a partition of with and such that each block is decomposable in . A constraint is uniformly blockwise decomposable if it is uniformly decomposable in any pair .\nIn the non-uniform version of blockwise decomposability, it is allowed that the\nblocks are decomposable over different partitions. This property will\nbe used to characterize constraints having small FDD representations.\nA constraint is blockwise decomposable\nin if is a proper block matrix with partitions\n, and for each \nthere is a partition of with and\n\nsuch that each block \nis decomposable in\n.\nA constraint is blockwise decomposable\nif it is decomposable in any pair .\nNote that every uniformly blockwise decomposable relation is also\nblockwise decomposable. The next example illustrates that the converse\ndoes not hold.\nConsider the 4-ary constraint relations\nThen the selection matrix of the constraint \nhas two non-empty blocks:\nThe first block \nis decomposable in and , while the second block is\ndecomposable in and . Thus, the constraint is blockwise\ndecomposable in , but not uniformly blockwise decomposable.\n\u220e\nFinally, we transfer these characterizations to relations and constraint languages:\nA -ary relation is (uniformly) blockwise decomposable, if the\nconstraint is (uniformly) blockwise decomposable for\npairwise distinct variables .\nA constraint language is (uniformly) blockwise decomposable\nif every relation in is (uniformly) blockwise\ndecomposable.\nA constraint language is strongly (uniformly) blockwise\ndecomposable\nif its co-clone is (uniformly) blockwise\ndecomposable.\nNow we are ready to formulate our main theorems. 
The first one states that\nthe strongly uniformly blockwise decomposable constraint languages are\nprecisely those that can be efficiently compiled to a structured\nrepresentation format (anything between ODDs and structured\nDNNFs).\nLet be a constraint language.\nIf is strongly uniformly blockwise decomposable, then there is a polynomial\ntime algorithm that constructs an\nODD for a given CSP()-instance.\nIf is not strongly uniformly blockwise decomposable,\nthen there is a family of CSP()-instances such that any structured DNNF for has size .\nOur second main theorem states that the larger class of strongly\nblockwise decomposable constraint languages captures CSPs that can be\nefficiently compiled in an unstructured format between FDDs and DNNFs.\nLet be a constraint language.\nIf is strongly blockwise decomposable, then there is a polynomial\ntime algorithm that constructs an\nFDD for a given CSP()-instance.\nIf is not strongly blockwise decomposable,\nthen there is a family of CSP()-instances such that any DNNF for has size .\nMoreover, we will show that both properties (strong blockwise decomposability\nand strong uniform blockwise decomposability) are decidable\n(Theorem 8.1 ###reference_###) and that for the relation from\nExample 3 ###reference_### separates both notions (Theorem 8.4 ###reference_###)."
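The 2x2 criterion of Lemma 3 translates directly into a brute-force test. The following Python sketch is our own illustration (not part of the extracted paper text above); it assumes a selection matrix given as a 2D list whose entries are sets of tuples, with the empty set standing for an empty entry.

```python
from itertools import combinations

def is_proper_block_matrix(M):
    """Lemma 3's criterion: M is a proper block matrix iff no 2x2
    submatrix has exactly one empty entry."""
    for r1, r2 in combinations(range(len(M)), 2):
        for c1, c2 in combinations(range(len(M[0])), 2):
            entries = (M[r1][c1], M[r1][c2], M[r2][c1], M[r2][c2])
            if sum(1 for e in entries if not e) == 1:
                return False
    return True

# Hypothetical 2-block example in the spirit of Example 3: rows/columns
# 0 and 1 form one block, row/column 2 the other.
M = [[{(0, 0)}, {(0, 1)}, set()],
     [{(1, 0)}, {(1, 1)}, set()],
     [set(),    set(),    {(2, 2)}]]
assert is_proper_block_matrix(M)
```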
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "4",
|
| 25 |
+
"parent_section_id": null,
|
| 26 |
+
"section_name": "Properties of the Decomposability Notions",
|
| 27 |
+
"text": "In this section we state important properties about (uniform)\nblockwise decomposability.\nWe start by observing that these notions\nare closed under projection and selection."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "4.1",
|
| 31 |
+
"parent_section_id": "4",
|
| 32 |
+
"section_name": "Basic Properties",
|
| 33 |
+
"text": "We start with some basic properties that will be useful throughout the rest of this paper.\nLet be a blockwise decomposable, resp. uniformly blockwise decomposable, constraint and let , , and . Then the\nprojection as well as the selection are also\nblockwise decomposable, resp. uniformly blockwise decomposable.\nIf is (uniformly) blockwise decomposable consider . First note that each entry is non-empty if is\nnon-empty. Thus, is a proper block\nmatrix with the same block structure and\n as . Furthermore, if a block is decomposable in , then the corresponding\nblock in is decomposable in\n. It follows that is\n(uniformly) decomposable.\nFor the selection \u201c\u201d first consider the case where or\n. Then the selection matrix is\na submatrix of and hence (uniformly)\nblockwise decomposable if was (uniformly)\nblockwise decomposable.\nIn case , the matrix \nhas the same block structure as and its\nentries decomposable w.r.t. the same partitions. Hence is also (uniformly) blockwise decomposable in the\npair .\n\u220e\nIf a constraint language is strongly (uniformly) blockwise\ndecomposable, then its individualization is also strongly (uniformly) blockwise decomposable.\nConsider a pp-formula defining\n where with Lemma 4.1 ###reference_### we may assume that the formula contains no projection. Then the formula can be rewritten as\n, where\n is a -formula. Since by assumption\n is (uniformly) blockwise decomposable, the same holds for\n by repeatedly applying the selection and Lemma 4.1 ###reference_###.\n\u220e\nWe next show that when dealing with blockwise decomposable relations, we can essentially assume that they are binary. To this end, we define for every constraint the set of binary projections\nLet us compute for the 4-ary relation from\nExample 3 ###reference_###, see\nFigure 1 ###reference_###. For the unary relations, observe\nthat every column in can take all\nfour values , so the only unary relation in is\n. The six possible binary projections lead\nto three different constraint relations:\nprojecting to and yields the relation\n.\nprojecting to and yields the relation\n.\nprojecting to and yields the relation\n. \u220e\nFor blockwise decomposable constraints, binary projections do not change solutions in the following sense.\n{lemma}\nLet be a blockwise decomposable constraint. Then\nWe assume w.\u2009l.\u2009o.\u2009g. that contains \ndistinct variables and let . First observe that by the definition of projections.\nFor the other direction, we let and need to show . For this, we show by induction on :\nNote that the lemma follows from this claim by setting and that the base case\n follows from the definition of . So let and\nassume that (9 ###reference_###) holds for all with . Fix an\narbitrary with , let be three variables in ,\nand set for . By induction assumption we get\n, which\nimplies that there are such that\n.\nLet . In order to show that , we consider the selection\nmatrix . Since we have that ,\n, and are non-empty,\nthese entries lie in the same block, which is decomposable into some\npartition of with and . Moreover,\n and . Decomposability implies that which is\nthe same statement as .\n\u220e\nLet be a constraint language. Then we define \nto be the constraint language consisting of all relations of arity at most that are pp-definable over . Equivalently, consists of all relations that are constraint relations of constraints in for a relation . 
Observe that even though is infinite, we have that is finite, since it contains only relations of arity at most two.\nConsider again the relation and its projections and \nfrom Example 3 ###reference_###, 4.1 ###reference_### and\nFigure 1 ###reference_###.\nWe let and want to compute . We\nfirst observe that instead of we could use the two binary\nprojections and , that is, for we have . To see this, observe first that we have already seen in Example 4.1 ###reference_### that . For the other direction, we just have to express as a pp-formula in which we do by\nSo to compute we can equivalently analyze which is easier to understand. Note first that the only unary relation that is pp-defiable over is , so we only have to understand binary relations. To this end, we assign a graph to every -formula where the variables are the variables of and there is an edge between two variables and if there is a constraint or in . In the former case, we label the edge with , in the latter with . Note that an edge can have both labels. An easy induction shows the following: if two variables are connected by a path whose edges are all labeled with , then in any solution in which takes value , takes value as well and the same statement is true for . Moreover, if and are not connected by such a path, then there is a solution in which takes value and takes value and vice versa. An analogous statement is true for and and paths labeled by .\nFrom these observations, it follows that the only binary relations\nthat are pp-definable over and that is not already in\n are the equality relation and the trivial relation .\nThus we get\n. \u220e\nThe following result shows that for strongly blockwise decomposable constraint languages, we may assume that all relations are of arity at most two.\nLet be a strongly blockwise decomposable language. Then .\nWe first show that . For this, it suffices to show that . So consider a relation . By definition, is in particular pp-definable over , so which shows the first direction of the proof.\nFor the other direction, is suffices to show that . So let of arity . For the constraint we have by Lemma 4.1 ###reference_### that where . Since the relations are all in , this shows that pp-definable over which proves the claim.\n\u220e"
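The binary projections and the induced join of Lemma 4.1 are easy to spell out concretely. The following Python sketch is our own illustration, under the assumption that a relation is given as a set of tuples over a finite domain; all function names are hypothetical.

```python
from itertools import combinations, product

def binary_projections(R, n):
    """Projections of an n-ary relation R (a set of n-tuples) onto all
    coordinate pairs i < j."""
    return {(i, j): {(t[i], t[j]) for t in R}
            for i, j in combinations(range(n), 2)}

def binary_closure(R, n, domain):
    """Tuples consistent with every binary projection of R; by Lemma 4.1
    this coincides with R whenever R is blockwise decomposable."""
    proj = binary_projections(R, n)
    return {t for t in product(domain, repeat=n)
            if all((t[i], t[j]) in p for (i, j), p in proj.items())}

# For relations that are not blockwise decomposable the closure can be
# strictly larger, e.g. for the even-parity triples over {0, 1}:
parity = {t for t in product((0, 1), repeat=3) if sum(t) % 2 == 0}
assert parity < binary_closure(parity, 3, (0, 1))   # proper subset
```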
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4.2",
|
| 37 |
+
"parent_section_id": "4",
|
| 38 |
+
"section_name": "The Relation to Strong Balance",
|
| 39 |
+
"text": "Next, we will show that blockwise decomposable relations allow for\nefficient counting of solutions by making a connection to the work of Dyer\nand Richerby [DyerR13]. To state their dichotomy theorem, we need the following\ndefinitions. A constraint is\nbalanced (w.r.t. ), if the \nmatrix defined by\n is a proper block matrix, where each block has rank one. A constraint language is strongly\nbalanced if every at least ternary pp-definable constraint is balanced.\n[Effective counting dichotomy [DyerR13]]\nIf is strongly balanced, then there is a polynomial time\nalgorithm that computes for a given\nCSP()-instance .\nIf is not strongly balanced, then counting solutions\nfor CSP()-instances is P-complete.\nMoreover, there is a polynomial time algorithm that decides if a given\nconstraint language \nis strongly balanced.\nOur next lemma\nconnects\nblockwise decomposability with strong balance and leads to a\nnumber of useful corollaries in the next section. We sketch the proof for the case\nEvery strongly blockwise decomposable constraint language is\nstrongly balanced.\n\nWe first sketch the proof for the case\nwhen is ternary, before proving the general case\n{proofsketch}\nLet be strongly blockwise decomposable and a\npp-defined ternary constraint. Then the selection matrix is a block\nmatrix, where each block is decomposable in some\n, either or . In any case, for the\ncorresponding block in\n we have\n where and . Thus, the block has rank 1.\nTo prove the general case of Lemma 4.2 ###reference_###, it will be convenient to\nhave a generalization of the selection matrix . So consider a relation in variables and domain . Then is the -matrix whose rows are indexed by assignments , whose column are indexed by the assignments and that have as entry at position the constraint\nWe say that is blockwise set-decomposable with respect to and if is a proper block matrix and for every non-empty block , the selection is decomposable such that no factor contains variables of and . We say that is blockwise set-decomposable if it is blockwise set-decomposable for all choices of disjoint variable sets.\nLet be a relation. Then is blockwise set-decomposable if and only if it is blockwise decomposable.\nIf is blockwise set-decomposable, then it is by definition also blockwise decomposable. It only remains to show the other direction. So assume that is blockwise decomposable. We proceed by induction on . If , then and each consist of one variable, so there is nothing to show.\nNow assume that w.l.o.g. . Let be the variables of not in . We first show that for all choices of the matrix is a proper block matrix with the criterion of Lemma 3 ###reference_###. Consider assignments such that , , . We have to show that as well. Let be one of the variables in and let denote the other variables in . Let and denote the restriction of to and , respectively. We have , and , so by induction , so in all four entries lie in a block . By decomposability of , there are relations such that and analogously for the other entries of . Since and , we have that and and thus also . It follows that and thus is a block matrix.\nIt remains to show that the blocks of decompose. So let and be the index sets of a block in the matrix. Consider again and let the notation be as before. Let . Then we have for all that , so by induction we have that for every there is a relation and for every there is a relation such that . Moreover, all are in the same variables and the same is true for all the . 
If is a variable of the , then so we get the decomposition of the block in directly. Otherwise, so if is a variable of the , we have where we get from by projecting away . Again, we get decomposability of the block and it follows that is blockwise set-decomposable.\n\u220e\nThe following connection between balance and blockwise decomposability is now easy to show.\nEvery blockwise decomposable relation is balanced.\nLet be blockwise decomposable. Let be variable sets. We want to show that is balanced. Let be a block. Since, by Lemma 4.2 ###reference_### the relation is blockwise set-decomposable, every entry of can be written as where the and and are the relations given by the decomposability of . It follows directly that is a rank- matrix which shows the lemma.\n\u220e\nLemma 4.2 ###reference_### is now a direct consequence of Lemma 4.2 ###reference_###."
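As an aside, the "rank one" requirement on the blocks in the balance condition can be tested without any linear algebra machinery: a matrix has rank at most one exactly when all of its 2x2 minors vanish. A small self-contained sketch (our own, on a generic integer matrix):

```python
def has_rank_at_most_one(B):
    """A matrix has rank <= 1 iff every 2x2 minor vanishes, i.e.
    B[i][j]*B[k][l] == B[i][l]*B[k][j] for all i < k and j < l."""
    rows, cols = len(B), len(B[0])
    for i in range(rows):
        for k in range(i + 1, rows):
            for j in range(cols):
                for l in range(j + 1, cols):
                    if B[i][j] * B[k][l] != B[i][l] * B[k][j]:
                        return False
    return True

assert has_rank_at_most_one([[2, 4], [3, 6]])      # second row is 1.5 * first
assert not has_rank_at_most_one([[1, 0], [0, 1]])  # identity has rank 2
```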
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4.3",
|
| 43 |
+
"parent_section_id": "4",
|
| 44 |
+
"section_name": "Consequences of the Relation to Strong Balance",
|
| 45 |
+
"text": "In this section, we will use Lemma 4.2 ###reference_### to derive useful properties of strongly blockwise decomposable languages.\nLet be a strongly blockwise decomposable constraint\nlanguage.\nGiven a\n-formula and a (possibly empty) partial\nassignment , the number of solutions that extend\n can be computed in polynomial time.\nGiven a\npp-formula over and , the blocks\nof can be computed in polynomial time.\nGiven a\npp-formula over , the indecomposable factors\nof can be computed in polynomial time.\nClaim 1 ###reference_i1### follows immediately from the combination of\nLemma 4.2 ###reference_### with Theorem 4.2 ###reference_###\nand the fact that strongly blockwise decomposable constraint\nlanguages are closed under selection\n(Corollary 4.1 ###reference_###). For Claim 2 ###reference_i2### let for some -formula . To compute the blocks of , we can use Claim 1 ###reference_i1### to compute for every and\n whether and hence .\nTo prove Claim 3 ###reference_i3###, note that by Claim 2 ###reference_i2### we can\ncalculate the block structure of for every variable pair\n. Consider the graph with \nand edges between and if has at least\ntwo non-empty blocks. If for some and ,\nthen and must appear in the same indecomposable factor of\n. Let be the connected components of . All variables of one connected component must appear in the same factor, so is indecomposable. We claim that\n.\nIt suffices to show for one connected component ,\nsince this can be used iteratively to show the claim. We have that for\nany and , the selection matrix\n has only one block, which can be decomposed into\n, with and , so that\nSince , no variable from the same connected component\n can be in and therefore\n.\nSince we can obtain such a decomposition for every and , and the intersection of all these decompositions\ninduces a decomposition where is separated from any , we get\nthat .\n\u220e\nWe close this section by stating the following property that applies only\nto uniformly decomposable constraints.\nLet be a constraint that is uniformly blockwise decomposable in and . Then there exist and with such that\nFurthermore, if is defined by a pp-formula over a strongly\nuniformly blockwise decomposable , then and\n can be computed from in polynomial time.\nSince is uniformly blockwise decomposable in and , there exist and with such that for all we have\nWe will show (11 ###reference_###) for this choice of and . First, if satisfies , then by definition of projections, we have for every that satisfies , which yields the containment of (11 ###reference_###). Now let . Since we get and thus\nNow\n implies that\n. Analogously,\n implies that\n. Thus we get\n\nand .\nWe now show how to find and in polynomial\ntime in if given as pp-definition .\nTo this end, we first compute the block structure of with Corollary 4.3 ###reference_###.2 ###reference_i2###. Let these blocks. Then we compute for every the indecomposable factors of the block by applying Corollary 4.3 ###reference_###.3 ###reference_i3### to the formula which we can do by Corollary 4.1 ###reference_###. Denote for every the corresponding variable partition by .\nIt remains to compute a variable set with such that for all we have that is a factor of and . To this end, observe that if there is an and a set that contains two variables , then either and must both be in or they must both be in . This suggests the following algorithm: initialize . While there is an and a set such that there are variables and , add to . 
We claim that when the loop stops, we have a set with the desired properties. First, observe that we have for all that is a factor of , because otherwise we would have continued adding elements. Moreover, by construction. Finally, since a decomposition with the desired properties exists by what we have shown above, the algorithm will never be forced to add to . This proves the claim and thus complete the proof of the lemma.\n\u220e"
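The connected-component computation behind Corollary 4.3(3) amounts to a few lines of union-find. The following Python sketch is our own illustration; the oracle has_two_blocks(x, y), standing for the test whether the selection matrix in x and y has at least two non-empty blocks, is assumed to be supplied (in the paper this test is provided by Corollary 4.3(2)).

```python
def indecomposable_factors(variables, has_two_blocks):
    """Group the variables into the scopes of the indecomposable factors:
    x and y must lie in the same factor whenever has_two_blocks(x, y)
    holds, so we return the connected components of that graph."""
    parent = {v: v for v in variables}

    def find(v):                       # union-find with path halving
        while parent[v] != v:
            parent[v] = parent[parent[v]]
            v = parent[v]
        return v

    for i, x in enumerate(variables):
        for y in variables[i + 1:]:
            if has_two_blocks(x, y):
                parent[find(x)] = find(y)

    comps = {}
    for v in variables:
        comps.setdefault(find(v), []).append(v)
    return list(comps.values())

# Hypothetical toy oracle: x1/x2 and x3/x4 are entangled, nothing across.
pairs = {frozenset(p) for p in [("x1", "x2"), ("x3", "x4")]}
print(indecomposable_factors(["x1", "x2", "x3", "x4"],
                             lambda a, b: frozenset((a, b)) in pairs))
# -> [['x1', 'x2'], ['x3', 'x4']]
```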
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "5",
|
| 49 |
+
"parent_section_id": null,
|
| 50 |
+
"section_name": "Algorithms",
|
| 51 |
+
"text": ""
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "5.1",
|
| 55 |
+
"parent_section_id": "5",
|
| 56 |
+
"section_name": "Polynomial time construction of ODDs for strongly uniformly blockwise decomposable constraint languages",
|
| 57 |
+
"text": "The key to the efficient construction of ODD for uniformly\nblockwise decomposable constraints is the following lemma, which\nstates that any such constraint is equivalent to a treelike conjunction of binary projections\nof itself.\n[Tree structure lemma]\nLet be a constraint that is\nuniformly blockwise decomposable. Then there is an undirected tree\n with vertex set such that\nFurthermore, can be calculated in polynomial\ntime in , if is uniformly blockwise decomposable\nand given as pp-formula over a strongly uniformly blockwise decomposable language .\nWe first fix and arbitrarily and apply\nLemma 4.3 ###reference_### to obtain a tri-partition (, , ) of\n such that . We add the edge to .\nBy Lemma 4.1 ###reference_###,\n and are uniformly blockwise\ndecomposable, so Lemma 4.3 ###reference_###\ncan be recursively applied on both projections. For (say)\n we fix , choose an arbitrary , apply Lemma 4.3 ###reference_###, and add the\nedge to . Continuing this\nconstruction recursively until no projections with more than\ntwo variables are left yields the desired result.\n\u220e\nFrom the tree structure of Lemma 5.1 ###reference_###, we will construct small ODDs by starting with\na centroid, i.\u2009e., a variable whose removal splits\nthe tree into connected components of at most \nvertices each. From the tree structure lemma it follows that we can\nhandle the (projection on the) subtrees independently. A recursive\napplication of this idea leads to an ODD of size .\nLet be a CSP()-instance and the\ncorresponding -formula. By Lemma 5.1 ###reference_###\nwe can compute a tree such that\n. By Corollary 4.3 ###reference_###.1 ###reference_i1### we can explicitly\ncompute, for each , a binary relation\n such that\n. Now we define\nthe formula\n and\nnote that . It remains to\nshow that such tree-CSP instances can be efficiently compiled to\nODDs. This follows from the following inductive claim, where for\ntechnical reasons we also add unary constraints for\neach vertex (setting implies the theorem).\nLet be a tree on vertices and be a formula. Then there is an order ,\ndepending only on , such that an ODD< of size at most deciding can be computed\nin .\nWe prove the claim by induction on . The case is\ntrivial.\nIf let be\na centroid in this tree, that is a node whose removal splits the tree into\n\nconnected components ,\u2026, of at most vertices each. It is a classical result that every tree has at least one centroid [Jordan1869]. Let , \u2026, be vectors of the variables\nin these components, so (, , \u2026,\n) partitions .\nLet be the neighbors of in . We want to branch on and recurse on the connected components\n. To this end, for each assignment we remove for each\nneighbor those values that cannot be extended to . That\nis, .\nNow we let \nand observe that\nBy induction assumption, for each there is an order\n of such that each has an\nODD of size for .\nNow we start our ODD for with branching on \nfollowed by the sequential combination of ODD, \u2026, ODD for each assignment\n to . This completes the inductive construction. Since\nits size is bounded by\n, the following easy\nestimations finish the proof of the claim (recall that ):"
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "5.2",
|
| 61 |
+
"parent_section_id": "5",
|
| 62 |
+
"section_name": "Polynomial time construction of FDDs for strongly blockwise decomposable constraint languages",
|
| 63 |
+
"text": "For blockwise decomposable constraints that are not uniformly blockwise\ndecomposable, a good variable order may depend on the values assigned\nto variables that are already chosen, so it is not surprising that the\ntree approach for ODDs does not work in this setting.\nFor the construction of the FDD, we first compute the indecomposable\nfactors (this can be done by Corollary 4.3 ###reference_###.3 ###reference_i3### and treat them\nindependently. This, of course, could have also been done for the ODD\nconstruction. The key point now is how we treat the indecomposable\nfactors: every selection matrix for a (blockwise decomposable) indecomposable constraint\nnecessarily has two non-empty blocks. But then every row\n must have at least one empty entry\n. This in turn implies\nthat, once we have chosen , we can exclude as a possible\nvalue for ! As we have chosen arbitrarily, this also applies to\nany other variable (maybe with a different domain element ). So the set of possible values for every variable\nshrinks by one and since the\ndomain is finite, this cannot happen too\noften. Algorithm 1 ###reference_### formalizes this recursive idea. To\nbound the runtime of this algorithm, we analyze the size of the\nrecursion tree.\nThe algorithm is formalized in Algorithm 1 ###reference_###. It is\nstraightforward to verify that this algorithm computes an FDD that\ndecides . It remains to discuss the size of the\nFDD and the running time. First note that the decomposition into\nindecomposable factors (Line 10 ###reference_10###) can be computed in polynomial time by\nCorollary 4.3 ###reference_###.3 ###reference_i3###. Moreover, (non-)emptiness of the entries of the selection\nmatrices (Line 19 ###reference_19###) can be tested in\npolynomial time by\nCorollary 4.3 ###reference_###.1 ###reference_i1###. Hence, every call has only polynomial computation\noverhead and we proceed to bound the total number of recursive calls.\nTo this end, let us bound the size of the recursion tree, starting by bounding its depth.\nAs discussed above, the crucial point is that each considered\nselection matrix in Line 19 ###reference_19###\nhas at least two blocks, otherwise, the relation would have been\ndecomposable, because by definition of blockwise decomposability every block of is decomposable. Therefore, the test for empty entries will succeed at\nleast once and each considered variable domain shrinks. Therefore, in every root-leaf-path in the recursion tree, there are at most recursive in Line 22 ###reference_22###. Moreover, on such a path there cannot be two consecutive calls from Line 12 ###reference_12###, because we decompose into indecomposable factors before any such call. It follows that the recursion tree has depth at most .\nLet the height of a node in the recursion tree be the distance to the furthest leaf in the subtree below . Let be the number of variables of the constraint in that call. We claim that the number of leaves below is at most . We show this by induction on . If , then is a leaf, so we make no further recursive calls. This only happens if and the claim is true. Now consider . Let be the children of . If in we make a recursive call as in Line 12 ###reference_12###, then . Also for all we have and the number of leaves below is bounded by . If in we make a recursive call in Line 22 ###reference_22###, then we know that , because we make at most calls. 
Moreover, we have again that , so the number of leaves below is bounded by which completes the induction and thus proves the claim.\nIt follows that the recursion tree of the algorithm has at most leaves and thus at most nodes. Since we add at most one FDD-node per recursive call, this is also a bound for the size of the computed FDD.\n\u220e"
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "6",
|
| 67 |
+
"parent_section_id": null,
|
| 68 |
+
"section_name": "Lower Bounds",
|
| 69 |
+
"text": "In this section, we will prove the lower bounds of Theorem 3 ###reference_### and Theorem 3 ###reference_###. In the proofs, we will use the approach developed in [BovaCMS16] that makes a connection between DNNF size and rectangle covers. We will use the following variant:\nLet be a DNNF of size representing a constraint and let . Then, for every , there is a --balanced rectangle cover of of size . Moreover, if is structured, then the rectangles in the cover are all with respect to the same variable partition.\n\nThe proof of Lemma 6 ###reference_### is very similar to\nexisting proofs in [BovaCMS16], so we defer it to the appendix.\n{toappendix}"
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "6.1",
|
| 73 |
+
"parent_section_id": "6",
|
| 74 |
+
"section_name": "Proof of Lemma 6",
|
| 75 |
+
"text": "In the proof of Lemma 6 ###reference_###, we will again use the concept of proof trees, see Section 2 ###reference_###.\nThe idea of the proof of Lemma 6 ###reference_### is to partition in the representation (1 ###reference_###), guided by the circuit . To this end, we introduce some more notation. Let, for every gate , denote the set of proof trees of that contain . Moreover, let denote the variables appearing in and the variables that appear in below the gate . Finally, let .\nis a rectangle w.r.t. .\nEvery proof tree in can be partitioned into a part below and the rest. Moreover, any such proof trees can be combined by combining the part below from and the rest from . The claim follows directly from this observation.\n\u220e\nWith Claim 6.1 ###reference_###, we only have to choose the right gates of to partition to prove Lemma 6 ###reference_###. To this end, we construct an initially empty rectangle cover iteratively: while still captures an assignment , choose a proof tree capturing (which is guaranteed to exist by (1 ###reference_###)). By descending in , choose a gate such that a fraction of the variables in between and 222There is a small edge case here in which does not contain a third of the variables in . In that case, we simply take as a rectangle, balancing it by adding the non-appearing variables appropriately.. Add to , delete from and repeat the process. When the iteration stops, we have by Claim 6.1 ###reference_### and the choice of constructed a set of -balanced rectangle covers. Moreover, by construction. Finally, since in the end does not capture any assignments anymore, every assignment initially captured must have been computed by one of the proof trees of that got destroyed by deleting one of its gates . Thus and we have\nwhich shows the claim of Lemma 6 ###reference_### for the unstructured case.\nIf is structured, we choose the vertices in the iteration slightly differently. Let be the v-tree of . Then we can choose a vertex in that has between one and two thirds of the vertices in as labels on leaves below . Let be the variable in below and let be the rest of the variables. Now in the construction of , in the proof tree we can find a gate below which there are only variables in and which is closest to the root with this property. Then, by Claim 6.1 ###reference_###, is a rectangle w.r.t. and thus in particular -balanced. Since all rectangles we choose this way are with respect to the same partition which depends only on , the rest of the proof follows as for the unstructured case."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "6.2",
|
| 79 |
+
"parent_section_id": "6",
|
| 80 |
+
"section_name": "Lower Bound for DNNF",
|
| 81 |
+
"text": "In this Section, we show the lower bound for Theorem 3 ###reference_### which we reformulate here.\nLet be a constraint language that is not strongly blockwise decomposable. Then there is a family of -formulas of size and\n such that any DNNF for has\nsize at least .\nIn the remainder of this section, we show Proposition 6.2 ###reference_###, splitting the proof into two cases.\nFirst, we consider the case where is not a proper block matrix.\nLet be a constraint such that is not a proper block matrix. Then\nthere is a family of -formulas and\n such that any DNNF for has size at least .\nIn the proof of Lemma 6.2 ###reference_###, we will use a specific family of graphs. We remind the reader that a matching is a set of edges in a graph that do not share any end-points. The matching is called induced if the graph induced by the end-points of the matching contains exactly the edges of the matching.\n{lemma}\nThere is an integer and constants , such that there is an infinite family \nof bipartite graphs with maximum degree at most such that for each set with there is an induced matching of size at least in which each edge has exactly one endpoint in .\n\nThe proof of Lemma 6.2 ###reference_### uses a specific class of so-called expander graphs. Since the arguments are rather standard for the area, we defer the proof to Appendix 6.3 ###reference_###.\n{toappendix}"
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "6.3",
|
| 85 |
+
"parent_section_id": "6",
|
| 86 |
+
"section_name": "Bipartite graphs with large induced matchings over every cut",
|
| 87 |
+
"text": "In this appendix, we will show how to prove Lemma 6.2 ###reference_###. The construction is based on expander graphs in a rather standard way, but since we have not found an explicit reference, we give it here for completeness.\nWe will use the following construction.\nThere are constants , ,\n such that there is a class of bipartite graphs\n of degree at most and with such that for every set or with we have .\nThe proof is an adaption of [MotwaniR95, Theorem 5.6] to our slightly different setting, using the probabilistic method. We will choose the constants later to make the calculations work, so we let them be variable for now. We fix and construct as follows: Set and . Then choose permutations of and set . Then is by construction bipartite and has maximum degree of as required, and it remains only to show the condition on the neighborhoods.\nLet be the random event that there is a set\n of size with at most \nneighbors. There are possible choices of\nsuch a set . Also, for every there are sets in which the neighbors of can be in\ncase is true for . Since the probability of\n only depends on the size of but not\non itself, we get\nWe have that if and only if the permutations all map into . So let us first bound the number of permutations which map into : we first choose the elements into which is mapped; there are . Then we map the rest of arbitrarily; there are ways of doing this. So the overall number of such permutations is . Since the permutations are chosen independently, we get\nPlugging this in and then using , we get\nWe now set our constants to , , and and get\nNow let be the event that there is a subset of of size at most that has at most neighbors. We get\nThe same analysis for subsets of yields that the probability that there is a set in or of size at most that has too few neighbors is at most . It follows that there is a graph with the desired properties.\n\u220e\nWe call a graph an vertex expander\nif , the maximum degree is at most , and for all sets of at most vertices, the neighborhood has size at least .\nThere are constants , , such that there is a class of bipartite -expander with vertices in every color class for infinitely many values .\nWe take the graphs from Lemma 6.3 ###reference_### with the same values , and . Fix and consider any set of size at most . Assume w.l.o.g. that , so . Then we get that\n\u220e\nThe class of graphs in Corollary 6.3 ###reference_### will be the class of graphs for Lemma 6.2 ###reference_###. We now construct the distant matchings. To this end, consider a graph from this class and a set of vertices of size at most . First construct a matching between and . Since has neighbors outside of and every vertex had degree at most , one can greedily find such a matching of size . In a second step, we choose an induced matching out of this matching greedily. Since every edge has at most edges at distance , this yields an induced matching of size which completes the proof.\nIf is not a proper block matrix, then, by Lemma 3 ###reference_###, the matrix has a -submatrix\nwith exactly three non-empty entries. So let such that and\n, and are all non-empty.\nWe describe a construction that to every bipartite graph gives a formula as follows: for every vertex , we introduce a variable and for every vertex we introduce a variable . Then, for every edge where and , we add a constraint where consists of variables only used in this constraint. 
We fix the notation , and .\nLet be the family of formulas defined by where is the family from Lemma 6.2 ###reference_###. Clearly, , as required. Fix for the remainder of the proof and let . Let \nbe the formula we get from by restricting all variables to and all variables to by adding some unary constraints.\nLet be an --balanced rectangle cover of where is the constant from Lemma 6.2 ###reference_###. We claim that the size of is at least , where\nand is the degree of .\nTo prove this, we first show that for every ,\n\nSo let . Since is an --balanced rectangle, we may assume . By choice of , we have that there is an induced matching in of size at least consisting of edges that have\none endpoint corresponding to a variable in and one endpoint corresponding to a variable in . Consider an edge . Assume that and . Since we have , we get\nBy construction , so it follows that either or . Assume w.l.o.g. that (the other case can be treated analogously). It follows that for each solution , we get a solution by setting\n,\nFor all , we set ,\nFor all and all we set where is such that\n.\nNote that values exist because is non-empty. Observe that\nfor two different solutions and the solutions \nand may be the same. However, we can bound the number ,\ngiving a lower bound on the set of solutions not in .\nTo this end, suppose that . Since only changes the values\nof , exactly -variables and at most vectors of -variables (the two latter bounds come from the degree bounds on ),\n implies that and coincide\non all other variables. This implies\nbecause there are only that many possibilities for the variables that\n might change. By considering , we have shown that\nSo we have constructed \nsolutions not in . Now we consider not only one\nedge but all possible subsets of edges in :\nfor a solution , the assignment is constructed as the \nabove, but for all edges . Reasoning as above, we get\nIt is immediate to see that \nfor . Thus we get\nIt follows that every -balanced rectangle cover of has to have a size of at least .\nWith Lemma 6 ###reference_### and Lemma 2 ###reference_.SSS0.P0.SPx6### it follows that any DNNF for has to have a size of at least .\n\u220e\nNow we consider the case that is a proper block matrix, but is not blockwise decomposable in some pair of variables .\nLet be a relation such that is a proper block matrix but in not blockwise decomposable in and . Then there is a family of formulas and\n such that a DNNF for needs to have a\nsize of at least .\nThe proof follows the same ideas as that of Lemma 6.2 ###reference_###, so we state only the differences. If is a\nproper block matrix but is not blockwise decomposable in and , there is a block such that is not decomposable, in such a way that and appear in different factors. This implies that if , and , then .\nFor one\nmatching edge \nthe projection onto is\nof the form , so there must exist and not in .\nThis is used for the edge to construct solutions in . Since is a block, we define as follows:\n,\nFor all we set such that .\nFor all we set\n such that\n.\nTo bound , note that we have at most \npossibilities in case (a) and, since , at most possibilities in case (b)\nand (c), respectively.\nIt follows that we can bound the number for a solution by\nso we get:\nThe rest of the proof is unchanged.\n\u220e\nWe now have everything in place to prove Proposition 6.2 ###reference_###.\nSince is not strongly blockwise decomposable, there is a relation that is not blockwise decomposable in and . 
Then, by definition of co-clones and Lemma 4.1 ###reference_###, there is a -formula that defines .\nIf there are variables such that is not a proper block matrix, then we can apply Lemma 6.2 ###reference_### to get -formulas that require exponential size DNNF. Then by substituting all occurrences of in these formula by the -formula defining , we get the required hard -formulas. If all are proper block matrices, then there are variables such that is not blockwise decomposable. Using Lemma 6.3 ###reference_### and reasoning as before, then completes the proof.\n\u220e"
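The probabilistic construction in the appendix (a union of d random perfect matchings between two copies of {0,...,n-1}) and the two greedy steps that extract an induced matching across a cut can be sketched as follows. The constants, function names and the choice of the cut are illustrative only; the expander parameters of the proof are not reproduced here, and duplicate edges produced by the permutations are simply merged.

```python
import random

def random_permutation_graph(n, d, seed=0):
    """Bipartite graph on parts L = R = {0,...,n-1}: the union of d uniformly
    random perfect matchings, mimicking the probabilistic construction above."""
    rng = random.Random(seed)
    edges = set()
    for _ in range(d):
        perm = list(range(n))
        rng.shuffle(perm)
        edges.update((i, perm[i]) for i in range(n))
    return edges

def greedy_induced_matching_across(edges, left_set):
    """Greedily pick an induced matching whose edges have their left endpoint
    inside `left_set` (assumed to be a set of left vertices), mimicking the
    two greedy steps of the proof."""
    adj = {}
    for u, v in edges:
        adj.setdefault(("L", u), set()).add(("R", v))
        adj.setdefault(("R", v), set()).add(("L", u))
    used = set()
    matching = []
    for u, v in sorted(edges):
        a, b = ("L", u), ("R", v)
        if u not in left_set or a in used or b in used:
            continue
        matching.append((u, v))
        # Block both endpoints and all their neighbours, so that no edge of
        # the graph runs between endpoints of two different matching edges.
        used.update({a, b})
        used.update(adj[a])
        used.update(adj[b])
    return matching

edges = random_permutation_graph(n=50, d=3)
m = greedy_induced_matching_across(edges, left_set=set(range(25)))
print(len(m), "induced matching edges across the cut")
```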
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "6.4",
|
| 91 |
+
"parent_section_id": "6",
|
| 92 |
+
"section_name": "Lower Bound for structured DNNF",
|
| 93 |
+
"text": "In this section, we prove the lower bound of Theorem 3 ###reference_### which we formulate here again.\nLet be a constraint language that is not strongly uniformly blockwise decomposable. Then there is a family of -formulas of size and\n such that any structured DNNF for has\nsize at least .\nNote that for all constraint languages that are not strongly blockwise decomposable, the result follows directly from Proposition 6.2 ###reference_###, so we only have to consider constraint languages which are strongly blockwise decomposable but not strongly uniformly blockwise decomposable. We start with a simple observation.\nLet be a rectangle with respect to the partition . Let , then is a rectangle with respect to the partition .\nWe start our proof of Proposition 6.4 ###reference_### by considering a special case.\nLet be a constraint such that there are two assignments such that for every partition of we have that or . Consider\nwhere the are disjoint variable vectors.\nLet be a variable partition of the variables of and be a rectangle cover of such that each rectangle in respects the partition . If for all we have that all and or and , then has size at least .\nWe use the so-called fooling set method from communication complexity, see e.g. [KushilevitzN97, Section 1.3]. To this end, we will construct a set of satisfying assignments of such that every rectangle of can contain at most one assignment in .\nSo let be the assignment to that assigns the variables analogously to , so , , and . Define analogously . Then the set consists of all assignments that we get by choosing for every an assignment as either or and then combining the to one assignment to all variables of . Note that contains assignments and that all of them satisfy , so all of them must be in a rectangle of .\nWe claim that none of the rectangles of can contain more than one element . By way of contradiction, assume this were not true. Then there is an that contains two assignments , so there is an such that in the construction of we have chosen while in the construction of we have chosen . Let and . Since , we have that . Moreover, by Observation 6.4 ###reference_###, is a rectangle and so we have that and . But consists only of solutions of and thus , so . It follows by construction that there is a partition of such that and are in . This contradicts the assumption on and and thus can only contain one assignment from .\nSince has size and all of assignments in must be in one rectangle of , it follows that consists of at least rectangles.\n\u220e\nWe now prove the lower bound of Proposition 6.4 ###reference_###.\nSince is not strongly uniformly blockwise decomposable, let be a constraint in that is not uniformly blockwise decomposable in and .\nIf is such that is not a proper block matrix, then the lemma follows directly from Lemma 6.2 ###reference_###, so we assume in the remainder that is a proper block matrix. We denote for every block of by the sub-constraint of we get by restricting to and to . Since is not uniformly blockwise decomposable, for every partition of there is a block such that .\nGiven a bipartite graph , we construct the same formula as in the proof of Lemma 6.2 ###reference_###. Consider again the graphs of the family from Lemma 6.2 ###reference_### and let . Fix in the remainder of the proof. Let be a structured DNNF representing of size . 
Then, by Proposition 6 ###reference_###, there is an -balanced partition of such that there is a rectangle cover of of size at most and such that all rectangles respect the partition . Let be the set of edges such that and are in different parts of the partition . By the properties of , there is an induced matching of size consisting of edges in .\nFor every edge let and . Assume that and (the other case is treated analogously). Then we know that there is a block of such that\nSince there are only at most blocks in , there is a block such that for at least edges Equation (15 ###reference_###) is true. Call this set of edges .\nLet . We construct a structured DNNF from by existentially quantifying all variables not in a constraint for and for all restricting the domain to and to if . Note that every assignment to that assigns every variable with to a value in and every with to a value in can be extended to a satisfying assignment of , because is a block. Thus, is a representation of\nWe now use the following simple observation.\nLet be a constraint such that . Then there are assignments such that or .\nSince and thus , we have that there is and such that . Simply extending and to an assignment in yields the claim.\n\u220e\nSince Claim 6.4 ###reference_### applies to all constraints in , we are now in a situation where we can use Lemma 6.4 ###reference_### which shows that any rectangle cover respecting the partition for has size . With Lemma 6 ###reference_###, we know that and since the construction of from does not increase the size of the DNNF, we get .\n\u220e"
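The counting argument in the lemma above is the classical fooling-set method. The sketch below shows the standard form of that method from communication complexity (slightly weaker than the variant used in the proof, and with helper names invented here): if a set of satisfying assignments is pairwise "fooling" with respect to a partition, then every rectangle of a cover respecting that partition contains at most one of them, so the cover has at least that many rectangles.

```python
def combine(sigma, tau, x1, x2):
    """Assignment that agrees with sigma on x1 and with tau on x2."""
    return {**{v: tau[v] for v in x2}, **{v: sigma[v] for v in x1}}

def is_fooling_set(f, candidates, x1, x2):
    """Standard fooling-set condition: all candidates satisfy f, and for any
    two of them at least one of the two 'mixed' assignments violates f.  Any
    rectangle cover of f respecting (x1, x2) then needs >= len(candidates)
    rectangles."""
    cand = list(candidates)
    if not all(f(a) for a in cand):
        return False
    for i in range(len(cand)):
        for j in range(i + 1, len(cand)):
            s, t = cand[i], cand[j]
            if f(combine(s, t, x1, x2)) and f(combine(t, s, x1, x2)):
                return False
    return True

# Example: equality of two n-bit strings; the 2**n "equal pairs" form a
# fooling set, so any rectangle cover needs at least 2**n rectangles.
n = 3
x1 = [f"x{i}" for i in range(n)]
x2 = [f"y{i}" for i in range(n)]
def eq(a):
    return all(a[f"x{i}"] == a[f"y{i}"] for i in range(n))
candidates = [{**{f"x{i}": (b >> i) & 1 for i in range(n)},
               **{f"y{i}": (b >> i) & 1 for i in range(n)}} for b in range(2 ** n)]
print(is_fooling_set(eq, candidates, x1, x2))  # True
```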
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "7",
|
| 97 |
+
"parent_section_id": null,
|
| 98 |
+
"section_name": "The Boolean Case",
|
| 99 |
+
"text": "In this section, we will specialize our dichotomy results for the Boolean domain .\nA relation over is called bijunctive affine if it can be written as a conjunction of the relations and and unary relations, so with where and }. A set of relations is called bijunctive affine if all are bijunctive affine. We will show the following dichotomy for the Boolean case:\nLet be a constraint language over the Boolean domain. If all relations in are bijunctive affine, then there is an polynomial time algorithm that, given\na -formula , constructs an OBDD for . If not, then\nthere is a family of -formulas and such that a DNNF\nfor needs to have a size of at least .\n\nLet us remark here that, in contrast to general domains , there is no advantage of FDD over ODD in the Boolean case: either a constraint language allows for efficient representation by ODD or it is hard even for DNNF. So in a sense, the situation over the Boolean domain is easier. Also note that the tractable cases over the Boolean domain are very restricted, allowing only equalities and disequalities.\nWith Theorem 3 ###reference_### and Theorem 3 ###reference_###, we only need to show the following:\nEvery is strongly uniformly blockwise decomposable.\nIf is strongly blockwise decomposable, then .\nWe show that is strongly uniformly blockwise decomposable \u2013 which implies that every is also strongly uniformly blockwise decomposable. Let . Then can be constructed by conjunctions of constraints in the relations of and projections. Let be the relation we get by the same conjunctions as for but not doing any of the projections. By Lemma 4.1 ###reference_###, it suffices to show that is uniformly blockwise decomposable. To this end, consider the graph with , two vertices and are connected with a blue edge if the representation of contains and connected with a red edge if contains . A vertex is blue if contains and red if contains .\nIf is represented by\nthen is the graph in Figure 3 ###reference_###.\n\u220e\nWe show that is a proper block matrix and each block is decomposable using the same variable partition. If and are in different connected components of , is decomposable such that and appear in different factors, so has only one block which is decomposable. If has no satisfying assignments, there is nothing to show. It remains the case where has satisfying assignments and and are in the same connected component.\nSo let have at least one model and let have only one connected component. Note that setting one variable in to or determines the value of all other variables in . This implies that if a vertex in is colored, then has three empty entries and one entry with exactly one element. If no vertex in is colored, then is either (if every path from to has an even number of red edges) or (if every path from to has an odd number of edges). So has exactly two non-empty entries with one element each, so is decomposable in and . This completes the proof of Item 1 ###reference_i1###.\nLet now be strongly blockwise decomposable and be decomposed into indecomposable factors\nWe have to show that for all which implies that ). Since is indecomposable, has to have at least two blocks for every . Since is Boolean, only two cases remain: is either equality or disequality on and . In every case, the value of determines the value of and vice versa. Since and are arbitrary, each variable in determines the value of every other variable in . This implies that non-empty entries of have exactly one element. 
It follows that has exactly two satisfying assignments and moreover, no variable can take the same value in these two assignments since otherwise would be decomposable. It follows that after fixing a value for a variable , the values of all other variables are determined by the constraint or , so is a conjunction of constraints with relations in which completes the proof of Item 2 ###reference_i2### and thus the theorem.\n\u220e"
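The tractable Boolean languages consist essentially of equalities and disequalities, and the argument above rests on the fact that fixing one variable of a connected component of such constraints determines all others. A small sketch of this propagation (the representation is my own; unary constraints are omitted for brevity), which also indicates why such systems admit small ordered decision diagrams, one decision per component:

```python
from collections import deque

def solve_eq_neq(n_vars, eq_edges, neq_edges):
    """Propagate a system of x=y / x!=y constraints over {0,1}.
    Returns None if inconsistent; otherwise a list of components, each given
    as the assignment obtained by setting its first variable to 0 (the only
    other solution of a component is the bitwise complement)."""
    adj = {v: [] for v in range(n_vars)}
    for u, v in eq_edges:
        adj[u].append((v, 0))   # parity 0: equal
        adj[v].append((u, 0))
    for u, v in neq_edges:
        adj[u].append((v, 1))   # parity 1: different
        adj[v].append((u, 1))
    value = [None] * n_vars
    components = []
    for s in range(n_vars):
        if value[s] is not None:
            continue
        value[s] = 0
        comp = {s: 0}
        queue = deque([s])
        while queue:
            u = queue.popleft()
            for v, parity in adj[u]:
                want = value[u] ^ parity
                if value[v] is None:
                    value[v] = want
                    comp[v] = want
                    queue.append(v)
                elif value[v] != want:
                    return None   # a cycle with an odd number of disequalities
        components.append(comp)
    return components

# x0 = x1, x1 != x2: one component with exactly two satisfying assignments.
print(solve_eq_neq(3, eq_edges=[(0, 1)], neq_edges=[(1, 2)]))
```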
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "8",
|
| 103 |
+
"parent_section_id": null,
|
| 104 |
+
"section_name": "Decidability of Strong (Uniform) Blockwise Decomposability",
|
| 105 |
+
"text": "In this section, we will show that strong (uniform) blockwise decomposability is decidable."
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "8.1",
|
| 109 |
+
"parent_section_id": "8",
|
| 110 |
+
"section_name": "The Algorithm",
|
| 111 |
+
"text": "We will here first give the algorithm to decide strong (uniform) blockwise decomposability. The algorithm will rely on properties of constraint languages formulated below in Proposition 8.1 ###reference_### which we will then prove in the following sections.\nFor our algorithm, we will heavily rely on the following well-known result for deciding containment in co-clones whose proof can be found in [Dalmau00, Lemma 42].\nThere is an algorithm that, given a constraint language and a relation , decides if .\nIn a first step in our algorithm, we would like to restrict to the\ncase of binary relations by taking binary projections as it is done in\nProposition 4.1 ###reference_###. However, as the following example shows, this is in general not correct since there are constraint languages that are not blockwise decomposable while their binary projection is.\nConsider the relation which is the parity relation on three variables. Consider the constraint and the selection matrix\nThe single block of this matrix is obviously not decomposable, so is not blockwise decomposable.\nLet us now compute . To this end, call a relation affine if it is the solution set of a system of linear equations over the finite field with elements. Obviously, all -formulas define affine relations. The following fact is well known, see e.g. [Dalmau00, Lemma 5] for a proof.\nEvery projection of an affine constraint is affine.\nIt follows that only contains affine relations of arity at most . But, as we saw in the proof of Theorem 7 ###reference_###, in that case is strongly uniformly blockwise decomposable.\nAs a consequence, we have that is not blockwise decomposable while its binary projection is even strongly uniformly blockwise decomposable.\n\u220e\nTo avoid the problem of Example 8.1 ###reference_###, we will make\nsure that for the constraint language at hand we have\n, which we will test with the help of\nTheorem 8.1 ###reference_### below. If this is\nnot the case, then by the contraposition of\nProposition 4.1 ###reference_###, we already know that the\nlanguage is not strongly blockwise decomposable.\nAfterwards, we can focus on and utilize the following\nproposition, which is the main technical contribution of this section.\nLet be a constraint language and\n the constraint language consisting of all unary and binary pp-definable relations over .\nis strongly uniformly blockwise decomposable if and\nonly if all relations of arity at most in\n are uniformly blockwise decomposable.\nis strongly blockwise decomposable if and only if all relations of arity at most in are blockwise decomposable.\nBefore we prove Proposition 8.1 ###reference_### in the next two subsections, let us show how it yields the desired decidability results.\nThere is an algorithm that, given a constraint language , decides if is strongly blockwise decomposable. Moreover, there is also an algorithm that, given a constraint language , decides if is strongly uniformly blockwise decomposable.\nWe only consider blockwise decomposability since the proof for uniform decomposability is completely analogous.\nLet be the given constraint language over the domain .\nCompute by testing for every\nunary and binary relation over whether\n using\nTheorem 8.1 ###reference_###.\nAgain using Theorem 8.1 ###reference_###,\nwe test for every , whether\n. If the answer is no, then by\nwe can conclude by Proposition 4.1 ###reference_### that\n is not strongly blockwise decomposable. 
Otherwise, we know that .\nBy applying Theorem 8.1 ###reference_### a\nthird time, we compute all at most ternary relations in\n and test whether they are blockwise\ndecomposable by a brute-force application of Definition 3 ###reference_###. By\nProposition 8.1 ###reference_### this is the case if and only if\n and hence is strongly blockwise\ndecomposable. \u220e"
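The final step of the algorithm brute-forces decomposability of low-arity relations. For binary relations, being blockwise decomposable amounts to being rectangular (as noted in Section 8.4), and the candidate blocks are the connected components of the pattern of non-empty entries of the selection matrix; the matrix is a proper block matrix precisely when every component is completely filled. A brute-force sketch of this check, under the assumption that relations are given explicitly as sets of pairs (function names are invented for this note):

```python
def blocks(relation):
    """Candidate blocks of a binary relation R over a finite domain: connected
    components of the bipartite 'non-empty entry' pattern of its selection
    matrix, computed with a small union-find."""
    parent = {}
    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x
    def union(x, y):
        parent.setdefault(x, x)
        parent.setdefault(y, y)
        parent[find(x)] = find(y)
    for a, b in relation:
        union(("row", a), ("col", b))
    groups = {}
    for a, b in relation:
        groups.setdefault(find(("row", a)), set()).add((a, b))
    return list(groups.values())

def is_proper_block_matrix(relation):
    """For a binary relation this coincides with rectangularity: within every
    block, all row/column combinations must be present."""
    for block in blocks(relation):
        rows = {a for a, _ in block}
        cols = {b for _, b in block}
        if len(block) != len(rows) * len(cols):
            return False
    return True

print(is_proper_block_matrix({(0, 0), (1, 1)}))           # True: two 1x1 blocks
print(is_proper_block_matrix({(0, 0), (0, 1), (1, 1)}))   # False: not rectangular
```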
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"section_id": "8.2",
|
| 115 |
+
"parent_section_id": "8",
|
| 116 |
+
"section_name": "Proof of Proposition 8.1.1 (the uniform case)",
|
| 117 |
+
"text": "In this section, we will show Proposition 8.1 ###reference_### for the\ncase of strong uniform blockwise decomposability. Obviously, if\n contains a relation of arity at most that is\nnot uniformly blockwise decomposable, then is not strongly\nuniformly blockwise decomposable. So we only have to show the other\ndirection of the claim.\nWe first aim to get a better understanding of . Let . By Lemma 4.1 ###reference_###, it suffices to consider the case in which has a pp-definition without any projections, so there is a pp-definition of the constraint of the form\nwhere is a relation from (here we use that\n as it is pp-definable and that \nis closed under intersections).\nWe show that in our setting we get a decomposition as in Lemma 5.1 ###reference_###.\nFor any there is an undirected tree\n with vertex set and edge such that\nIf has less than three variables, there is nothing to show, so\nwe will first consider the case of , showing first the\nfollowing slightly stronger statement ():\nLet be a ternary\nconstraint with constraint relation in , then in the representation (16 ###reference_###) one of the constraints or is the trivial constraint .\nSince we are proving the backward direction of Proposition 8.1 ###reference_###.1, is\nuniformly blockwise decomposable by assumption. Thus, we can apply\nLemma 4.3 ###reference_### to see that can be rewritten in one of the forms\nAssertion () immediately follows and\nsetting and proves Claim 8.2 ###reference_###\nfor .\nNow assume that . Choose any variable and consider the formula\nThen we have by definition that\nWe claim that we can rewrite such that only at most one of the is not the trivial relation . To see this, assume that there are two different variables such that and are both nontrivial. Let be the constraint defined by and let . Consider the formula . Applying (), we get that we can rewrite such that one of or is trivial. Substituting this rewrite in for in and iterating the process yields that there is in the end only one non-trivial . Let be the only variable for which might be non-trivial, then we can assume that\nSince has fewer variables than , we get by induction that\nthere is a tree with vertex set and\n such that\nAdding as a new leaf connected to gives the desired tree for .\n\u220e\nUsing Claim 8.2 ###reference_###, we now show that is\nuniformly blockwise decomposable. To this end, we fix two variables\n and show that is uniformly blockwise decomposable in .\nTo see that is a proper block matrix, observe that has the same non-empty entries as . But is in and thus by assumption uniformly blockwise decomposable. It follows that its selection matrix is a proper block matrix which is then also true for .\nWe now apply Claim 8.2 ###reference_### to and let be the\nresulting tree. Since is an edge in ,\n consists of two trees and ,\ncontaining and , respectively. By setting\n and ,\nClaim 8.2 ###reference_### implies that can be written as\nThis implies that each block of is decomposable in\n and hence that is uniformly blockwise\ndecomposable."
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"section_id": "8.3",
|
| 121 |
+
"parent_section_id": "8",
|
| 122 |
+
"section_name": "Proof of Proposition 8.1.2 (the nonuniform case)",
|
| 123 |
+
"text": "In this section, we prove Proposition 8.1 ###reference_### for the case\nof blockwise decomposability, so let be a\nconstraint language and . We will first define the following new property of .\nWe say that has an incompatible block structure if and only if there are binary relations in such that has blocks and and has blocks and such that , , , and are all non-empty.\nIn the remainder of this section we will show that the following are\nequivalent, the equivalence between (1) and (2) then establishes Proposition 8.1 ###reference_###.2:\nis strongly blockwise decomposable.\nEvery ternary relation in is blockwise decomposable.\nis blockwise decomposable and has no incompatible block structure.\nThe direction (1) (2) is trivial, (2) (3)\nwill be shown in Lemma 8.3 ###reference_###, and (3) (1) is stated in Lemma 8.3 ###reference_###.\nIf has an incompatible block structure, then contains a ternary relation that is not blockwise decomposable.\nLet and the corresponding blocks be chosen as in Definition 8.3 ###reference_###. We claim that the constraint\nis not blockwise decomposable. To this end, choose values . Then we have by construction that\nand all these sets are all non-empty because of the incompatible block structure. Now assume, by way of contradiction, that is blockwise decomposable. Then is a proper block matrix and the entries all lie in the same block . Then decoposes with respect to , because we assumed that is blockwise decomposable, so\nHowever, in the first case we have\nso in particular there is an element of in both and which contradicts the assumption that and are different blocks of .\nIn the second case, we get\nwhich leads to an analogous contradiction to and being blocks of . Thus, in both cases we get a contradiction, so cannot be blockwise decomposable.\n\u220e\nIf is blockwise decomposable and has no incompatible block structure, then it is strongly blockwise decomposable.\nLet be in . We will show that \nis blockwise decomposable. Because of\nLemma 4.1 ###reference_###, we may w.l.o.g. assume\nthat is pp-definable by a -formula without\nprojection. Thus, as in the proof of Proposition 8.1 ###reference_###.1 we can write as\nwhere . Now using again the fact that\n and thus in particular for all , we can actually write as\nFix two variables . Since is blockwise decomposable\nby assumption of the lemma, we have that and hence is a proper block matrix (because\nthey have the same block structure). We have to show that all of its blocks decompose. So fix a block of and consider the restriction . As before, we get a representation\nWe now assign a graph to as follows: vertices are the\nvariables in ; two variables are connected by an edge\nif and only if has more than one block. Since \nis blockwise decomposable, we have, again using that , that all are proper\nblock matrices. So the variables that are not connected are\nexactly those where has exactly one block. So in\nparticular, there is no edge between and . The crucial\nproperty is that the edge relation is transitive:\nFor all , if and are edges in , then is also an edge.\nAgain, for all , the matrices and have the same blocks. Thus, is an edge in if and only if has more than one block.\nWe will use the following observation throughout the remainder of this proof which follows directly from the fact that we have .\nFor every element there must be a block of and a block of that both contain .\nLet be the blocks of\n and be the blocks of\n. 
By assumption of the claim we have and\nby Observation 8.3 ###reference_### . We claim that then there are two blocks\n and such that for all we have . To see this, consider two cases: if there is an that is not fully contained in any , then we can choose an arbitrary other set to satisfy the claim. Otherwise, choose arbitrarily such that . Since , there are elements from that are not in and thus there is an . But then has the desired property since contains only elements from . So in any case we can choose such that for all we have that .\nIt follows that we can fix two distinct\nblocks and such that and . Since\n does not have an incompatible block structure, it must be\nthe case that either or . W.l.o.g. assume\nthe former. It follows that for any , and\nevery domain element it is not the case that and . Therefore and . Since by\nthe choice of and we have and , we get that has\nnon-empty entries in row and column but not at their intersection, implying that the matrix contains at least\ntwo blocks. This finishes the proof of Claim 8.3 ###reference_###.\n\u220e\nIt remains to prove that is decomposable w.r.t. some partition \nwith and . To this end, we let be the connected\ncomponent of in the graph and . Note\nthat because is not an edge and the edge relation is\ntransitive. For every and the matrix\n has exactly one block and therefore\n. This\nimplies that\nproving that is decomposable w.r.t. .\n\u220e"
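The proofs above repeatedly ask whether (a block of) a relation decomposes with respect to a partition of its variables, i.e. whether it equals the product of its two projections. A brute-force check of this condition, assuming relations are given as explicit sets of tuples (the helper names and the toy example are mine):

```python
from itertools import product

def project(tuples, indices):
    """Projection of a relation (set of tuples) onto the given coordinates."""
    return {tuple(t[i] for i in indices) for t in tuples}

def is_decomposable(relation, part1, part2):
    """Check whether the relation equals the product of its projections onto
    the index sets part1 and part2, i.e. whether it decomposes w.r.t. the
    partition (part1, part2)."""
    order = list(part1) + list(part2)
    reordered = {tuple(t[i] for i in order) for t in relation}
    prod = {l + r for l, r in product(project(relation, part1),
                                      project(relation, part2))}
    return reordered == prod

# R = {(a, a, b) : a, b in {0,1}} decomposes when the two equal coordinates
# stay together, but not when they are separated.
R = {(a, a, b) for a in (0, 1) for b in (0, 1)}
print(is_decomposable(R, [0, 1], [2]))  # True
print(is_decomposable(R, [0], [1, 2]))  # False
```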
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"section_id": "8.4",
|
| 127 |
+
"parent_section_id": "8",
|
| 128 |
+
"section_name": "A separating example",
|
| 129 |
+
"text": "We have established that strong blockwise decomposability as well\nas strong uniform blockwise decomposability are decidable. It follows\nthat there is an algorithm that decides for a given constraint language \nif either\nevery CSP() instance can be encoded into a polynomial-size ODD or\nevery CSP() instance can be encoded into a polynomial-size FDD,\nbut some\nCSP() instances require exponential-size ODDs (and structured\nDNNFs) or\nthere are CSP() instances that require exponential-size FDDs\n(and DNNFs).\nWe have seen that there are constraint languages falling in the first\nand third category. Furthermore, there is no Boolean constraint\nlanguage falling in the second category. However, in\nthe non-Boolean case there are constraint languages with this property.\nTo see this, we utilize our new criterion for strong blockwise\ndecomposability: A constraint language is strongly blockwise\ndecomposable if and only if every binary relation in is\nblockwise decomposable (which is the same as being rectangular [DyerR13]) and there are no two relations in \nwith an incompatible block structure. Lets come back to our running\nexample from Section 4.1 ###reference_###, see\nFigure 1 ###reference_###. We have already observed in Example 3 ###reference_### that\n is not uniformly blockwise decomposable and hence is not\nstrongly uniformly blockwise decomposable. Moreover, we have computed in\nExample 4.1 ###reference_###. By inspecting these relations (see Figure 4 ###reference_###) it follows that they are\nall blockwise decomposable and that no two relations have pairs of incompatible\nblocks as stated in Definition 8.3 ###reference_###. It\nfollows by Lemma 8.3 ###reference_### that is strongly\nblockwise decomposable and thus serves as a separating example\nof our two central notions. This leads to the following theorem, which\nshould be contrasted with the Boolean case where both notions collapse\n(Theorem 7 ###reference_###).\nThere is a constraint language over a 4-element domain that is strongly\nblockwise decomposable, but not strongly uniformly\nblockwise decomposable. Thus, every CSP() instance can be\ndecided by a polynomial-size FDD, but there are CSP()\ninstances that require structured DNNFs (and ODDs) of exponential size."
|
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"section_id": "9",
|
| 133 |
+
"parent_section_id": null,
|
| 134 |
+
"section_name": "Conclusion",
|
| 135 |
+
"text": "We have seen that there is a dichotomy for compiling systems of constraints into DNNF based on the constraint languages. It turns out that the constraint languages that allow efficient compilation are rather restrictive, in the Boolean setting they consist essentially only of equality and disequality. From a practical perspective, our results are thus largely negative since interesting settings will most likely lie outside the tractable cases we have identified.\nWithin the polynomially compilable constraint languages we have identified and\nseparated two categories, depending on whether they guarantee polynomial-size\nstructured representations. Moreover, both properties are decidable.\nA few questions remain open. The first is to get a better grasp on the\nefficiently compilable constraint languages. Is there is simpler\ncombinatorial description, or is there an algebraic characterization\nusing polymorphisms? Is there a simpler way of testing strong\n(uniform) blockwise decomposability that avoids\nTheorem 8.1 ###reference_###? What is the exact\ncomplexity?"
|
| 136 |
+
}
|
| 137 |
+
],
|
| 138 |
+
"appendix": [],
|
| 139 |
+
"tables": {},
|
| 140 |
+
"image_paths": {},
|
| 141 |
+
"validation": true,
|
| 142 |
+
"references": [
|
| 143 |
+
{
|
| 144 |
+
"1": {
|
| 145 |
+
"title": "Unbalancing sets and an almost quadratic lower bound for\nsyntactically multilinear arithmetic circuits.",
|
| 146 |
+
"author": "Noga Alon, Mrinal Kumar, and Ben Lee Volk.",
|
| 147 |
+
"venue": "Comb., 40(2):149\u2013178, 2020.",
|
| 148 |
+
"url": null
|
| 149 |
+
}
|
| 150 |
+
},
|
| 151 |
+
{
|
| 152 |
+
"2": {
|
| 153 |
+
"title": "A circuit-based approach to efficient enumeration.",
|
| 154 |
+
"author": "Antoine Amarilli, Pierre Bourhis, Louis Jachiet, and Stefan Mengel.",
|
| 155 |
+
"venue": "In Ioannis Chatzigiannakis, Piotr Indyk, Fabian Kuhn, and Anca\nMuscholl, editors, 44th International Colloquium on Automata, Languages,\nand Programming, ICALP 2017, July 10-14, 2017, Warsaw, Poland, volume 80\nof LIPIcs, pages 111:1\u2013111:15. Schloss Dagstuhl - Leibniz-Zentrum\nf\u00fcr Informatik, 2017.",
|
| 156 |
+
"url": null
|
| 157 |
+
}
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"3": {
|
| 161 |
+
"title": "Connecting knowledge compilation classes and width parameters.",
|
| 162 |
+
"author": "Antoine Amarilli, Florent Capelli, Mika\u00ebl Monet, and Pierre Senellart.",
|
| 163 |
+
"venue": "Theory Comput. Syst., 64(5):861\u2013914, 2020.",
|
| 164 |
+
"url": null
|
| 165 |
+
}
|
| 166 |
+
},
|
| 167 |
+
{
|
| 168 |
+
"4": {
|
| 169 |
+
"title": "Compiling CSPs: A complexity map of (non-deterministic)\nmultivalued decision diagrams.",
|
| 170 |
+
"author": "J\u00e9r\u00f4me Amilhastre, H\u00e9l\u00e8ne Fargier, Alexandre Niveau,\nand C\u00e9dric Pralet.",
|
| 171 |
+
"venue": "Int. J. Artif. Intell. Tools, 23(4), 2014.",
|
| 172 |
+
"url": null
|
| 173 |
+
}
|
| 174 |
+
},
|
| 175 |
+
{
|
| 176 |
+
"5": {
|
| 177 |
+
"title": "A characterization of efficiently compilable constraint languages.",
|
| 178 |
+
"author": "Christoph Berkholz, Stefan Mengel, and Hermann Wilhelm.",
|
| 179 |
+
"venue": "In Olaf Beyersdorff, Mamadou Moustapha Kant\u00e9, Orna Kupferman,\nand Daniel Lokshtanov, editors, 41st International Symposium on\nTheoretical Aspects of Computer Science, STACS 2024, March 12-14, 2024,\nClermont-Ferrand, France, volume 289 of LIPIcs, pages 11:1\u201311:19.\nSchloss Dagstuhl - Leibniz-Zentrum f\u00fcr Informatik, 2024.",
|
| 180 |
+
"url": null
|
| 181 |
+
}
|
| 182 |
+
},
|
| 183 |
+
{
|
| 184 |
+
"6": {
|
| 185 |
+
"title": "A dichotomy for succinct representations of homomorphisms.",
|
| 186 |
+
"author": "Christoph Berkholz and Harry Vinall-Smeeth.",
|
| 187 |
+
"venue": "In Kousha Etessami, Uriel Feige, and Gabriele Puppis, editors, 50th International Colloquium on Automata, Languages, and Programming,\nICALP 2023, July 10-14, 2023, Paderborn, Germany, volume 261 of LIPIcs, pages 113:1\u2013113:19. Schloss Dagstuhl - Leibniz-Zentrum f\u00fcr\nInformatik, 2023.",
|
| 188 |
+
"url": null
|
| 189 |
+
}
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"7": {
|
| 193 |
+
"title": "Expander CNFs have exponential DNNF size.",
|
| 194 |
+
"author": "Simone Bova, Florent Capelli, Stefan Mengel, and Friedrich Slivovsky.",
|
| 195 |
+
"venue": "CoRR, abs/1411.1995, 2014.",
|
| 196 |
+
"url": null
|
| 197 |
+
}
|
| 198 |
+
},
|
| 199 |
+
{
|
| 200 |
+
"8": {
|
| 201 |
+
"title": "Knowledge compilation meets communication complexity.",
|
| 202 |
+
"author": "Simone Bova, Florent Capelli, Stefan Mengel, and Friedrich Slivovsky.",
|
| 203 |
+
"venue": "In Subbarao Kambhampati, editor, Proceedings of the Twenty-Fifth\nInternational Joint Conference on Artificial Intelligence, IJCAI 2016, New\nYork, NY, USA, 9-15 July 2016, pages 1008\u20131014. IJCAI/AAAI Press, 2016.",
|
| 204 |
+
"url": null
|
| 205 |
+
}
|
| 206 |
+
},
|
| 207 |
+
{
|
| 208 |
+
"9": {
|
| 209 |
+
"title": "Graph-based algorithms for boolean function manipulation.",
|
| 210 |
+
"author": "Randal E. Bryant.",
|
| 211 |
+
"venue": "IEEE Trans. Computers, 35(8):677\u2013691, 1986.",
|
| 212 |
+
"url": null
|
| 213 |
+
}
|
| 214 |
+
},
|
| 215 |
+
{
|
| 216 |
+
"10": {
|
| 217 |
+
"title": "The complexity of the counting constraint satisfaction problem.",
|
| 218 |
+
"author": "Andrei A. Bulatov.",
|
| 219 |
+
"venue": "J. ACM, 60(5):34:1\u201334:41, 2013.",
|
| 220 |
+
"url": null
|
| 221 |
+
}
|
| 222 |
+
},
|
| 223 |
+
{
|
| 224 |
+
"11": {
|
| 225 |
+
"title": "A dichotomy theorem for nonuniform CSPs.",
|
| 226 |
+
"author": "Andrei A. Bulatov.",
|
| 227 |
+
"venue": "In Chris Umans, editor, 58th IEEE Annual Symposium on\nFoundations of Computer Science, FOCS 2017, Berkeley, CA, USA, October\n15-17, 2017, pages 319\u2013330. IEEE Computer Society, 2017.",
|
| 228 |
+
"url": null
|
| 229 |
+
}
|
| 230 |
+
},
|
| 231 |
+
{
|
| 232 |
+
"12": {
|
| 233 |
+
"title": "Enumerating homomorphisms.",
|
| 234 |
+
"author": "Andrei A. Bulatov, V\u00edctor Dalmau, Martin Grohe, and D\u00e1niel Marx.",
|
| 235 |
+
"venue": "J. Comput. Syst. Sci., 78(2):638\u2013650, 2012.",
|
| 236 |
+
"url": null
|
| 237 |
+
}
|
| 238 |
+
},
|
| 239 |
+
{
|
| 240 |
+
"13": {
|
| 241 |
+
"title": "Complexity of counting CSP with complex weights.",
|
| 242 |
+
"author": "Jin-Yi Cai and Xi Chen.",
|
| 243 |
+
"venue": "J. ACM, 64(3):19:1\u201319:39, 2017.",
|
| 244 |
+
"url": null
|
| 245 |
+
}
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"14": {
|
| 249 |
+
"title": "Understanding the complexity of #SAT using knowledge\ncompilation.",
|
| 250 |
+
"author": "Florent Capelli.",
|
| 251 |
+
"venue": "In 32nd Annual ACM/IEEE Symposium on Logic in Computer\nScience, LICS 2017, Reykjavik, Iceland, June 20-23, 2017, pages 1\u201310.\nIEEE Computer Society, 2017.",
|
| 252 |
+
"url": null
|
| 253 |
+
}
|
| 254 |
+
},
|
| 255 |
+
{
|
| 256 |
+
"15": {
|
| 257 |
+
"title": "Tractable QBF by Knowledge Compilation.",
|
| 258 |
+
"author": "Florent Capelli and Stefan Mengel.",
|
| 259 |
+
"venue": "In Rolf Niedermeier and Christophe Paul, editors, 36th\nInternational Symposium on Theoretical Aspects of Computer Science (STACS\n2019), volume 126 of Leibniz International Proceedings in Informatics\n(LIPIcs), pages 18:1\u201318:16, Dagstuhl, Germany, 2019. Schloss\nDagstuhl\u2013Leibniz-Zentrum fuer Informatik.",
|
| 260 |
+
"url": null
|
| 261 |
+
}
|
| 262 |
+
},
|
| 263 |
+
{
|
| 264 |
+
"16": {
|
| 265 |
+
"title": "The complexity of general-valued constraint satisfaction problems\nseen from the other side.",
|
| 266 |
+
"author": "Cl\u00e9ment Carbonnel, Miguel Romero, and Stanislav Zivn\u00fd.",
|
| 267 |
+
"venue": "SIAM J. Comput., 51(1):19\u201369, 2022.",
|
| 268 |
+
"url": null
|
| 269 |
+
}
|
| 270 |
+
},
|
| 271 |
+
{
|
| 272 |
+
"17": {
|
| 273 |
+
"title": "Dynamic minimization of sentential decision diagrams.",
|
| 274 |
+
"author": "Arthur Choi and Adnan Darwiche.",
|
| 275 |
+
"venue": "In Marie desJardins and Michael L. Littman, editors, Proceedings\nof the Twenty-Seventh AAAI Conference on Artificial Intelligence, July\n14-18, 2013, Bellevue, Washington, USA, pages 187\u2013194. AAAI Press,\n2013.",
|
| 276 |
+
"url": null
|
| 277 |
+
}
|
| 278 |
+
},
|
| 279 |
+
{
|
| 280 |
+
"18": {
|
| 281 |
+
"title": "A dichotomy theorem for maximum generalized satisfiability problems.",
|
| 282 |
+
"author": "Nadia Creignou.",
|
| 283 |
+
"venue": "J. Comput. Syst. Sci., 51(3):511\u2013522, 1995.",
|
| 284 |
+
"url": null
|
| 285 |
+
}
|
| 286 |
+
},
|
| 287 |
+
{
|
| 288 |
+
"19": {
|
| 289 |
+
"title": "Complexity of generalized satisfiability counting problems.",
|
| 290 |
+
"author": "Nadia Creignou and Miki Hermann.",
|
| 291 |
+
"venue": "Inf. Comput., 125(1):1\u201312, 1996.",
|
| 292 |
+
"url": null
|
| 293 |
+
}
|
| 294 |
+
},
|
| 295 |
+
{
|
| 296 |
+
"20": {
|
| 297 |
+
"title": "Complexity classifications of boolean constraint satisfaction\nproblems.",
|
| 298 |
+
"author": "Nadia Creignou, Sanjeev Khanna, and Madhu Sudan.",
|
| 299 |
+
"venue": "SIAM, 2001.",
|
| 300 |
+
"url": null
|
| 301 |
+
}
|
| 302 |
+
},
|
| 303 |
+
{
|
| 304 |
+
"21": {
|
| 305 |
+
"title": "Enumerating all solutions of a boolean CSP by non-decreasing\nweight.",
|
| 306 |
+
"author": "Nadia Creignou, Fr\u00e9d\u00e9ric Olive, and Johannes Schmidt.",
|
| 307 |
+
"venue": "In Karem A. Sakallah and Laurent Simon, editors, Theory and\nApplications of Satisfiability Testing - SAT 2011 - 14th International\nConference, SAT 2011, Ann Arbor, MI, USA, June 19-22, 2011. Proceedings,\nvolume 6695 of Lecture Notes in Computer Science, pages 120\u2013133.\nSpringer, 2011.",
|
| 308 |
+
"url": null
|
| 309 |
+
}
|
| 310 |
+
},
|
| 311 |
+
{
|
| 312 |
+
"22": {
|
| 313 |
+
"title": "Computational Complexity of Problems over Generalized Formulas.",
|
| 314 |
+
"author": "V\u00edctor Dalmau.",
|
| 315 |
+
"venue": "PhD thesis, Universitat Polit\u00e9cnica de Catalunya, 2000.",
|
| 316 |
+
"url": null
|
| 317 |
+
}
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"23": {
|
| 321 |
+
"title": "The complexity of counting homomorphisms seen from the other side.",
|
| 322 |
+
"author": "V\u00edctor Dalmau and Peter Jonsson.",
|
| 323 |
+
"venue": "Theor. Comput. Sci., 329(1-3):315\u2013323, 2004.",
|
| 324 |
+
"url": null
|
| 325 |
+
}
|
| 326 |
+
},
|
| 327 |
+
{
|
| 328 |
+
"24": {
|
| 329 |
+
"title": "Decomposable negation normal form.",
|
| 330 |
+
"author": "Adnan Darwiche.",
|
| 331 |
+
"venue": "J. ACM, 48(4):608\u2013647, 2001.",
|
| 332 |
+
"url": null
|
| 333 |
+
}
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"25": {
|
| 337 |
+
"title": "New advances in compiling CNF into decomposable negation normal\nform.",
|
| 338 |
+
"author": "Adnan Darwiche.",
|
| 339 |
+
"venue": "In Ram\u00f3n L\u00f3pez de M\u00e1ntaras and Lorenza Saitta,\neditors, Proceedings of the 16th Eureopean Conference on Artificial\nIntelligence, ECAI\u20192004, including Prestigious Applicants of Intelligent\nSystems, PAIS 2004, Valencia, Spain, August 22-27, 2004, pages 328\u2013332.\nIOS Press, 2004.",
|
| 340 |
+
"url": null
|
| 341 |
+
}
|
| 342 |
+
},
|
| 343 |
+
{
|
| 344 |
+
"26": {
|
| 345 |
+
"title": "A knowledge compilation map.",
|
| 346 |
+
"author": "Adnan Darwiche and Pierre Marquis.",
|
| 347 |
+
"venue": "J. Artif. Intell. Res., 17:229\u2013264, 2002.",
|
| 348 |
+
"url": null
|
| 349 |
+
}
|
| 350 |
+
},
|
| 351 |
+
{
|
| 352 |
+
"27": {
|
| 353 |
+
"title": "Query processing on probabilistic data: A survey.",
|
| 354 |
+
"author": "Guy Van den Broeck and Dan Suciu.",
|
| 355 |
+
"venue": "Found. Trends Databases, 7(3-4):197\u2013341, 2017.",
|
| 356 |
+
"url": null
|
| 357 |
+
}
|
| 358 |
+
},
|
| 359 |
+
{
|
| 360 |
+
"28": {
|
| 361 |
+
"title": "An effective dichotomy for the counting constraint satisfaction\nproblem.",
|
| 362 |
+
"author": "Martin E. Dyer and David Richerby.",
|
| 363 |
+
"venue": "SIAM J. Comput., 42(3):1245\u20131274, 2013.",
|
| 364 |
+
"url": null
|
| 365 |
+
}
|
| 366 |
+
},
|
| 367 |
+
{
|
| 368 |
+
"29": {
|
| 369 |
+
"title": "On the use of partially ordered decision graphs in knowledge\ncompilation and quantified boolean formulae.",
|
| 370 |
+
"author": "H\u00e9l\u00e8ne Fargier and Pierre Marquis.",
|
| 371 |
+
"venue": "In Proceedings, The Twenty-First National Conference on\nArtificial Intelligence and the Eighteenth Innovative Applications of\nArtificial Intelligence Conference, July 16-20, 2006, Boston, Massachusetts,\nUSA, pages 42\u201347. AAAI Press, 2006.",
|
| 372 |
+
"url": null
|
| 373 |
+
}
|
| 374 |
+
},
|
| 375 |
+
{
|
| 376 |
+
"30": {
|
| 377 |
+
"title": "The complexity of homomorphism and constraint satisfaction problems\nseen from the other side.",
|
| 378 |
+
"author": "Martin Grohe.",
|
| 379 |
+
"venue": "J. ACM, 54(1), mar 2007.",
|
| 380 |
+
"url": null
|
| 381 |
+
}
|
| 382 |
+
},
|
| 383 |
+
{
|
| 384 |
+
"31": {
|
| 385 |
+
"title": "Sur les assemblages de lignes.",
|
| 386 |
+
"author": "Camille Jordan.",
|
| 387 |
+
"venue": "Journal f\u00fcr die reine und angewandte Mathematik,\n70:185\u2013190, 1869.",
|
| 388 |
+
"url": null
|
| 389 |
+
}
|
| 390 |
+
},
|
| 391 |
+
{
|
| 392 |
+
"32": {
|
| 393 |
+
"title": "The approximability of constraint satisfaction problems.",
|
| 394 |
+
"author": "Sanjeev Khanna, Madhu Sudan, Luca Trevisan, and David P. Williamson.",
|
| 395 |
+
"venue": "SIAM J. Comput., 30(6):1863\u20131920, 2000.",
|
| 396 |
+
"url": null
|
| 397 |
+
}
|
| 398 |
+
},
|
| 399 |
+
{
|
| 400 |
+
"33": {
|
| 401 |
+
"title": "Compiling constraint networks into multivalued decomposable decision\ngraphs.",
|
| 402 |
+
"author": "Fr\u00e9d\u00e9ric Koriche, Jean-Marie Lagniez, Pierre Marquis, and Samuel\nThomas.",
|
| 403 |
+
"venue": "In Qiang Yang and Michael Wooldridge, editors, Proceedings of\nthe Twenty-Fourth International Joint Conference on Artificial Intelligence,\nIJCAI 2015, Buenos Aires, Argentina, July 25-31, 2015, pages 332\u2013338.\nAAAI Press, 2015.",
|
| 404 |
+
"url": null
|
| 405 |
+
}
|
| 406 |
+
},
|
| 407 |
+
{
|
| 408 |
+
"34": {
|
| 409 |
+
"title": "Communication complexity.",
|
| 410 |
+
"author": "Eyal Kushilevitz and Noam Nisan.",
|
| 411 |
+
"venue": "Cambridge University Press, 1997.",
|
| 412 |
+
"url": null
|
| 413 |
+
}
|
| 414 |
+
},
|
| 415 |
+
{
|
| 416 |
+
"35": {
|
| 417 |
+
"title": "An improved decision-dnnf compiler.",
|
| 418 |
+
"author": "Jean-Marie Lagniez and Pierre Marquis.",
|
| 419 |
+
"venue": "In Carles Sierra, editor, Proceedings of the Twenty-Sixth\nInternational Joint Conference on Artificial Intelligence, IJCAI 2017,\nMelbourne, Australia, August 19-25, 2017, pages 667\u2013673. ijcai.org, 2017.",
|
| 420 |
+
"url": null
|
| 421 |
+
}
|
| 422 |
+
},
|
| 423 |
+
{
|
| 424 |
+
"36": {
|
| 425 |
+
"title": "Characterizing valiant\u2019s algebraic complexity classes.",
|
| 426 |
+
"author": "Guillaume Malod and Natacha Portier.",
|
| 427 |
+
"venue": "J. Complex., 24(1):16\u201338, 2008.",
|
| 428 |
+
"url": null
|
| 429 |
+
}
|
| 430 |
+
},
|
| 431 |
+
{
|
| 432 |
+
"37": {
|
| 433 |
+
"title": "Compiling constraint networks into AND/OR multi-valued decision\ndiagrams (aomdds).",
|
| 434 |
+
"author": "Robert Mateescu and Rina Dechter.",
|
| 435 |
+
"venue": "In Fr\u00e9d\u00e9ric Benhamou, editor, Principles and\nPractice of Constraint Programming - CP 2006, 12th International\nConference, CP 2006, Nantes, France, September 25-29, 2006, Proceedings,\nvolume 4204 of Lecture Notes in Computer Science, pages 329\u2013343.\nSpringer, 2006.",
|
| 436 |
+
"url": null
|
| 437 |
+
}
|
| 438 |
+
},
|
| 439 |
+
{
|
| 440 |
+
"38": {
|
| 441 |
+
"title": "AND/OR multi-valued decision diagrams (aomdds) for graphical\nmodels.",
|
| 442 |
+
"author": "Robert Mateescu, Rina Dechter, and Radu Marinescu.",
|
| 443 |
+
"venue": "J. Artif. Intell. Res., 33:465\u2013519, 2008.",
|
| 444 |
+
"url": null
|
| 445 |
+
}
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"39": {
|
| 449 |
+
"title": "Randomized Algorithms.",
|
| 450 |
+
"author": "Rajeev Motwani and Prabhakar Raghavan.",
|
| 451 |
+
"venue": "Cambridge University Press, 1995.",
|
| 452 |
+
"url": null
|
| 453 |
+
}
|
| 454 |
+
},
|
| 455 |
+
{
|
| 456 |
+
"40": {
|
| 457 |
+
"title": "Dsharp: Fast d-dnnf compilation with sharpSAT.",
|
| 458 |
+
"author": "Christian J. Muise, Sheila A. McIlraith, J. Christopher Beck, and Eric I. Hsu.",
|
| 459 |
+
"venue": "In Leila Kosseim and Diana Inkpen, editors, Advances in\nArtificial Intelligence - 25th Canadian Conference on Artificial\nIntelligence, Canadian AI 2012, Toronto, ON, Canada, May 28-30, 2012.\nProceedings, volume 7310 of Lecture Notes in Computer Science, pages\n356\u2013361. Springer, 2012.",
|
| 460 |
+
"url": null
|
| 461 |
+
}
|
| 462 |
+
},
|
| 463 |
+
{
|
| 464 |
+
"41": {
|
| 465 |
+
"title": "Factorized databases: A knowledge compilation perspective.",
|
| 466 |
+
"author": "Dan Olteanu.",
|
| 467 |
+
"venue": "In Adnan Darwiche, editor, Beyond NP, Papers from the 2016\nAAAI Workshop, Phoenix, Arizona, USA, February 12, 2016, volume WS-16-05\nof AAAI Technical Report. AAAI Press, 2016.",
|
| 468 |
+
"url": null
|
| 469 |
+
}
|
| 470 |
+
},
|
| 471 |
+
{
|
| 472 |
+
"42": {
|
| 473 |
+
"title": "Size bounds for factorised representations of query results.",
|
| 474 |
+
"author": "Dan Olteanu and Jakub Z\u00e1vodn\u00fd.",
|
| 475 |
+
"venue": "ACM Trans. Database Syst., 40(1):2:1\u20132:44, 2015.",
|
| 476 |
+
"url": null
|
| 477 |
+
}
|
| 478 |
+
},
|
| 479 |
+
{
|
| 480 |
+
"43": {
|
| 481 |
+
"title": "A top-down compiler for sentential decision diagrams.",
|
| 482 |
+
"author": "Umut Oztok and Adnan Darwiche.",
|
| 483 |
+
"venue": "In Qiang Yang and Michael J. Wooldridge, editors, Proceedings of\nthe Twenty-Fourth International Joint Conference on Artificial Intelligence,\nIJCAI 2015, Buenos Aires, Argentina, July 25-31, 2015, pages 3141\u20133148.\nAAAI Press, 2015.",
|
| 484 |
+
"url": null
|
| 485 |
+
}
|
| 486 |
+
},
|
| 487 |
+
{
|
| 488 |
+
"44": {
|
| 489 |
+
"title": "New compilation languages based on structured decomposability.",
|
| 490 |
+
"author": "Knot Pipatsrisawat and Adnan Darwiche.",
|
| 491 |
+
"venue": "In Dieter Fox and Carla P. Gomes, editors, Proceedings of the\nTwenty-Third AAAI Conference on Artificial Intelligence, AAAI 2008,\nChicago, Illinois, USA, July 13-17, 2008, pages 517\u2013522. AAAI Press,\n2008.",
|
| 492 |
+
"url": null
|
| 493 |
+
}
|
| 494 |
+
},
|
| 495 |
+
{
|
| 496 |
+
"45": {
|
| 497 |
+
"title": "A lower bound for the size of syntactically multilinear arithmetic\ncircuits.",
|
| 498 |
+
"author": "Ran Raz, Amir Shpilka, and Amir Yehudayoff.",
|
| 499 |
+
"venue": "SIAM J. Comput., 38(4):1624\u20131647, 2008.",
|
| 500 |
+
"url": null
|
| 501 |
+
}
|
| 502 |
+
},
|
| 503 |
+
{
|
| 504 |
+
"46": {
|
| 505 |
+
"title": "The complexity of satisfiability problems.",
|
| 506 |
+
"author": "Thomas J. Schaefer.",
|
| 507 |
+
"venue": "In Richard J. Lipton, Walter A. Burkhard, Walter J. Savitch, Emily P.\nFriedman, and Alfred V. Aho, editors, Proceedings of the 10th Annual\nACM Symposium on Theory of Computing, May 1-3, 1978, San Diego, California,\nUSA, pages 216\u2013226. ACM, 1978.",
|
| 508 |
+
"url": null
|
| 509 |
+
}
|
| 510 |
+
},
|
| 511 |
+
{
|
| 512 |
+
"47": {
|
| 513 |
+
"title": "The complexity of finite-valued csps.",
|
| 514 |
+
"author": "Johan Thapper and Stanislav Zivn\u00fd.",
|
| 515 |
+
"venue": "J. ACM, 63(4):37:1\u201337:33, 2016.",
|
| 516 |
+
"url": null
|
| 517 |
+
}
|
| 518 |
+
},
|
| 519 |
+
{
|
| 520 |
+
"48": {
|
| 521 |
+
"title": "Properties that characterize LOGCFL.",
|
| 522 |
+
"author": "H. Venkateswaran.",
|
| 523 |
+
"venue": "In Alfred V. Aho, editor, Proceedings of the 19th Annual ACM\nSymposium on Theory of Computing, 1987, New York, New York, USA, pages\n141\u2013150. ACM, 1987.",
|
| 524 |
+
"url": null
|
| 525 |
+
}
|
| 526 |
+
},
|
| 527 |
+
{
|
| 528 |
+
"49": {
|
| 529 |
+
"title": "Branching Programs and Binary Decision Diagrams.",
|
| 530 |
+
"author": "Ingo Wegener.",
|
| 531 |
+
"venue": "SIAM, 2000.",
|
| 532 |
+
"url": null
|
| 533 |
+
}
|
| 534 |
+
},
|
| 535 |
+
{
|
| 536 |
+
"50": {
|
| 537 |
+
"title": "A proof of CSP dichotomy conjecture.",
|
| 538 |
+
"author": "Dmitriy Zhuk.",
|
| 539 |
+
"venue": "In Chris Umans, editor, 58th IEEE Annual Symposium on\nFoundations of Computer Science, FOCS 2017, Berkeley, CA, USA, October\n15-17, 2017, pages 331\u2013342. IEEE Computer Society, 2017.",
|
| 540 |
+
"url": null
|
| 541 |
+
}
|
| 542 |
+
}
|
| 543 |
+
],
|
| 544 |
+
"url": "http://arxiv.org/html/2311.10040v2"
|
| 545 |
+
}
|
20241004/2312.08064v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2312.10577v2.json
ADDED
|
@@ -0,0 +1,30 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "A fast fractional block-centered finite difference method for two-sided space-fractional diffusion equations on general nonuniform grids",
|
| 3 |
+
"abstract": "In this paper, a two-sided variable-coefficient space-fractional diffusion equation with fractional Neumann boundary condition is considered. To conquer the weak singularity caused by nonlocal space-fractional differential operators, a fractional block-centered finite difference (BCFD) method on general nonuniform grids is proposed. However, this discretization still results in an unstructured dense coefficient matrix with huge memory requirement and computational complexity. To address this issue, a fast version fractional BCFD algorithm by employing the well-known sum-of-exponentials (SOE) approximation technique is also proposed. Based upon the Krylov subspace iterative methods, fast matrix-vector multiplications of the resulting coefficient matrices with any vector are developed, in which they can be implemented in only operations per iteration without losing any accuracy compared to the direct solvers, where is the number of exponentials in the SOE approximation. Moreover, the coefficient matrices do not necessarily need to be generated explicitly, while they can be stored in memory by only storing some coefficient vectors. Numerical experiments are provided to demonstrate the efficiency and accuracy of the method.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Fractional partial differential equations provide a very adequate and competitive tool to model challenging phenomena involving anomalous diffusion or long-range memory and spatial interactions. For example, the space-fractional diffusion equations (SFDEs) can be used to describe the anomalous diffusion occurred in many transport processes BWM00; SLHS15; MK00; SZCR14.\nExtensive research has been conducted in the development of numerical methods for various SFDEs DJWWZ21; ER06; FZS22; HPLC19; LZWF22; LFWC19; MS16; PSXX21; SCZC19; TZD15; XLS22; ZLLB14. However, due to the nonlocal nature of the space-fractional differential operators, numerical discretizations tend to generate dense and full stiffness matrices. Traditionally, these methods are solved via the direct solvers such as Gaussian elimination (GE) method, which requires a computational complexity of order per time level and memory of order , where is the total number of spatial unknowns in the numerical discretization. Consequently, numerical simulations of SFDEs would lead to significantly increased memory requirement and computational complexity as increases. However, in the case of uniform spatial partitions, Toeplitz-like structure of the resulting stiffness matrices was discovered in WWS10 for space-fractional diffusion model, and thus based upon the special matrix structures, fast Krylov subspace iterative solvers are developed for different numerical methods of various SFDEs, in which both memory requirement and computational complexity have been largely reduced FLW19; JW15fb; JW16; LZFW21; PKNS14; WB12; WD13; WW11; ZZFL21.\nOn the other hand, it is shown that the solutions to SFDEs usually exhibit power function singularities near the boundary even under assumptions that the coefficients and right-hand side source term are sufficiently smooth EHR18; JLPR15; WY17. This motivates the usage of nonuniform grids to better capture the singular behavior of the solutions. Simmons SYM17 derived a first-order finite volume method on nonuniform grids for two-sided fractional diffusion equations. However, since the dense coefficient matrices do not have Toeplitz-like structure as the case of uniform grids, the previous developed fast algorithms are no longer be applicable. It makes sense to construct novel fast algorithms discretized on arbitrary nonuniform grids. Recently, finite volume methods based on special composite structured grids that consist of a uniform spatial partition far away the boundary and a locally refinement near the boundary were studied to address this issue DJWWZ21; JW15; JW19EAJAM; JW19, where Toeplitz-like structures of the coefficient matrices can be still found, and the Toeplitz structure of the diagonal blocks of the resulting block coefficient matrix was employed for efficient evaluate matrix-vector multiplications and the off-diagonal blocks were properly approximated by low-rank decompositions.\nHowever, there are still lack of fast numerical methods for SFDEs on general nonuniform grids. Recently, Jiang et al. JZZZ17 developed a novel sum-of-exponentials (SOE) technique for fast evaluation of the Caputo time-fractional derivative and applied to time-fractional diffusion equations. Then, by adopting the SOE technique on graded spatial grids, Fang et al. FZS22 proposed a fast first-order finite volume method for the one-dimensional SFDEs with homogeneous Dirichlet boundary condition. 
However, to our best knowledge, currently there seems rarely papers on construction of highly efficient numerical methods on general nonuniform spatial grids, and it is still a major challenge for the modeling of SFDEs.\nBlock-centered finite difference (BCFD) method, sometimes called cell-centered finite difference method, can be thought of as the lowest-order Raviart-Thomas mixed element method, by employing a proper numerical quadrature formula RT77. One of the most important merits of the BCFD method is that it can simultaneously approximate the primal variable and its flux to a same order of accuracy on nonuniform grids, without any accuracy lost compared to the standard finite difference method. Besides, the BCFD method can very easily deal with model problems with Neumann or periodic boundary conditions. In recent years, BCFD method has been widely used to simulate integer-order PDEs and time-fractional PDEs LL16; RP12; XXF22. However, due to the complexity of the nonlocal space-fractional differential operators, we do not see any report on BCFD method for space-fractional PDEs. Therefore, we aim to present a fractional type BCFD method for model (1.1 ###reference_###) with fractional Neumann boundary condition, in which nonuniform spatial grids are utilized to capture the boundary singularity of the solution and thus to improve the computational accuracy. Moreover, another main goal of this paper is to present a fast fractional BCFD algorithm to improve the computational efficiency of modeling the SFDEs, in which a fast Krylov subspace iterative solver based upon efficient matrix-vector multiplications is developed.\nThe present paper focuses on efficient and accurate numerical approximation of the following two-sided variable-coefficient SFDE with anomalous diffusion orders SBMW01:\nwhere represent the weighted fractional-order differential operators along direction:\nwith weighted parameters is a parameter describing the relative probability of particle traveling ahead or behind the mean velocity and positive diffusion coefficients , . Physically, the fractional derivative can be interpreted as a nonlocal Fickian law SBMW01. Moreover, and respectively denote the left/right-sided Riemann-Liouville fractional integrals, defined by P99\nSimilarly, the operator along direction can be also defined.\nWe assume that problem (1.1 ###reference_###) is subjected to the following initial condition\nand fractional Neumann boundary conditions BKMSS15; JW15fb; WWC18\nIn this paper, by introducing an auxiliary fractional flux variable, we first develop a fractional CN-BCFD method on general nonuniform grids for SFDEs (1.1 ###reference_###)\u2013(1.3 ###reference_###) in one space dimension, in which the Crank-Nicolson (CN) temporal discretization combined with the fractional BCFD spatial discretization are employed.\nThen, we develop fast approximation techniques to the left/right-sided Riemann-Liouville fractional integrals, using efficient SOE approximations to the kernels , and piecewise linear interpolations of the primal variable . Based upon these approximations, fast Krylov subspace iterative solvers for the fractional BCFD method is then proposed with fast matrix-vector multiplications of the resulting coefficient matrices with any vector. It is shown that the solver requires only operations per iteration with efficient matrix storage mechanism, where is the number of exponentials in the SOE approximation. 
Finally, ample numerical experiments are provided to demonstrate the efficiency and accuracy of the method.\nAs far as we know, this seems to be the first time that a fast fractional BCFD method is developed for the SFDEs, where fractional Neumann boundary condition is considered and general nonuniform spatial grids are adopted.\nThe rest of the paper is organized as follows. In Section 2 ###reference_###, we present the fractional CN-BCFD method on nonuniform spatial grids for the SFDEs. In Section 3 ###reference_###, a fast version fractional CN-BCFD method is proposed to further improve the computational efficiency. Then, we give some numerical experiments to investigate the accuracy and performance of the fast method in Section LABEL:sec:num. Some concluding remarks are given in the last section."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "A fractional CN-BCFD method on general nonuniform grids",
|
| 15 |
+
"text": "For simplicity of presentation, in this section we pay our attention to the one-dimensional version of (1.1 ###reference_###):\nwith . While, the extension to the two-dimensional case is straightforward but more complicated.\nIn the following, we introduce some notations.First, for positive integer , we define a uniform temporal partition of by , , with temporal stepsize . For temporal grid function , define\nNext, for positive integer , define a set of nonuniform spatial grids by\nwith grid size for and .\nBesides, set for as another set of nonuniform staggered spatial grids. Denote , , , . Furthermore, for spatial grid functions and , define\nTo propose a second-order fractional BCFD method for model (2.1 ###reference_###), we introduce the piecewise linear interpolation function for as follows:\nwhere , and and are defined by two-point extrapolations respectively as\nBy Taylor\u2019s expansion, it is easily to show that for smooth it holds\nIn this section, we aim to develop a direct fractional BCFD method on general nonuniform spatial grids for the one-dimensional SFDE (2.1 ###reference_###). To this aim, we introduce an auxiliary fractional flux variable\nwith\nThen, the original two-sided variable-coefficient SFDE model (2.1 ###reference_###) is equivalent to\nwhere is given by (2.3 ###reference_###)\u2013(2.4 ###reference_###).\nThe BCFD method can be thought of as a special mixed element method, in which on each element , the flux variable is approximated at the endpoint of each element, i.e., , while the primal variable is approximated at the midpoint of each element, i.e., . Denote the finite difference approximations for , and for , . Then, a second-order Crank-Nicolson semi-discretization scheme for (2.5 ###reference_###) reads as\nfor , where is defined by (2.3 ###reference_###)\u2013(2.4 ###reference_###) at time .\nWe next consider the spatial discretization of (2.6 ###reference_###) using the BCFD method on staggered grids and . The crucial step is the approximations to the fractional integrals and in (2.4 ###reference_###) at each grid point . To this aim, we first construct an approximation to the left-sided Riemann-Liouville fractional integral . By splitting the integral into two parts, of which the first part is an integral on half a grid interval, and then approximating the unknown function by its linear interpolation (2.2 ###reference_###), we get\nfor , and, in particular, for , (2.7 ###reference_###) reduces to\nA simple calculation shows that the coefficients in (2.7 ###reference_###)\u2013(2.8 ###reference_###) can be expressed as\nwith\nfor .\nNext, we pay attention to the approximation to the right-sided Riemann-Liouville fractional integral . Similarly, for , it can be approximated using the linear interpolation (2.2 ###reference_###) by\nand, in particular, for , (2.11 ###reference_###) reduces to\nSimilarly, the coefficients in (2.11 ###reference_###)\u2013(2.12 ###reference_###) are expressed as\nwith\nfor .\nNow, by combining (2.7 ###reference_###)\u2013(2.8 ###reference_###) and (2.11 ###reference_###)\u2013(2.12 ###reference_###) with (2.6 ###reference_###), a fully discrete fractional CN-BCFD scheme is proposed as follows:\nwhere , , and .\nMoreover, let\nwhere refers to the transpose of the vector. 
Then, by canceling the flux variable, we can present the fractional CN-BCFD scheme (2.15 ###reference_###) in a more compact matrix form with respect to :\nwhere represents the identity matrix of order , and is a stiffness matrix of order with entries corresponding to such that\nwhere, for simplicity, hereafter we denote and , and\nand\nIt can be observed that the resulting stiffness matrix is actually a full and dense matrix with a complicated structure, which requires memory for storage. Furthermore, at each time level, traditional Gaussian type direct solvers require computational complexity, and Krylov subspace iterative solvers also require computational complexity per iteration. Therefore, the implementation of (2.16 ###reference_###) is indeed computationally expensive, especially for large-scale modeling and simulation. And thus, an efficient solution method for (2.16 ###reference_###) is of course highly demanded. We shall discuss this issue in next section.\nIn the case of uniform spatial partition, i.e., , the stiffness matrix in (2.17 ###reference_###) can be expressed as\nwhere\nand the matrices and can be expressed into the following blocks\nThe submatrices and of order represent the stiffness matrices corresponding to the nodes and , respectively. The submatrix of order -by- represents the coupling between the nodes and the nodes , while of order -by- represents the coupling between the nodes and the nodes . Finally, the submatrices and of order represent the stiffness matrices corresponding to the nodes and , respectively, and both of them have special Toeplitz structures such that\nwhere\nThus, the stiffness matrix has Toeplitz-like structure, and therefore, the resulting linear algebraic system (2.16 ###reference_###) can be solved via the fast Fourier transform (FFT) approach FLW19; JW15; LZFW21; LFWC19; WB12; ZZFL21. However, due to the singularity caused by the fractional operators, it is preferred to use nonuniform spatial grids to better capture the singular behavior, and that would destroy the special matrix structure and prevent the use of previous developed fast algorithms.\nFor and , it can be proved that the coefficient matrix in (2.16 ###reference_###) is positive define if constant coefficients and uniform grids are considered.\nThus, the fractional CN-BCFD method (2.15 ###reference_###) is uniquely solvable in such case. However, more advanced mathematical techniques are required for the analysis of non-uniform grids and general diffusion coefficients, which presents a significant challenge for future work."
|
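The FFT-based fast solvers cited above rest on a standard trick: an n-by-n Toeplitz block can be embedded into a 2n-by-2n circulant matrix, whose matrix-vector product is diagonalized by the FFT. As a minimal, self-contained illustration of that technique (a generic sketch with hypothetical variable names, not the paper's implementation), a Python/NumPy version is:

    import numpy as np

    def toeplitz_matvec(col, row, x):
        # Multiply the Toeplitz matrix with first column `col` and first row `row`
        # (col[0] == row[0]) by x in O(n log n) via circulant embedding.
        n = len(x)
        c = np.concatenate([col, [0.0], row[1:][::-1]])   # first column of the embedding circulant
        y = np.fft.ifft(np.fft.fft(c) * np.fft.fft(np.concatenate([x, np.zeros(n)])))
        return y[:n].real

    # Sanity check against the dense product.
    rng = np.random.default_rng(0)
    n = 6
    col, row = rng.standard_normal(n), rng.standard_normal(n)
    row[0] = col[0]
    T = np.array([[col[i - j] if i >= j else row[j - i] for j in range(n)] for i in range(n)])
    x = rng.standard_normal(n)
    assert np.allclose(T @ x, toeplitz_matvec(col, row, x))

Since only the first column and first row of each Toeplitz block need to be stored, this also explains the reduction in memory from quadratic to linear in the uniform-grid case.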
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "A fast fractional CN-BCFD method on general nonuniform grids",
|
| 21 |
+
"text": "In this section, we present a fast version fractional CN-BCFD method based on Krylov subspace iterative methods for the SFDEs (1.1 ###reference_###). For ease of exposition, we prefer to utilize the biconjugate gradient stabilized (BiCGSTAB) method, which has faster and smoother convergence than other Krylov subspace methods.\nWe remark that in the standard BiCGSTAB method, the evaluations of matrix-vector multiplications require computational complexity per iteration and memory for a dense and full matrix, while all other computations require only computational complexity and memory. Therefore, it is essential to construct a fast fractional CN-BCFD method based on the BiCGSTAB solver by developing a fast matrix-vector multiplication mechanism and an efficient matrix storage approach.\nIn this section, the SOE technique JZZZ17, which was originally proposed to fast evaluation of the time-fractional derivative, combined with the developed fractional BCFD method will be applied to approximate the space-fractional diffusion equations. To develop a mechanism for efficient storage of the coefficient matrix and fast matrix-vector multiplications, we also split the left/right-sided Riemann-Liouville fractional integrals into two parts \u2013 a local part and a history part, i.e.,\nfor .\nWithout loss of generality, below we only pay attention to the derivation of the approximation formula for the left-sided Riemann-Liouville fractional integral by the SOE technique, while it is analogous for the right-sided Riemann-Liouville fractional integral. First, for the case , the left-sided Riemann-Liouville fractional integral can be calculated directly as in Section 2 ###reference_### since it only has local part\nFor , as the local part contributes few memory and computational cost compared with the history part , we calculate the local part directly using (2.2 ###reference_###) as\nwhere\nThen, all the remains are to approximate the integral on the interval in (3.1 ###reference_###) efficiently and accurately. To this aim, we revisit the SOE approximation for the weak singularity kernel function below.\nFor given , an absolute tolerance error , a cut-off restriction and a given position , there exists a positive integer , positive quadrature points and corresponding positive weights satisfying\nwhere the number of exponentials satisfies\nMotivated by the above lemma, we replace the convolution kernel by its SOE approximation in (3.6 ###reference_###) and by its linear interpolation in (2.2 ###reference_###), then the history part in (3.1 ###reference_###) is approximated as follows:\nfor , where\nA simple calculation shows that the integral in (3.8 ###reference_###) can be computed recursively as\nwhere\nwith\nNote that at each time step, we only need work to compute , as is already known at that point. Therefore, the evaluation of the convolution (3.8 ###reference_###) can be accelerated via the recurrence relation (3.9 ###reference_###)\u2013(3.10 ###reference_###).\nLet be the fast numerical approximation to left-sided Riemann-Liouville fractional integral for . 
Therefore, combining (3.1 ###reference_###), (3.3 ###reference_###), (3.4 ###reference_###) and (3.7 ###reference_###) together, we obtain a fast evaluation formula for the left-sided Riemann-Liouville fractional integral as\nwith , computed recursively by (3.9 ###reference_###)\u2013(3.10 ###reference_###) for each index .\nAnalogously, let be the fast numerical approximation to right-sided Riemann-Liouville fractional integral for . By using the same approach as above, the fast approximation for the right-sided Riemann-Liouville fractional integral is proposed as follows\nwhere\nand the integral\n satisfies the following recursive formula\nThe coefficients in (3.15 ###reference_###) are defined by\nwith\nNow, by combining the above approximations together with (2.6 ###reference_###), we get the following fast version fractional CN-BCFD scheme:\nwith and defined by (3.12 ###reference_###) and (3.13 ###reference_###), respectively.\nAgain, let and be defined as in Section 2 ###reference_###, we can rewrite the fast version fractional CN-BCFD scheme (3.16 ###reference_###) into the following matrix form:\nwhere the stiffness matrix of order has a special matrix representation\nwith corresponds to fast discretization of , and the entries of the matrix-vector multiplication are given by\nBased upon the above discussions, we develop a fast version BiCGSTAB iterative method for (3.17 ###reference_###), where matrix-vector multiplications for any can be computed in an efficient way. Basically, we have the following conclusions.\nThe matrix-vector multiplication for any can be carried out in operations, where .\nLet denote the -dimensional column vector. Then, according to equation (3.19 ###reference_###), the matrix-vector multiplication for any is of the form\nwhere denotes the Hadamard (element by element) product of two vectors, and has the same mathematical expression as defined by (3.12 ###reference_###) just with replaced by . Note that this step only needs operations. While, the computations of in (3.20 ###reference_###), see that of in (3.12 ###reference_###) require operations in total. This is due to that in the fast version fractional CN-BCFD scheme (3.16 ###reference_###), can be computed by the recurrence formula (3.10 ###reference_###) in only work for each , and thus each costs only operations. In summary, the matrix-vector multiplication can be evaluated in operations.\nThe matrix-vector multiplication for any can be carried out in operations, where ."
|
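To make the cost claim of the recurrence (3.9)\u2013(3.10) concrete, the following Python sketch evaluates the history parts of a left-sided convolution at all grid points using a precomputed SOE of the kernel. It is a simplified illustration, not the paper's scheme: the SOE nodes and weights (s, w) are assumed given, the unknown u is treated as piecewise constant on each cell, and the cut-off is taken as one cell, whereas the paper uses piecewise linear interpolation and a half-cell splitting.

    import numpy as np

    def history_parts_soe(x, u, s, w):
        # H[i] ~ sum_j w[j] * int_{x[0]}^{x[i-1]} exp(-s[j]*(x[i]-t)) u(t) dt,
        # with u[k] the (constant) value of u on the cell (x[k-1], x[k]].
        n, m = len(x), len(s)
        V = np.zeros(m)                      # running exponential moments
        H = np.zeros(n)
        for i in range(2, n):
            decay = np.exp(-s * (x[i] - x[i - 1]))
            # shift the stored history from x[i-1] to x[i], then absorb the newly
            # "historical" cell (x[i-2], x[i-1]] in closed form
            V = decay * V + u[i - 1] * (decay - np.exp(-s * (x[i] - x[i - 2]))) / s
            H[i] = w @ V                     # O(N_exp) work per grid point
        return H

Each grid point updates only the N_exp running moments V, which is why the total cost of forming all history parts grows like the number of grid points times the number of exponentials rather than quadratically in the number of grid points.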
| 22 |
+
}
|
| 23 |
+
],
|
| 24 |
+
"appendix": [],
|
| 25 |
+
"tables": {},
|
| 26 |
+
"image_paths": {},
|
| 27 |
+
"validation": true,
|
| 28 |
+
"references": [],
|
| 29 |
+
"url": "http://arxiv.org/html/2312.10577v2"
|
| 30 |
+
}
|
20241004/2401.04660v2.json
ADDED
|
@@ -0,0 +1,425 @@
| 1 |
+
{
|
| 2 |
+
"title": "Distributed Data-driven Unknown-input Observers for State Estimation",
|
| 3 |
+
"abstract": "Unknown inputs related to, e.g., sensor aging, modeling errors, or device bias, represent a major concern\nin wireless sensor networks, as they degrade the state estimation performance. To improve the performance, unknown-input observers (UIOs) have been proposed. Most of the results available to design UIOs are based on explicit system models, which can be difficult or impossible to obtain in real-world applications.\nData-driven techniques, on the other hand, have become a viable alternative for the design and analysis of unknown systems using only data. In this context, a novel data-driven distributed unknown-input observer (D-DUIO) for unknown continuous-time linear time-invariant (LTI) systems is developed, which requires solely some data collected offline, without any prior knowledge of the system matrices. In the paper, first, a model-based approach to the design of a DUIO is presented.\nA sufficient condition for the existence of such a DUIO is recalled, and a new one is proposed, that is prone to a data-driven adaption.\nMoving to a data-driven approach,\nit is shown that under suitable assumptions on the input/output/state data collected from the continuous-time system, it is possible to both claim the existence of a D-DUIO and to derive its matrices in terms of the matrices of pre-collected data.\nFinally, the efficacy of the D-DUIO is illustrated by means of numerical examples.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "In dynamical control systems, distributed state estimation (DSE) approaches play a vital role, and a multitude of well-established tools have been developed, including consensus Kalman-based filtering Olfati-Saber (2005 ###reference_b27###); Chen et al. (2016 ###reference_b8###), Luenberger-like consensus estimation Mill\u00e1n et al. (2013 ###reference_b24###), and distributed moving-horizon estimation Brouillon et al. (2023 ###reference_b5###), to name a few. DSE has a wide range of real-world applications, including power system monitoring, cooperative tracking and localization, and smart transportation; see, e.g., Ahmad et al. (2017 ###reference_b1###); Farina et al. (2010 ###reference_b15###) and references therein.\nHowever, practical concerns about the deployment of DSE methods exist. For instance, unknown inputs caused by sensor aging, modeling errors, calibration bias, and/or external disturbances/attacks can lead to severe deterioration in estimation performance Trimpe and D\u2019Andrea (2014 ###reference_b30###); Shmaliy et al. (2018 ###reference_b29###). Among different tools to tackle the estimation problem in the presence of unknown inputs, unknown-input observers (UIOs) have attracted recurring attention due to their geometric decoupling capabilities Valcher (1999 ###reference_b32###); Nazari and Shafai (2019 ###reference_b26###).\nA distributed UIO (DUIO) was first implemented in Chakrabarty et al. (2016 ###reference_b7###), to estimate\nthe internal states of the nonlinear subsystems using local measurement\noutputs.\nMore recently, a distributed UIO was developed for a continuous-time LTI system in Yang et al. (2022 ###reference_b36###) by resorting to a consensus strategy, in which the global system state is estimated consistently by each local observer with limited information about the input and output.\nA similar strategy was also employed in Cao and Wang (2023 ###reference_b6###), but with a different structure for the observer gain matrices, based on the decomposition of the state space at each node into detectability/undetectability subspaces.\nIt is worth noting that all of the previous results about DUIOs\nwere derived\nassuming that the original\nsystem models were known. However, obtaining accurate system models for interconnected cyber-physical systems, either from first-principles or through system identification methods, is becoming increasingly difficult or even impossible. To address this challenge, data-driven control\nmethods have gained attention in the big data era, aiming to design controllers directly from data without relying on intermediate system identification procedures, as described, e.g., in Hou and Jin (2013 ###reference_b19###); De Persis and Tesi (2020 ###reference_b12###). Recent efforts leveraging Willems et al.\u2019s fundamental lemma Willems et al. (2005 ###reference_b34###) have addressed data-driven predictive control Coulson et al. (June, 25-28, 2019 ###reference_b9###); Berberich et al. (2020 ###reference_b2###),\ndata-driven event-triggered and consensus control Li et al. (2023 ###reference_b20###), and data-driven observers Mishra et al. ###reference_b25###.\nHowever, data-driven state estimation with unknown inputs has received only partial attention up to now. In Shi et al. (2022 ###reference_b28###), a data-driven input reconstruction method from outputs was developed to design inputs. 
The work Turan and Ferrari-Trecate (2021 ###reference_b31###)\ninvestigated the data-driven UIO problem for unknown linear systems,\nwith the goal of estimating the state even in the presence of unknown disturbances.\nThis work was recently extended in Disar\u00f2 and Valcher (2024 ###reference_b14###), where\nnecessary and sufficient conditions for the problem solution, as well as a parametrization of all possible solutions, based only on data, were provided.\nNevertheless, all these studies have only considered centralized systems, and to the best of our knowledge, no results for data-driven DSE have been reported.\nThis paper aims to fill this gap by developing a distributed data-driven UIO scheme for a continuous-time unknown linear system subject to unknown inputs and disturbances. Specifically, we introduce a novel data-driven DUIO (D-DUIO) designed using offline input/output/state data without performing any system identification. The D-DUIO with a consensus strategy allows the estimation of the unknown global system state through local information exchanges between neighboring nodes, even when no node has access to the complete input information. It is shown that, under mild conditions, the local state estimates obtained by the nodes reach consensus and converge to the true state of the unknown system.\nIn summary, the contributions of this work are the following:\nBy resorting to a model-based approach, we recall a sufficient condition - already derived in the literature - for the proposed DUIO to provide a state estimate that asymptotically converges to the true state of the system, and we propose a new sufficient condition, that, even if slightly stronger, is more suitable to be adapted to a data-driven context.\nBy leveraging the results in Turan and Ferrari-Trecate (2021 ###reference_b31###); Disar\u00f2 and Valcher (2024 ###reference_b14###), we provide necessary and sufficient conditions to verify using only the collected data whether the above mentioned sufficient condition for the existence of a DUIO is satisfied.\nWe explicitly provide the data-driven expression of the matrices of the proposed D-DUIO.\nTo make the paper flow smoother, all the proofs have been moved to the Appendix.\nFor convenience, we introduce some notation.\nThe sets of real numbers, nonnegative real numbers and nonnegative integers are denoted by and , respectively.\n denotes the identity matrix of size , the zero matrix of size , and the all-one vector of size . Suffixes will be omitted when the dimensions can be deduced from the context.\nThe Moore-Penrose pseudoinverse of a matrix\n is denoted by .\nWe use to represent the kernel space of and to represent its column space.\nThe spectrum of a square matrix is denoted by and is the set of all its eigenvalues. For a symmetric matrix , we use to denote the smallest eigenvalue of . A symmetric matrix is positive definite if for every . When so, we adopt the notation .\nThe Kronecker product is denoted by .\nGiven matrices ,\nthe block-diagonal matrix whose th diagonal block is the matrix is denoted by .\nWe also use to denote the block diagonal matrix with diagonal blocks ."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Preliminaries and Problem Formulation",
|
| 15 |
+
"text": "The problem set-up we adopt is analogous to those adopted in Yang et al. (2022 ###reference_b36###) and Cao and Wang (2023 ###reference_b6###). Specifically, we\nconsider a continuous-time LTI system\nwhere ,\n is the state, is the control input, is the unknown process disturbance,\n, , and .\nA wireless sensor network comprising heterogeneous sensor nodes is deployed to monitor the state of system (1 ###reference_###). At each time instant, each node of the network provides a measured\noutput signal , given by\nwhere .\nMoreover,\nwe assume that each sensor node has access only to a subset of the input entries, and hence\nfor every , we can split the entries of the control input into two parts: the measurable part and the unknown part . Consequently, we can always express as:\nwhere , , , . Since is also unknown for each node, the overall unknown input at node and the associated system matrix can be represented as\nConsequently, for every ,\nthe system dynamics, from the perspective of the th sensor node, is given by:\nIt is worthwhile noticing that the specific expression of will play no role in the following, and hence we can always assume, possibly redefining , that the matrix is of full column rank (see Yang et al. (2022 ###reference_b36###)). In the following, we will denote by the system described by the pair of equations\n(1 ###reference_###)\u2013(2 ###reference_###) or, equivalently, by the pair (5 ###reference_###)\u2013(2 ###reference_###).\nThe objective of each node is to reconstruct the global state of the system, by exchanging information with other nodes.\nSpecifically, we assume that\nthe sensor network is represented by a graph , where is the set of sensor nodes, is the set of communication links, through which nodes can exchange information, and is the nonnegative weighted adjacency matrix, where if , and otherwise.\nThe degree matrix of is , where , for every . The Laplacian matrix associated with the network is .\nThe graph is undirected and connected.\n(Laplacian matrix) \nIt follows from Assumption 1 ###reference_umption1### that the Laplacian associated with the graph is symmetric and irreducible. Therefore, its spectrum is of the form\nWe also note that is a compartmental matrix (see Haddad et al. (2002 ###reference_b18###)), since its off-diagonal entries are nonnegative (and hence it is a Metzler matrix) and the sum of the entries in each of its columns is nonpositive, i.e., . Therefore, is an irreducible compartmental matrix, and it satisfies the (stronger) condition .\nGiven any ,\nif we denote by the matrix obtained from by removing the th row and the th column, namely the entries related to the connections of node , we have that , and is still a compartmental matrix and it is Hurwitz (see Lemma 3 ###reference_ma3### in Appendix A ###reference_###).\nConsequently, , and hence .\n###figure_1### We assume that the th sensor node generates the state estimate\nat time , , through a DUIO described as follows:\nwhere is the state of the th UIO (), is the estimate of system (1 ###reference_###) state provided by node . 
The coefficients are the entries of the communication graph adjacency matrix, while , , , are matrix parameters to be designed.\nIn the proposed set-up,\nwe provide a first qualitative statement of the estimation problem we address in the paper.\nGiven the systems , , subject to unknown inputs and disturbances,\nand communicating through a graph , satisfying Assumption 1 ###reference_umption1###,\ndetermine, if possible, the matrices , and of\nthe distributed state estimation scheme (6 ###reference_###)\nin such a way that the state estimates provided by the observers across all nodes\nachieve consensus and the common state estimate converges to the real state value."
|
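Remark 1 above can be checked numerically on any connected undirected graph. The short Python sketch below adopts the sign convention suggested by the remark (the Laplacian written so that it is Metzler with zero column sums) on an illustrative 5-node cycle, which need not be the topology used in the later examples, and verifies that deleting any row/column pair leaves a Hurwitz matrix.

    import numpy as np

    # adjacency matrix of an undirected, connected 5-node cycle (illustrative weights)
    A = np.array([[0, 1, 0, 0, 1],
                  [1, 0, 1, 0, 0],
                  [0, 1, 0, 1, 0],
                  [0, 0, 1, 0, 1],
                  [1, 0, 0, 1, 0]], dtype=float)
    L = A - np.diag(A.sum(axis=1))   # Metzler, zero column sums: a compartmental matrix

    for i in range(A.shape[0]):
        Li = np.delete(np.delete(L, i, axis=0), i, axis=1)   # remove node i's row and column
        assert np.all(np.linalg.eigvals(Li).real < 0)        # grounded Laplacian is Hurwitz
    print("every grounded matrix is Hurwitz")

This is the property invoked in the gain condition later on, where the smallest eigenvalue of the grounded matrix appears: grounding any single node of a connected graph makes the remaining block strictly stable.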
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Distributed Model-based State Estimation",
|
| 21 |
+
"text": "If all the system matrices , are known,\nthe problem is analogous to the one investigated in Yang et al. (2022 ###reference_b36###) and Cao and Wang (2023 ###reference_b6###), where\nsufficient conditions for the solvability of the DUIO problem have been provided by means of a model-based approach. However, the set-up considered here is a bit more general, since it does not impose any particular structure on the gain matrices weighting the consensus term (see comments after Lemma 1 ###reference_ma1###).\nUpon defining the global estimation error by\nconcatenation as ,\nwhere is the estimation error of node . Similarly to (Valcher, 1999 ###reference_b32###, Sections 2\u20133) and (Yang et al., 2022 ###reference_b36###, Section 4) (see also Cao and Wang (2023 ###reference_b6###)),\ntaking the time derivative of yields\nwhere , , and are defined analogously to , while . The derivation of (7 ###reference_###) is detailed in Appendix B ###reference_###.\nClearly, the estimation error dynamics is independent of\nthe disturbance, the unknown input and the specific input and state trajectories if and only if the following conditions are satisfied\nWhen so, equation (7 ###reference_###) becomes\nTo guarantee that the conditions in (8 ###reference_###) are feasible, we state\nthe following result, whose proof is a trivial extension of the single agent case Darouach et al. (1994 ###reference_b11###); Darouach (2009 ###reference_b10###) (indeed, all matrices in\n(8 ###reference_###) are block diagonal),\nand hence omitted.\nThe following facts are equivalent:\nEquations (8 ###reference_###) are simultaneously solvable;\nFor every , there exists such that\nFor every , .\nIt is worthwhile remarking that the solutions of (10 ###reference_###) can be parametrized as follows 111We assumed that is of full column rank. In case it is not, we can always factorize it as\n, where is of full column rank and is of full row rank .\nWhen so, the parametrization can be expressed as follows\n .\n(see Darouach et al. (1994 ###reference_b11###)):\nwhere is a free parameter. In the sequel, we will refer to the particular solution\n of (10 ###reference_###) with the symbol .\nIn order to ensure that the estimation error asymptotically converges to zero, we need to also impose that\n is Hurwitz stable.\nIn Yang et al. (2022 ###reference_b36###) and Cao and Wang (2023 ###reference_b6###), it has been shown that, under any of the equivalent conditions of Lemma 1 ###reference_ma1###, a sufficient condition for the existence of matrices such that is Hurwitz stable is that the intersection of the undetectable subspaces of the pairs is the zero subspace.\nWhen so, a possible choice for the matrices is either (see Cao and Wang (2023 ###reference_b6###)), where is the undetectable subspace of the pair , or (see Yang et al. (2022 ###reference_b36###)), where is a symmetric and positive definite matrix that arises from the solution of a suitable LMI. 
In both cases, is a positive scalar parameter that drives all the eigenvalues of toward\nthe left open half-plane as it grows to .\nWhen addressing the distributed UIO design problem from a data-driven perspective, checking\nif the intersection of the undetectable subspaces is the zero subspace is not feasible, since the test would be too complicated and not robust to numerical errors.\nFor this reason we explore a stronger sufficient condition that is easy, as well as robust, to test.\nThere exists such that the pair is detectable.\nWithout loss of generality, we assume that , since we can always relabel the agents to make this happen.\nIt is clear that if for every and\nAssumption 2 ###reference_umption2### holds,\nthen the intersection of the undetectable subspaces of the pairs is the zero subspace, and hence there exist matrices , such that \nis Hurwitz stable. We provide in the following an explicit solution to the distributed estimation problem, namely a specific choice of the matrices , and .\n(Construction of a model-based DUIO) \nSuppose that Assumptions 1 ###reference_umption1### and 2 ###reference_umption2### (for ) hold, and , for every . Let be such that is Hurwitz stable. Set\nwith\nwhere is the smallest eigenvalue of (see Remark 1 ###reference_ark1###), and\n\nThen, for this choice of the matrices, the model-based DUIO in (6 ###reference_###) can reconstruct the system state asymptotically.\nNote that the procedure proposed here leads to a rather simplified solution since it suffices to stabilize the leader agent and then choose a suitable to stabilize also the other agents. Therefore, the matrices for play no role, except in setting the bound on , which is the reason why we imposed conditions (12b ###reference_.2###)."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "4",
|
| 25 |
+
"parent_section_id": null,
|
| 26 |
+
"section_name": "Distributed Data-driven State Estimation",
|
| 27 |
+
"text": "Throughout this section, we make the following additional assumption on the systems .\n(Unknown system model)\nFor each , the matrices of the system are unknown.\nUnder the previous Assumption, the model-based method for designing DUIOs described in the previous section does not apply anymore. Hence, to solve Problem 1 ###reference_blem1###,\nwe explore the possibility of designing a DUIO based on data.\nIn industrial processes, it is not always feasible or safe to measure real-time states and transmit them to remote sensors. However, offline experiments can be conducted to gather state data, which are further sent to remote sensors\nfor the design of state observers that can operate online.\nIn line with recent studies Berberich et al. (2020 ###reference_b2###, Dec. 14-18, 2020 ###reference_b4###); Turan and Ferrari-Trecate (2021 ###reference_b31###); Liu et al. (2023a ###reference_b21###), we assume that input/output/state data can be collected and make the\nfollowing assumption.\n(Offline and online data acquisition)\nDuring the offline phase, input/output/state sampled data , , , and are collected locally by each system described as in (1 ###reference_###)\u2013(2 ###reference_###),\npossibly corresponding to different initial states and inputs222 No special requirement is imposed on the sampling times . Indeed, each agent could have\ndifferent sampling times, say ..\nDuring online operation, only the inputs , outputs and the output derivatives , , are available.\nAssumption 4 ###reference_umption4### indicates that although the matrices are unknown, the measurable input and\noutput data are available both offline and online. On the other hand, states can only be obtained through offline experiments, but are not available during online operations. This setting can be fulfilled in a remote control scenario, and it has been widely considered in the data-driven state-estimation literature, see e.g., Turan and Ferrari-Trecate (2021 ###reference_b31###); Disar\u00f2 and Valcher (2024 ###reference_b14###); Liu et al. (2023b ###reference_b22###); Wolff et al. (2024 ###reference_b35###).\nWhen the state and the output derivatives are not physical quantities, their computation is likely to be error-prone. However, after having recorded the state and output trajectories with a high sampling rate, we can obtain a good approximation of their values in a post-processing step, since the data are collected in an offline phase. We can explicitly account for errors in the computation of the derivatives by modeling these errors as a measurement noise (see Berberich et al. (2021 ###reference_b3###)). Alternatively, when the derivatives are difficult to compute, we can resort to an integral version of the relation in (5 ###reference_###) and (2 ###reference_###), which leads to an equivalent characterization, as it is shown in De Persis et al. (2024 ###reference_b13###). For the sake of simplicity, we carry on the analysis using the derivatives. 
However, all the results can be adapted with no further effort to use the integral representation of the data.\nUnder Assumption 4 ###reference_umption4###, we define for every the following matrices:\nIn addition, even if we cannot measure the unknown input , it is convenient to introduce the sequence and the corresponding matrix\nWe make the following assumption on the pre-collected data.\n(Rank of pre-collected data) For each , it holds that\n(Conservativeness of Assumption 5 ###reference_###)\n\nAs it will be proved in Theorem 2 ###reference_orem2###, Assumption 5 ###reference_### ensures that any input/output/state trajectory of system can be represented as a linear combination of\nthe columns of\n.\nIn the case of discrete-time systems, according to Willems et al.\u2019s fundamental lemma Willems et al. (2005 ###reference_b34###), Assumption 5 ###reference_### is fulfilled when the pair is controllable and the input and disturbance signal\n is persistently exciting of order (see Turan and Ferrari-Trecate (2021 ###reference_b31###); Disar\u00f2 and Valcher (2024 ###reference_b14###)).\nThe relationship between\nAssumption 5 ###reference_### and persistence of excitation is more involved for continuous-time systems, and we refer the interested reader to Lopez and M\u00fcller (2022 ###reference_b23###).\nHowever, since no constraint is imposed on the sampling times , it is easy to conceive experiments in such a way that the collected data satisfy Assumption 5 ###reference_###.\nFinally, it is worth emphasizing that Assumption 5 ###reference_### does not allow system identification from the collected data. Specifically, the presence of unmeasured disturbances in \ndoes not guarantee the possibility of identifying and from data.\nIn the rest of the paper we will focus on this revised version of Problem 1 ###reference_blem1###.\nGiven the unknown systems , , subject to unknown inputs and disturbances, and satisfying Assumptions\n1 ###reference_umption1###\u20135 ###reference_###, design, if possible, a distributed state estimation scheme described as in (6 ###reference_###),\nwhose matrices are derived from the offline data,\nsuch that the state estimates provided by the observers across all nodes\nachieve consensus and the common state estimate converges to the real state value.\nTo address Problem 2 ###reference_blem2###, we build upon the data-driven UIO for a single agent\nproposed in (Turan and Ferrari-Trecate, 2021 ###reference_b31###, Section II) in a discrete-time setting and develop its distributed version for continuous-time systems.\nThe main ideas behind the data-driven UIO proposed in Turan and Ferrari-Trecate (2021 ###reference_b31###) are substantially two.\nFirst of all, a UIO for a single sensor , described as in (6 ###reference_###),\nincludes among its input/output trajectories all the\n(control) input/output/state trajectories of system (see (Turan and Ferrari-Trecate, 2021 ###reference_b31###, Remark 3)).\nSecondly, if the historical data are sufficiently rich to capture the dynamics of\nthe (control) input/output/state trajectories of system , then they can be used to design the matrices of the th UIO ().\nWe will follow a similar path and first derive (see Theorem 2 ###reference_orem2###) conditions that ensure that historical data allow to identify the online trajectories of .\nSubsequently, in Theorem 3 ###reference_orem3###, we propose a sufficient condition for the existence of a data-driven DUIO, namely a DUIO described as in (6 ###reference_###), whose matrices 
are obtained from the historical data. The explicit expression of these matrices as well as a possible parametrization of them is given in Theorem 4 ###reference_orem4###."
|
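Assumption 5 is stated as a rank condition on the stacked offline data matrices. Its exact form is not reproduced above; as one plausible reading, consistent with the discrete-time results of Turan and Ferrari-Trecate (2021) cited there, the sketch below (with hypothetical array names) checks that the stacked state/known-input/unknown-input samples of a node have full row rank, which is the kind of data richness the assumption is meant to guarantee.

    import numpy as np

    def satisfies_rank_assumption(X0, U0, D0):
        # X0: n x T state samples, U0: m x T known-input samples, D0: q x T unknown-input samples.
        # D0 is available only in simulation; in practice the condition is enforced by experiment design.
        stacked = np.vstack([X0, U0, D0])
        return np.linalg.matrix_rank(stacked) == stacked.shape[0]

    rng = np.random.default_rng(1)
    print(satisfies_rank_assumption(rng.standard_normal((4, 100)),   # n = 4 states
                                    rng.standard_normal((2, 100)),   # m = 2 known inputs
                                    rng.standard_normal((1, 100))))  # q = 1 unknown input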
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "4.1",
|
| 31 |
+
"parent_section_id": "4",
|
| 32 |
+
"section_name": "Consistency of Offline and Online Trajectories",
|
| 33 |
+
"text": "Inspired by (Turan and Ferrari-Trecate, 2021 ###reference_b31###, Lemma 1) (see also Lemma 7 in Disar\u00f2 and Valcher (2024 ###reference_b14###)), in Theorem 2 ###reference_orem2### we establish the consistency of offline and online trajectories affected by unknown inputs and noise. We first recall the concept of data compatibility (Turan and Ferrari-Trecate, 2021 ###reference_b31###, Definition 5), which determines whether the historical data are sufficiently representative of system trajectories.\n(Data compatibility)\nAn input/output/ \nstate trajectory \n is compatible with the historical data if the following condition holds\nwhere , , , , and are defined in (4 ###reference_###). The set of trajectories compatible with the historical data is defined as\nThe set of input/output/state trajectories compatible with the equations of system is defined as follows\n(Consistency of offline and online trajectories)\nUnder Assumptions\n3 ###reference_umption3###\u20135 ###reference_###,\nfor every , the set of trajectories compatible with the offline data coincides with the set of trajectories of the system , i.e., ."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4.2",
|
| 37 |
+
"parent_section_id": "4",
|
| 38 |
+
"section_name": "Existence and construction of a D-DUIO",
|
| 39 |
+
"text": "To extend the analysis carried on in Section 3 ###reference_###, under Assumption 2 ###reference_umption2###, we first need to understand how one can deduce from data the existence of an agent for which Assumption 2 ###reference_umption2### holds and the fact that for every agent one of the equivalent conditions of Lemma 1 ###reference_ma1### holds.\nWe introduce the following technical result.\n(Solvability conditions in terms of collected data) \nSuppose that Assumptions\n3 ###reference_umption3###\u20135 ###reference_### hold.\nThen\nCondition\n (i.e., condition iii) of Lemma 1 ###reference_ma1###) holds if and only if\nAssumption 2 ###reference_umption2### holds if and only if there exists such that\nAs an immediate consequence of Lemma 2 ###reference_ma2### and of Theorem 1 ###reference_orem1###, we can claim what follows.\n(Existence of D-DUIO)\nIf the communication graph is undirected and connected (namely, Assumption 1 ###reference_umption1### holds), condition\n(19 ###reference_###) holds for one index (namely, Assumption 2 ###reference_umption2### holds), say ,\nand condition\n(18 ###reference_###) holds for every , then there exists a distributed UIO in (6 ###reference_###) that asymptotically estimates the state of the original system.\nThe previous result provides a way to check on data\nthe existence of an asymptotic D-DUIO. We want now to enable the explicit construction of the matrices of such an asymptotic D-DUIO using only the collected data. \nTo this end, we preliminarily notice that\neach is of full row rank, as a result of Assumption 5 ###reference_###, and hence from\n, we can immediately deduce for every as\nWe are now ready to prove our main result.\n(Construction of D-DUIO)\nSuppose that the communication graph is undirected and connected (namely, Assumption 1 ###reference_umption1### holds), that condition\n(19 ###reference_###) holds for a single index (namely, Assumption 2 ###reference_umption2### holds), say ,\nand that condition\n(18 ###reference_###) holds for every . Then:\nFor every there exist matrices and , of suitable dimensions, with\n, such that\nMoreover, for the pair is detectable.\nFor every solution and of (20 ###reference_###) for which\n, the pair is detectable.\nLet be a matrix such that is Hurwitz stable.\nIf we assume\nand is chosen such that\n(13 ###reference_###) holds,\nwhere\n,\nthen the distributed UIO in (6 ###reference_###), for this choice of the matrices, can reconstruct the system state asymptotically."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "5",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "Simulation Results",
|
| 45 |
+
"text": "This section\npresents a numerical example to illustrate the performance of the proposed D-DUIO.\nThe performance of this method is compared to those of a system identification-based (ID) DUIO, as well as of a DUIO based on the exact system model.\nThe comparison\ndemonstrates the effectiveness of the D-DUIO."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "5.1",
|
| 49 |
+
"parent_section_id": "5",
|
| 50 |
+
"section_name": "Performance of D-DUIO",
|
| 51 |
+
"text": "Consider a two-mass-spring system with external disturbances Li et al. (2023 ###reference_b20###) represented by (1 ###reference_###)\u2013(2 ###reference_###) and a wireless sensor network consisting of nodes. The undirected and connected communication graph is shown in Fig. 2 ###reference_###. The system matrices are given by\nwith unknown inputs , , and process noise randomly generated from .\nFor every , we assume that the known inputs are generated by the autonomous system , with and initial condition whose entries are randomly generated in .\nThe outputs of the target system are observed using five nodes whose matrices are given respectively by\n###figure_2### The noisy historical input/output/state trajectories at each node are collected from the linear system (22 ###reference_###)\u2013(5.1 ###reference_###) with a random initial state. We assume to collect samples. Moreover, following Theorem 1 ###reference_orem1###, the parameter of the D-DUIO is set to .\nWe present the estimation performance of the D-DUIO in Fig. 3 ###reference_###, which shows the state estimates obtained over the simulation window . The plots indicate that all nodes achieve consensus on state estimates. Moreover, the state estimation errors shown in Fig. 4 ###reference_### converge to asymptotically. This demonstrates that inaccurate estimation arising from unknown inputs and disturbances can be overcome by the proposed D-DUIO."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "5.2",
|
| 55 |
+
"parent_section_id": "5",
|
| 56 |
+
"section_name": "Comparison with other DSE Methods",
|
| 57 |
+
"text": "The proposed D-DUIO as well as two other DSE methods, namely model-based DUIO and ID-DUIO approaches, are numerically compared in this section.\nFor ID-DUIO, the coupling matrices of unknown inputs and noises are assumed known, and the system matrices are identified by the least-squares method using the same set of offline data. The matrices of DUIO and ID-DUIO are obtained by solving equations (8 ###reference_###), computed using the CVX toolbox Grant and Boyd (2014 ###reference_b17###). Parameter of the two methods is set to .\nThe model-based DUIO demonstrates superior estimation performance in Figs. 5 ###reference_###\u20136 ###reference_###. Figs. 7 ###reference_### \u20138 ###reference_### illustrate that ID-DUIO exhibits inferior performance compared to the other two methods. Due to the unknown disturbance in the offline data, it is impossible to\ndetermine the original system model using identification methods. Therefore, the trajectories generated by the ID-DUIO are not fully compatible with the target system trajectories. The proposed D-DUIO method outperforms ID-DUIO and achieves competitive performance with the model-based DUIO.\n###figure_3### ###figure_4### ###figure_5### ###figure_6### ###figure_7### ###figure_8### To further compare the performance of the three methods, the evaluation metrics including mean-squared error (MSE) and mean-absolute error (MAE) are employed based on independent Monte Carlo experiments.\nThe MSE of sensor during an experiment over the time interval is defined as , where is the true state of the target system, and is the state estimated by node during the experiment.\nFor convenience of notation, we denote by the MSE of sensor during the th experiment.\nThe MSE of all estimates during the th experiment becomes . After independent experiments, the MSE becomes . The MAE is defined analogously.\nThe proposed D-DUIO method shows a significant improvement in MSE and MAE relative to ID-DUIO. The difference between D-DUIO and DUIO in MSE and MAE of are and , respectively. Compared to the other two DSE methods, the effectiveness of the proposed D-DUIO is demonstrated."
|
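The nested averaging just described (over the sampled time instants, then over the nodes of a run, then over the independent Monte Carlo runs) can be written compactly; a small sketch, assuming the estimation errors are stored in a hypothetical array err of shape (runs, nodes, samples, state_dim) and that the squared Euclidean norm (resp. 1-norm) of the state error is averaged over time:

    import numpy as np

    def mse_mae(err):
        # per-node, per-run averages over the sampled time instants
        mse = np.mean(np.sum(err ** 2, axis=-1), axis=-1)
        mae = np.mean(np.sum(np.abs(err), axis=-1), axis=-1)
        # average over nodes, then over independent Monte Carlo experiments
        return mse.mean(), mae.mean()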
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "6",
|
| 61 |
+
"parent_section_id": null,
|
| 62 |
+
"section_name": "Conclusions",
|
| 63 |
+
"text": "In this paper, we investigated the problem of designing\na DUIO for a continuous-time LTI system, subject to unknown inputs and\nprocess disturbances, such that the state\nestimation error asymptotically converges to zero.\nFirst, we analyzed the problem using a model-based approach and derived a new sufficient condition to ensure that the state estimates of the proposed DUIO converge to the true system state asymptotically. Then, we proposed a novel D-DUIO to estimate the state of the unknown target system. We showed that, under mild assumptions, offline data are representative of any online input/output/state trajectory generated by the continuous-time unknown system.\nIn addition, it was shown that, using only the offline\ndata, it is possible to both verify if the given sufficient condition for the existence of a D-DUIO holds and to derive a family of possible choices for the D-DUIO matrices.\nSimulation results validated the efficacy of the proposed approach. Future research will focus on extending the framework to more complex settings, such as nonlinear systems and switching network topologies."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "7",
|
| 67 |
+
"parent_section_id": null,
|
| 68 |
+
"section_name": "Appendix A Lemma 3 and its proof",
|
| 69 |
+
"text": "Let be arbitrary in , and let be the matrix obtained from by removing the th row and th column.\nUnder Assumption 1 ###reference_umption1###,\n is a Hurwitz compartmental matrix.\nSuppose, without loss of generality, . We have already noticed (see Remark 1 ###reference_ark1###) that is compartmental, irreducible and\n. Therefore, since is obtained from by removing its first row and first column,\n is Metzler and satisfies\n.\nIn addition, the irreducibility assumption on implies that there exists such that\n, and hence\n.\nWe now distinguish between two cases. \n irreducible. If is irreducible, we can exploit Lemma 3 in Valcher and Zorzan (2018 ###reference_b33###) to prove that is Hurwitz. \n reducible. Let be a permutation matrix such that\nwhere is a Metzler irreducible matrix, and .\nThen,\nSince is irreducible, then the matrix in (24 ###reference_###) is irreducible too. Therefore, for every , , and hence , with at least one entry strictly negative. Since , is irreducible, we can apply again Lemma 3 in Valcher and Zorzan (2018 ###reference_b33###) to conclude that is Hurwitz, and hence is Hurwitz."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "8",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "Appendix B Derivation of Eqn.(7)",
|
| 75 |
+
"text": "Upon introducing the estimation\nerror of node ,\none deduces that the th estimation error dynamics is\nTaking the time derivative of (25 ###reference_###) yields\nSo, by referring to , one can rewrite (26 ###reference_###) compactly as (7 ###reference_###)."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "9",
|
| 79 |
+
"parent_section_id": null,
|
| 80 |
+
"section_name": "Appendix C Proof of Theorem 1",
|
| 81 |
+
"text": "First of all, we observe that by choosing the matrices and , as in (12a ###reference_.1###)-(12e ###reference_.5###) we satisfy conditions (8 ###reference_###), and hence the estimation error evolves according to equation\n(9 ###reference_###).\nMoreover, is Hurwitz. Therefore, if we impose and , we obtain\nNow it remains to prove that we can always choose so that is Hurwitz. Consider the following Lyapunov function\n\nwhich is a positive definite function of , whose dynamics is given by\nThe time derivative of along (27 ###reference_###) satisfies\nand hence if satisfies (13 ###reference_###)\nthen is Hurwitz. Consequently,\n is Hurwitz, and the state estimation error asymptotically converges to zero."
|
| 82 |
+
}
|
| 83 |
+
],
|
| 84 |
+
"appendix": [],
|
| 85 |
+
"tables": {
|
| 86 |
+
"1": {
|
| 87 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T1\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Evaluation metrics of DUIO, D-DUIO and ID-DUIO</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S5.T1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S5.T1.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S5.T1.1.1.1.1\"><span class=\"ltx_text\" id=\"S5.T1.1.1.1.1.1\">Method</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S5.T1.1.1.1.2\">MSE</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S5.T1.1.1.1.3\">MAE</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T1.1.2.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.1.2.1.1\"><span class=\"ltx_text\" id=\"S5.T1.1.2.1.1.1\">Model-based DUIO</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.1.2.1.2\">5.1627</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T1.1.2.1.3\">0.5614</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.1.3.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T1.1.3.2.1\">Proposed D-DUIO</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T1.1.3.2.2\"><span class=\"ltx_text\" id=\"S5.T1.1.3.2.2.1\">5.5629</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T1.1.3.2.3\"><span class=\"ltx_text\" id=\"S5.T1.1.3.2.3.1\">0.5718</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T1.1.4.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S5.T1.1.4.3.1\"><span class=\"ltx_text\" id=\"S5.T1.1.4.3.1.1\">ID-DUIO</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S5.T1.1.4.3.2\">6.5461</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S5.T1.1.4.3.3\">0.8510</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 88 |
+
"capture": "Table 1: Evaluation metrics of DUIO, D-DUIO and ID-DUIO"
|
| 89 |
+
}
|
| 90 |
+
},
|
| 91 |
+
"image_paths": {
|
| 92 |
+
"1": {
|
| 93 |
+
"figure_path": "2401.04660v2_figure_1.png",
|
| 94 |
+
"caption": "Figure 1: Scheme of the proposed distributed sensor network.",
|
| 95 |
+
"url": "http://arxiv.org/html/2401.04660v2/x1.png"
|
| 96 |
+
},
|
| 97 |
+
"2": {
|
| 98 |
+
"figure_path": "2401.04660v2_figure_2.png",
|
| 99 |
+
"caption": "Figure 2: The sensor network topology.",
|
| 100 |
+
"url": "http://arxiv.org/html/2401.04660v2/x2.png"
|
| 101 |
+
},
|
| 102 |
+
"3": {
|
| 103 |
+
"figure_path": "2401.04660v2_figure_3.png",
|
| 104 |
+
"caption": "Figure 3: The estimation performance of D-DUIO.",
|
| 105 |
+
"url": "http://arxiv.org/html/2401.04660v2/x3.png"
|
| 106 |
+
},
|
| 107 |
+
"4": {
|
| 108 |
+
"figure_path": "2401.04660v2_figure_4.png",
|
| 109 |
+
"caption": "Figure 4: The estimation error of D-DUIO.",
|
| 110 |
+
"url": "http://arxiv.org/html/2401.04660v2/x4.png"
|
| 111 |
+
},
|
| 112 |
+
"5": {
|
| 113 |
+
"figure_path": "2401.04660v2_figure_5.png",
|
| 114 |
+
"caption": "Figure 5: The estimation performance of DUIO.",
|
| 115 |
+
"url": "http://arxiv.org/html/2401.04660v2/x5.png"
|
| 116 |
+
},
|
| 117 |
+
"6": {
|
| 118 |
+
"figure_path": "2401.04660v2_figure_6.png",
|
| 119 |
+
"caption": "Figure 6: The estimation error of DUIO.",
|
| 120 |
+
"url": "http://arxiv.org/html/2401.04660v2/x6.png"
|
| 121 |
+
},
|
| 122 |
+
"7": {
|
| 123 |
+
"figure_path": "2401.04660v2_figure_7.png",
|
| 124 |
+
"caption": "Figure 7: The estimation performance of ID\u2013DUIO.",
|
| 125 |
+
"url": "http://arxiv.org/html/2401.04660v2/x7.png"
|
| 126 |
+
},
|
| 127 |
+
"8": {
|
| 128 |
+
"figure_path": "2401.04660v2_figure_8.png",
|
| 129 |
+
"caption": "Figure 8: The estimation error of ID\u2013DUIO.",
|
| 130 |
+
"url": "http://arxiv.org/html/2401.04660v2/x8.png"
|
| 131 |
+
}
|
| 132 |
+
},
|
| 133 |
+
"validation": true,
|
| 134 |
+
"references": [
|
| 135 |
+
{
|
| 136 |
+
"1": {
|
| 137 |
+
"title": "An online scalable approach to unified multirobot cooperative\nlocalization and object tracking.",
|
| 138 |
+
"author": "A. Ahmad, G. Lawless, and P. Lima.",
|
| 139 |
+
"venue": "IEEE Trans. Robot., 33(5):1184\u20131199, Oct.\n2017.",
|
| 140 |
+
"url": null
|
| 141 |
+
}
|
| 142 |
+
},
|
| 143 |
+
{
|
| 144 |
+
"2": {
|
| 145 |
+
"title": "Data-driven model predictive control with stability and robustness\nguarantees.",
|
| 146 |
+
"author": "J. Berberich, J. K\u00f6hler, M. A. M\u00fcller, and F. Allg\u00f6wer.",
|
| 147 |
+
"venue": "IEEE Trans. Autom. Control, 66(4):1702\u20131717, June, 2020.",
|
| 148 |
+
"url": null
|
| 149 |
+
}
|
| 150 |
+
},
|
| 151 |
+
{
|
| 152 |
+
"3": {
|
| 153 |
+
"title": "Data-driven analysis and control of continuous-time systems under\naperiodic sampling.",
|
| 154 |
+
"author": "J. Berberich, S. Wildhagen, M. Hertneck, and F. Allg\u00f6wer.",
|
| 155 |
+
"venue": "IFAC-PapersOnLine, 54(7):210\u2013215, Sept.\n2021.",
|
| 156 |
+
"url": null
|
| 157 |
+
}
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"4": {
|
| 161 |
+
"title": "Robust constraint satisfaction in data-driven MPC.",
|
| 162 |
+
"author": "J. Berberich, J. K\u00f6hler, M. A. M\u00fcller, and F. Allg\u00f6wer.",
|
| 163 |
+
"venue": "In Proc. IEEE Conf. Decision and Control, pages 1260\u20131267,\nJeju, Korea, Dec. 14-18, 2020.",
|
| 164 |
+
"url": null
|
| 165 |
+
}
|
| 166 |
+
},
|
| 167 |
+
{
|
| 168 |
+
"5": {
|
| 169 |
+
"title": "Regularization for distributionally robust state estimation and\nprediction.",
|
| 170 |
+
"author": "J. S. Brouillon, F. D\u00f6rfler, and G. Ferrari-Trecate.",
|
| 171 |
+
"venue": "IEEE Control Syst. Lett., 7:2713\u20132718, June, 2023.",
|
| 172 |
+
"url": null
|
| 173 |
+
}
|
| 174 |
+
},
|
| 175 |
+
{
|
| 176 |
+
"6": {
|
| 177 |
+
"title": "Distributed unknown inout observer.",
|
| 178 |
+
"author": "G. Cao and J. Wang.",
|
| 179 |
+
"venue": "IEEE Trans. Autom. Control, 68(12):8244\u20138251, Dec.\n2023.",
|
| 180 |
+
"url": null
|
| 181 |
+
}
|
| 182 |
+
},
|
| 183 |
+
{
|
| 184 |
+
"7": {
|
| 185 |
+
"title": "Distributed unknown input observers for interconnected nonlinear\nsystems.",
|
| 186 |
+
"author": "A. Chakrabarty, S. Sundaram, M. J. Corless, G. T. Buzzard, S. H. \u017bak, and\nA. E. Rundell.",
|
| 187 |
+
"venue": "In Proc. American Control Conf., pages 101\u2013106, Boston, MA,\nUSA, July, 6-8, 2016.",
|
| 188 |
+
"url": null
|
| 189 |
+
}
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"8": {
|
| 193 |
+
"title": "Weighted optimization-based distributed Kalman filter for nonlinear\ntarget tracking in collaborative sensor networks.",
|
| 194 |
+
"author": "J. Chen, J. Li, S. Yang, and F. Deng.",
|
| 195 |
+
"venue": "IEEE Trans. Cybern., 47(11):3892\u20133905,\nJuly, 2016.",
|
| 196 |
+
"url": null
|
| 197 |
+
}
|
| 198 |
+
},
|
| 199 |
+
{
|
| 200 |
+
"9": {
|
| 201 |
+
"title": "Data-enabled predictive control: In the shallows of the DeePC.",
|
| 202 |
+
"author": "J. Coulson, J. Lygeros, and F. D\u00f6rfler.",
|
| 203 |
+
"venue": "In Proc. European Control Conf., pages 307\u2013312, Naples,\nItaly, June, 25-28, 2019.",
|
| 204 |
+
"url": null
|
| 205 |
+
}
|
| 206 |
+
},
|
| 207 |
+
{
|
| 208 |
+
"10": {
|
| 209 |
+
"title": "Complements to full order observer design for linear systems with\nunknown inputs.",
|
| 210 |
+
"author": "M. Darouach.",
|
| 211 |
+
"venue": "Appl. Math. Lett., 22(7):1107\u20131111, July, 2009.",
|
| 212 |
+
"url": null
|
| 213 |
+
}
|
| 214 |
+
},
|
| 215 |
+
{
|
| 216 |
+
"11": {
|
| 217 |
+
"title": "Full-order observers for linear systems with unknown inputs.",
|
| 218 |
+
"author": "M. Darouach, M. Zasadzinski, and S. J. Xu.",
|
| 219 |
+
"venue": "IEEE Trans. Autom. Control, 39(3):606\u2013609, Mar.\n1994.",
|
| 220 |
+
"url": null
|
| 221 |
+
}
|
| 222 |
+
},
|
| 223 |
+
{
|
| 224 |
+
"12": {
|
| 225 |
+
"title": "Formulas for data-driven control: Stabilization, optimality, and\nrobustness.",
|
| 226 |
+
"author": "C. De Persis and P. Tesi.",
|
| 227 |
+
"venue": "IEEE Trans. Autom. Control, 65(3):909\u2013924, Dec. 2020.",
|
| 228 |
+
"url": null
|
| 229 |
+
}
|
| 230 |
+
},
|
| 231 |
+
{
|
| 232 |
+
"13": {
|
| 233 |
+
"title": "Event-triggered control from data.",
|
| 234 |
+
"author": "C. De Persis, R. Postoyan, and P. Tesi.",
|
| 235 |
+
"venue": "IEEE Trans. Autom. Control, 69(6):3780\u20133795, June, 2024.",
|
| 236 |
+
"url": null
|
| 237 |
+
}
|
| 238 |
+
},
|
| 239 |
+
{
|
| 240 |
+
"14": {
|
| 241 |
+
"title": "On the equivalence of model-based and data-driven approaches to the\ndesign of unknown-input observers.",
|
| 242 |
+
"author": "G. Disar\u00f2 and M. E. Valcher.",
|
| 243 |
+
"venue": "arXiv:2311.00673, Apr. 2024.",
|
| 244 |
+
"url": null
|
| 245 |
+
}
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"15": {
|
| 249 |
+
"title": "Distributed moving horizon estimation for linear constrained systems.",
|
| 250 |
+
"author": "M. Farina, G. Ferrari-Trecate, and R. Scattolini.",
|
| 251 |
+
"venue": "IEEE Trans. Autom. Control, 55(11):2462\u20132475, Mar. 2010.",
|
| 252 |
+
"url": null
|
| 253 |
+
}
|
| 254 |
+
},
|
| 255 |
+
{
|
| 256 |
+
"16": {
|
| 257 |
+
"title": "A data-driven approach to UIO-based fault diagnosis.",
|
| 258 |
+
"author": "G. Fattore and M. E. Valcher.",
|
| 259 |
+
"venue": "In Proc. IEEE Conf. Decision and Control, available on arXiv:\narXiv:2404.06158, 2024.",
|
| 260 |
+
"url": null
|
| 261 |
+
}
|
| 262 |
+
},
|
| 263 |
+
{
|
| 264 |
+
"17": {
|
| 265 |
+
"title": "CVX: Matlab software for disciplined convex programming, version\n2.1.",
|
| 266 |
+
"author": "M. Grant and S. Boyd.",
|
| 267 |
+
"venue": "http://cvxr.com/cvx, Mar. 2014.",
|
| 268 |
+
"url": null
|
| 269 |
+
}
|
| 270 |
+
},
|
| 271 |
+
{
|
| 272 |
+
"18": {
|
| 273 |
+
"title": "Hybrid nonnegative and compartmental dynamical systems.",
|
| 274 |
+
"author": "W. M. Haddad, V. Chellaboina, and S. G. Nersesov.",
|
| 275 |
+
"venue": "Math. Probl. Eng., 8(6):493\u2013515, Jan. 2002.",
|
| 276 |
+
"url": null
|
| 277 |
+
}
|
| 278 |
+
},
|
| 279 |
+
{
|
| 280 |
+
"19": {
|
| 281 |
+
"title": "Model-Free Adaptive Control: Theory and Applications.",
|
| 282 |
+
"author": "Z. Hou and S. Jin.",
|
| 283 |
+
"venue": "Boca Raton: CRC Press, 2013.",
|
| 284 |
+
"url": null
|
| 285 |
+
}
|
| 286 |
+
},
|
| 287 |
+
{
|
| 288 |
+
"20": {
|
| 289 |
+
"title": "Data-driven consensus control of fully distributed event-triggered\nmulti-agent systems.",
|
| 290 |
+
"author": "Y. Li, X. Wang, J. Sun, G. Wang, and J. Chen.",
|
| 291 |
+
"venue": "Sci. China Inf. Sci., 66(5):1\u201315, May,\n2023.",
|
| 292 |
+
"url": null
|
| 293 |
+
}
|
| 294 |
+
},
|
| 295 |
+
{
|
| 296 |
+
"21": {
|
| 297 |
+
"title": "Data-driven resilient predictive control under denial-of-service.",
|
| 298 |
+
"author": "W. Liu, J. Sun, G. Wang, F. Bullo, and J. Chen.",
|
| 299 |
+
"venue": "IEEE Trans. Autom. Control, 68(8):4722\u20134737, Aug. 2023a.",
|
| 300 |
+
"url": null
|
| 301 |
+
}
|
| 302 |
+
},
|
| 303 |
+
{
|
| 304 |
+
"22": {
|
| 305 |
+
"title": "Learning robust data-based LQG controllers from noisy data.",
|
| 306 |
+
"author": "W. Liu, J. Sun, G. Wang, F. Bullo, and J. Chen.",
|
| 307 |
+
"venue": "IEEE Trans. Autom. Control, pages 1\u201313, May,\n2023b.",
|
| 308 |
+
"url": null
|
| 309 |
+
}
|
| 310 |
+
},
|
| 311 |
+
{
|
| 312 |
+
"23": {
|
| 313 |
+
"title": "On a continuous-time version of Willems\u2019 Lemma.",
|
| 314 |
+
"author": "V. G. Lopez and M. A. M\u00fcller.",
|
| 315 |
+
"venue": "In Proc. IEEE Conf. Decision and Control, pages 2759\u20132764,\nCanc\u00fan, Mexico, 2022.",
|
| 316 |
+
"url": null
|
| 317 |
+
}
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"24": {
|
| 321 |
+
"title": "Sensor-network-based robust distributed control and estimation.",
|
| 322 |
+
"author": "P. Mill\u00e1n, L. Orihuela, C. Vivas, F. R. Rubio, D. V. Dimarogonas, and K. H.\nJohansson.",
|
| 323 |
+
"venue": "Control Eng. Pract., 21(9):1238\u20131249,\nSept. 2013.",
|
| 324 |
+
"url": null
|
| 325 |
+
}
|
| 326 |
+
},
|
| 327 |
+
{
|
| 328 |
+
"25": {
|
| 329 |
+
"title": "Data-driven criteria for detectability and observer design for lti\nsystems.",
|
| 330 |
+
"author": "V. K. Mishra, H. J. van Waarde, and N. Bajcinca.",
|
| 331 |
+
"venue": "In Proc. IEEE Conf. Decision and Control, pages 4846\u20134852,\nCanc\u00fan, Mexico, Dec. 6-9, 2022 .",
|
| 332 |
+
"url": null
|
| 333 |
+
}
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"26": {
|
| 337 |
+
"title": "Distributed unknown input observers for fault detection and\nisolation.",
|
| 338 |
+
"author": "S. Nazari and B. Shafai.",
|
| 339 |
+
"venue": "In Proc. IEEE Int. Conf. on Control and Autom., pages\n319\u2013324, Edinburgh, UK, July, 16-19, 2019.",
|
| 340 |
+
"url": null
|
| 341 |
+
}
|
| 342 |
+
},
|
| 343 |
+
{
|
| 344 |
+
"27": {
|
| 345 |
+
"title": "Distributed Kalman filter with embedded consensus filters.",
|
| 346 |
+
"author": "R. Olfati-Saber.",
|
| 347 |
+
"venue": "In Proc. IEEE Conf. Decision and Control, pages 8179\u20138184,\nSeville, Spain, Dec. 12-15, 2005.",
|
| 348 |
+
"url": null
|
| 349 |
+
}
|
| 350 |
+
},
|
| 351 |
+
{
|
| 352 |
+
"28": {
|
| 353 |
+
"title": "Data-driven input reconstruction and experimental validation.",
|
| 354 |
+
"author": "J. Shi, Y. Lian, and C. N. Jones.",
|
| 355 |
+
"venue": "IEEE Control Syst. Lett., 6:3259\u20133264, June, 2022.",
|
| 356 |
+
"url": null
|
| 357 |
+
}
|
| 358 |
+
},
|
| 359 |
+
{
|
| 360 |
+
"29": {
|
| 361 |
+
"title": "Comparing robustness of the Kalman, , and UFIR\nfilters.",
|
| 362 |
+
"author": "Y. S. Shmaliy, F. Lehmann, S. Zhao, and C. K. Ahn.",
|
| 363 |
+
"venue": "IEEE Trans. Signal Process., pages 3447\u20133458, May, 2018.",
|
| 364 |
+
"url": null
|
| 365 |
+
}
|
| 366 |
+
},
|
| 367 |
+
{
|
| 368 |
+
"30": {
|
| 369 |
+
"title": "Event-based state estimation with variance-based triggering.",
|
| 370 |
+
"author": "S. Trimpe and R. D\u2019Andrea.",
|
| 371 |
+
"venue": "IEEE Trans. Autom. Control, 59(12):3266\u20133281, Aug. 2014.",
|
| 372 |
+
"url": null
|
| 373 |
+
}
|
| 374 |
+
},
|
| 375 |
+
{
|
| 376 |
+
"31": {
|
| 377 |
+
"title": "Data-driven unknown-input observers and state estimation.",
|
| 378 |
+
"author": "M. S. Turan and G. Ferrari-Trecate.",
|
| 379 |
+
"venue": "IEEE Control Syst. Lett., 6:1424\u20131429, Aug. 2021.",
|
| 380 |
+
"url": null
|
| 381 |
+
}
|
| 382 |
+
},
|
| 383 |
+
{
|
| 384 |
+
"32": {
|
| 385 |
+
"title": "State observers for discrete-time linear systems with unknown inputs.",
|
| 386 |
+
"author": "M. E. Valcher.",
|
| 387 |
+
"venue": "IEEE Trans. Autom. Control, 44(2):397\u2013401, Feb. 1999.",
|
| 388 |
+
"url": null
|
| 389 |
+
}
|
| 390 |
+
},
|
| 391 |
+
{
|
| 392 |
+
"33": {
|
| 393 |
+
"title": "State-feedback stabilization of multi-input compartmental systems.",
|
| 394 |
+
"author": "M. E. Valcher and I. Zorzan.",
|
| 395 |
+
"venue": "Syst. Control Lett., 119:81\u201391, Sept. 2018.",
|
| 396 |
+
"url": null
|
| 397 |
+
}
|
| 398 |
+
},
|
| 399 |
+
{
|
| 400 |
+
"34": {
|
| 401 |
+
"title": "A note on persistency of excitation.",
|
| 402 |
+
"author": "J. C. Willems, P. Rapisarda, I. Markovsky, and B. L. M. De Moor.",
|
| 403 |
+
"venue": "Syst. Control Lett., 54(4):325\u2013329, Apr.\n2005.",
|
| 404 |
+
"url": null
|
| 405 |
+
}
|
| 406 |
+
},
|
| 407 |
+
{
|
| 408 |
+
"35": {
|
| 409 |
+
"title": "Robust data-driven moving horizon estimation for linear discrete-time\nsystems.",
|
| 410 |
+
"author": "T. M. Wolff, V. G. Lopez, and M. A. M\u00fcller.",
|
| 411 |
+
"venue": "IEEE Trans. Autom. Control, 69(8):5598\u20135604, Aug. 2024.",
|
| 412 |
+
"url": null
|
| 413 |
+
}
|
| 414 |
+
},
|
| 415 |
+
{
|
| 416 |
+
"36": {
|
| 417 |
+
"title": "State estimation using a network of distributed observers with\nunknown inputs.",
|
| 418 |
+
"author": "G. Yang, A. Barboni, H. Rezaee, and T. Parisini.",
|
| 419 |
+
"venue": "Automatica, 146:110631, Dec. 2022.",
|
| 420 |
+
"url": null
|
| 421 |
+
}
|
| 422 |
+
}
|
| 423 |
+
],
|
| 424 |
+
"url": "http://arxiv.org/html/2401.04660v2"
|
| 425 |
+
}
|
20241004/2401.07867v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2402.04732v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2402.09272v2.json
ADDED
|
@@ -0,0 +1,548 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Insights and caveats from mining local and global temporal motifs in cryptocurrency transaction networks",
|
| 3 |
+
"abstract": "Distributed ledger technologies have opened up a wealth of fine-grained transaction data from cryptocurrencies like Bitcoin and Ethereum. This allows research into problems like anomaly detection, anti-money laundering, pattern mining and activity clustering (where data from traditional currencies is rarely available). The formalism of temporal networks offers a natural way of representing this data and offers access to a wealth of metrics and models. However, the large scale of the data presents a challenge using standard graph analysis techniques. We use temporal motifs to analyse two Bitcoin datasets and one NFT dataset, using sequences of three transactions and up to three users. We show that the commonly used technique of simply counting temporal motifs over all users and all time can give misleading conclusions. Here we also study the motifs contributed by each user and discover that the motif distribution is heavy-tailed and that the key players have diverse motif signatures. We study the motifs that occur in different time periods and find events and anomalous activity that cannot be seen just by a count on the whole dataset. Studying motif completion time reveals dynamics driven by human behaviour as well as algorithmic behaviour.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Digital assets have a growing importance in the modern world with cryptocurrencies and Non-Fungible Tokens (NFTs) being important examples: cryptocurrencies are analogous to digital wealth and NFTs are analogous to digital ownership. An advantage for researchers is that the transfer is recorded, almost always publicly, on a blockchain giving valuable data-sets and opportunities to bring sophisticated analysis tools to bear. This enables us to gain insight into the nature of these ecosystems. An additional challenge in this kind of analysis is that these systems are relatively young; Bitcoin (BTC) was created in 2008 [20 ###reference_b20###], NFTs in 2014 and hence they change rapidly over time. The systems can be viewed as a set of exchanges between a pair of users with each exchange having a timestamp and an amount. It is natural to view and analyse such systems as weighted temporal graphs, where nodes are users and a directed edge represents a transaction from the source to the destination node.\nThe paper shows that temporal motifs are a powerful tool for analysis of transaction networks. We extend the work of [21 ###reference_b21###] by providing methods that allow temporal motifs to be examined locally (that is, counting the number and type of temporal motifs centred around each node) and temporally (that is showing how the number and type of motifs evolve over time). These extensions rely on integrating our algorithms with our Raphtory software [23 ###reference_b23###] that allows efficient parallel computation of temporal structures in networks. This analysis would not be possible using the software developed in [21 ###reference_b21###]. By taking a deep dive into datasets centred on cryptocurrency we show how these tools can be used in practice.\nThe paper also presents a number of caveats: (i) Traditional assumptions about temporal ordering within a motif do not hold precisely with transactions within a single block (we show that the effects of this on our results are minor but this might not be the case for all possible analysis). (ii) Motif counts of the transaction graph as a whole might give a distribution of motifs that places undue attention on events limited to a single node or a small number of nodes and (iii) limited to a short span of time. Using motif analysis on these networks requires researchers to investigate the distribution both across time and across individual nodes. We show how the tools and techniques developed for this research can achieve this."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "II Background",
|
| 15 |
+
"text": ""
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "II.1 Temporal motif definition",
|
| 21 |
+
"text": "Motifs [17 ###reference_b17###, 31 ###reference_b31###] are small commonly occurring subgraphs in a network, which can be thought of as measuring the organisation of that network at a small scale. The study of motifs has been of great importance in the fields of biology [26 ###reference_b26###], anatomy [22 ###reference_b22###] and social science [10 ###reference_b10###]. Temporal motifs [21 ###reference_b21###, 11 ###reference_b11###] add a requirement that the motif takes place in a set order and within a certain timeframe.\n###figure_1### ###figure_2### ###figure_3### ###figure_4### We begin by defining what we mean by a temporal motif following Paranjape et al. [21 ###reference_b21###]. Let the tuple define a transaction from user to user at time , and let represent a time interval. Define a -node -edge temporal motif as a sequence of edges such that and , and the graph induced by these edges is connected with nodes. is an instance of motif if there is a mapping for and the same constraints as are obeyed. Figure 1 ###reference_### shows a temporal motif , a timestamped graph, an example instance and non-instance of for a given . In this paper we consider three-edge up-to-three-node temporal motifs.\nWhen considering the global motif count there are thirty six possible motifs of this type (pictured in fig. 2 ###reference_###). We divide them here into six classes according to the number of nodes, the subgraph formed by aggregating the motif\u2019s edges, and the direction of the transactions involved, along with their colour in fig. 2 ###reference_###:\n(i) Two-node motifs, all transactions in the same direction (dark grey); (ii) two-node motifs with transactions in mixed directions (green); (iii) a three node \u201cstar\" (a centre node with two nodes connected to it) with all transactions incoming to the centre (pink); (iv) a three node \u201cstar\" with all transactions outgoing from the centre (blue); (v) a three node star with mixed transaction directions (orange) and finally (vi) triangular motifs (light grey) which can have transactions in a cyclic direction (e.g. and ) or mixed directions. For a global motif count these are the only possible motifs with three edges and up to three nodes. Figure 2 ###reference_### is shaded to distinguish these motifs. With a local motif count then we can distinguish some extra motif types. For example, three transactions in the same direction between the same pair of nodes (the lower left box in fig. 2 ###reference_###) would be seen as \u201call outgoing\" from the point of view of one node but \u201call incoming\" from the point of view of the other. The two node motifs that can be distinguished from a local node (but not a global perspective) are surrounded by a box in fig. 2 ###reference_###.\n###figure_5###"
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "II.2 Related work",
|
| 27 |
+
"text": "Temporal motifs have been used for studying financial transactions [15 ###reference_b15###], call networks [12 ###reference_b12###], email interactions [4 ###reference_b4###] and patent networks [16 ###reference_b16###]. In the context of a transaction networks, temporal motifs can give us an insight into the pattern of trade. For example, if we often see the motif: A sends to B then B sends to C, it implies node B is often a \u201cmiddleman\" in the trade but if we often see the motif: A sends to B then A sends to C it suggests that node A may be a buyer (sending virtual currency for goods or services) or an on-ramp (trading virtual currency for real world currency). Motif counting has been used within cryptocurrency networks to detect addresses associated with mixing services [29 ###reference_b29###] (services designed to obfuscate the intended sender and recipient of a transaction) or for detecting phishing scams in Ethereum [27 ###reference_b27###] where authors posed this as a classification problem. These authors found that the inclusion of temporal motif features improved their classification results beyond using basic transaction features. Motifs have also been used to understand the longer-term formation of the socio-economic network of traders, using graph evolution rules to highlight how different structures like closed triangles emerge between traders [7 ###reference_b7###, 3 ###reference_b3###]. More generally, static motifs have been used as a descriptive graph metric for comparing cryptocurrency networks to other social networks [13 ###reference_b13###], as features for a token price prediction pipeline [5 ###reference_b5###] and to investigate the rise of smart contracts in Ethereum, typically involving groups of pre-programmed transactions [30 ###reference_b30###]. Motifs are also related to cycles which have been used to study wash trading and market manipulation in NFT markets by our team [32 ###reference_b32###] and others [25 ###reference_b25###].\nMost of the literature mentioned uses temporal motifs for a specific task such as detecting a type of entity or anomalous activity. In this paper, we step back and take a more explorative approach, highlighting the insights that can be gained using temporal motifs into cryptocurrency systems as a whole as well as individuals within them, making use of three different datasets. Two of the datasets are BTC transactions centred around different \u201cdark web marketplaces\" [19 ###reference_b19###, 6 ###reference_b6###] (services on the dark web that exist to facilitate the exchange of goods and services, often illegal) and a third is a collection of NFT sales. We use temporal motifs with three edges and a maximum of three nodes [21 ###reference_b21###] but with two further steps: (i) we disaggregate the motifs by user, that is to obtain a local motif count for every vertex in the graph and (ii) we study the whole graph motif counts over different times and different timescales. We find that counting local motifs produces a very different view of the network from simply looking at node degree or weighted node degree. It gives prominence in the analysis to a different set of nodes (the highest degree node does not have the largest motif count) and the motif count over time shows a very different trend to the transaction count over time. It also allows insight into the nature of and importance of nodes in the network that might be overlooked by more traditional temporal graph methods."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "2.3",
|
| 31 |
+
"parent_section_id": "2",
|
| 32 |
+
"section_name": "II.3 Datasets",
|
| 33 |
+
"text": "Three cryptocurrency datasets are used in this paper. The Alphabay and Hydra datasets are bitcoin transactions centred around the Alphabay and Hydra dark web marketplaces respectively. These marketplaces act as an escrow service, such that a typical (successful) purchase on these platforms involves a transaction from the buyer to the market node followed by a transaction from the market node to the seller after the goods have been confirmed as received. They include transactions up to two hops away from the marketplace nodes (i.e. the transactions of all users who have bought or sold on these markets); this includes transactions to and from the marketplace itself. The NFT dataset is formed of transactions representing the purchase of an NFT, usually but not always in Ethereum. Unlike the dark web marketplace purchases, NFT purchases on a marketplace like OpenSea are executed using a smart contract initiating a direct transaction from buyer to seller and a small platform fee is taken by OpenSea. We do not observe transactions representing a platform fee, just the buyer to seller transactions.\nIn Bitcoin, one person tends to have multiple different wallets (for example, a basic transaction involving user sending an amount to user will involve an additional change wallet belonging to user ). The Alphabay and Hydra datasets are therefore pre-processed by Chainalysis Inc. such that wallets who are believed to be associated with the same person are merged into one entity in the graph. The NFT purchases network is not preprocessed in this way. In the NFT network the direction of the transfer is the direction in which money is sent, not the direction in which the NFT is sent, this makes it consistent with the other two datasets. Part of the nature of blockchain systems is that transactions are grouped in \u201cblocks\". If two transactions are within the same block it is not certain which order they came in, although it might be argued that a transaction at the end of the block is highly likely to occur after one at the beginning. This issue is discussed in depth in section V.5 ###reference_###.\nIn all datasets, nodes represent wallets (potentially a group of wallets as discussed above for Alphabay and Hydra); a directed edge from node to node at time represents a transaction from the user represented by node to the user represented by node . Table 1 ###reference_### gives some statistics on the number of nodes, the number of edges (the number of ordered pairs such that there is at least one transaction from to ) and the total number of transactions.\nThroughout the paper we will refer to the data sets as Alphabay, Hydra and NFT. It\u2019s important to remember that Alphabay and Hydra can be viewed as different subsets of the same underlying dataset (the global bitcoin transfer network)."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "III Results",
|
| 39 |
+
"text": ""
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.1",
|
| 43 |
+
"parent_section_id": "3",
|
| 44 |
+
"section_name": "III.1 Motifs carry different information to (weighted) node degree",
|
| 45 |
+
"text": "###figure_6### ###figure_7### ###figure_8### It is important to establish that motif counts are giving different information than simply looking at, for example, node degree. In fig. 3 ###reference_### for each node we plot the number of motifs (with of one hour for Alphabay/Hydra and one day for NFTs) against the number of transactions. We colour the node by the wallet balance, that is the amount in dollars coming in to that node minus the amount going out within the time period. An obvious feature is that while there is a clear correlation between the number of transactions and the number of motifs, this varies by many orders of magnitude; for example a node that is involved in 1,000 transactions may be part of fewer than 10 motifs but may be involved in as many as 100 billion. The node in fig. 3(a) ###reference_sf1### with the most transactions (corresponding to Alphabay itself) is not in the top ten for motif count. Besides the large fluctuations in the relations between number of transactions and number of motifs, one also observes non-trivial patterns, such as in fig. 3(b) ###reference_sf2###, which may be associated to the existence of different types of users in the system."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.2",
|
| 49 |
+
"parent_section_id": "3",
|
| 50 |
+
"section_name": "III.2 Global motif counts show fanout patterns of trade",
|
| 51 |
+
"text": "In figs. 4(a) ###reference_sf1###, 4(b) ###reference_sf2### and 4(c) ###reference_sf3### we look at the global number of motifs for each dataset. A first thing to note is the huge variation in motif counts. The highest motif count for Alphabay is 9.3 trillion for an outgoing star motif whereas the smallest motif count is 164 thousand for a mixed triangular motif count. It is clear that some types of motif have extremely high prevalence in this data compared to others. Still, one may wonder if these high counts of motifs occur by chance due (say) to an entity having a high number of outgoing transactions and the motifs are simply a product of a large number of edges between certain node pairs. To investigate this, we use a randomised reference model (see section V.4 ###reference_###) which preserves the source and destination of all transactions but permutes the time at random, so that the overall activity rate and aggregate graph structure is preserved but the order of events is lost. Figures 4(d) ###reference_sf4###, 4(e) ###reference_sf5### and 4(f) ###reference_sf6### show the ratio between the motif counts in the original and shuffled network, showing that for Alphabay the four all-outgoing motifs are thousands of times overrepresented. All motifs are represented more than would be expected in the null reference model. Similar results hold for the Hydra dataset. An outbound pattern corresponds to an entity which is largely sending currency, perhaps buying physical goods, performing automated services or exchanging to some other (perhaps fiat) currency.\n###figure_9### ###figure_10### ###figure_11### ###figure_12### ###figure_13### ###figure_14### For the NFT data we found of one day was a more appropriate time scale (again see section III.6 ###reference_###). This may be because the NFT data set has a slower rate of arrival of edges. The NFT data set shows more diversity of motif types. The extreme difference in count magnitudes is still present but is reduced (the largest motif has a count of 1.6 billion for an outgoing star, the smallest, 471 thousand for a mixed direction triangle). While outbound star patterns are still the most common motifs, mixed star patterns with one node always inbound and one node always outbound are also common, perhaps a user selling one NFT to fund the purchase of another or buying an NFT and selling it shortly after. It is also worth noting that triangular motifs are rare in the NFT marketplace (again this is not unexpected, there is no particular reason three traders would wish to exchange NFTs between themselves) and in some cases the number of triangle motifs is lower than the chance results from the time-shuffled null model ( and ).\nA note on interpreting motif count plots: In fig. 4 ###reference_### the cell corresponds to the count in fig. 2 ###reference_###. However, the axes can be used to interpret which motif is being used without referring to that figure. The x and y axes show the directions and participants for the three transactions. The row (y-axis) corresponds to the first two transactions and the column (x-axis) corresponds to the third. For example, the top right box is the motif count for transactions from yellow to red, then blue to red then blue to red a second time (a type of inbound star motif)."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "3.3",
|
| 55 |
+
"parent_section_id": "3",
|
| 56 |
+
"section_name": "III.3 Motif counts are dominated by a very small number of nodes",
|
| 57 |
+
"text": "Given the magnitude of the motif counts found in the graph-wide counts, we next ask how these motifs are dispersed across the nodes in the network. Figure 5 ###reference_### shows the distribution of motifs across the nodes in the network. For Alphabay we largely see straight lines in the log-log CCDF which is characteristic of power law behaviour and shows huge dominance by the nodes with the most motifs. In Hydra only the three-node star with all incoming edges and two-node same direction motifs are power-law, the others display sudden downward \u201cbumps\u201d (e.g. around motifs for 3 node star all outgoing). These \u201cbumps\" indicate a group of nodes with extremely similar motif counts, one possible explanation for this is the presence of groups of nodes implementing similar automated behaviour.\n###figure_15### ###figure_16### ###figure_17### For the NFT data Figure 5(c) ###reference_sf3### the situation is similar but less extreme. The curves are smoother than in the Alphabay and Hydra and fall off towards higher motif counts. However, they still indicate a situation where the bulk of the count of motifs is with a small number of nodes. The two-node mixed direction motif has a much smaller count than any other in this data set. This is unsurprising as it would indicate two people buying NFTs from each other in quick succession."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "3.4",
|
| 61 |
+
"parent_section_id": "3",
|
| 62 |
+
"section_name": "III.4 Motifs are varied among the top 10 nodes",
|
| 63 |
+
"text": "Among the top 10 nodes in each dataset (ordered by total motif counts), fig. 6 ###reference_### shows that there are a number of distinct \u2018signatures\u2019 which may signify different functional roles of the nodes in this network. One of the patterns (e.g. shared by nodes 3,6,8,9 and 10 in Alphabay and 8 in Hydra) is mostly having just pairwise incoming motifs, another (nodes 1, 4, 5 in Alphabay and 1, 2, 4 in Hydra) being star motifs with all outgoing edges. The Alphabay node itself, despite its central role in the network as an escrow, placed 17th in total number of 1-hour motifs. The top node in Alphabay has 19 trillion motifs in total, a slightly negative transaction balance (about 2% of its incoming transaction value) and a large out-degree (1831183) compared to in-degree (668). By checking with a blockchain explorer the addresses involved in the transaction and referencing with the GraphSense wallet/address labelling service [9 ###reference_b9###], these addresses seem to be associated with an illicit service that was very popular for a short amount of time.\nAmong the highest motif nodes in the NFT dataset, some (e.g. nodes 4, 5, 7, 9 and 10) show similarities to top nodes in Alphabay and Hydra with all-outgoing patterns being common, but we also see more incoming-based star motifs (nodes 6, 8) and the top node has more of a variety of motifs.\n###figure_18###"
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "3.5",
|
| 67 |
+
"parent_section_id": "3",
|
| 68 |
+
"section_name": "III.5 Motifs reveal events not observable by studying transaction volume",
|
| 69 |
+
"text": "Next we study how the motif counts are spread out through the time periods of the different datasets. To do this, we group the transactions by month and count the temporal motifs within that month with set to one hour for Alphabay and Hydra and set to one day for the NFT sales following previous experiments. Figure 7 ###reference_### shows these results, in terms of total motif counts and relative counts. In the Alphabay data, we see a spike in motif counts some months after the market opens in 2014, which at first seems to follow the trend in transaction volume. However, these counts drop off long before the transactions do, and long before the market is shut down. Given that the entity with the most motifs is possibly an illicit service or mixer (discussed in section III.4 ###reference_###), the drop-off could be due to users moving away from using it. In the Hydra market, we observe shadows of Alphabay\u2019s active period between 2014-2017 (some of Hydra\u2019s userbase were Alphabay users who migrated when it was shut down [19 ###reference_b19###]. This is followed by a similar spike and premature drop-off of motifs to Alphabay that is not mirrored in the transaction volume, which is still increasing at the end of our observation period. The vast majority of the NFT motifs occur within early 2020 when the technology was taking off followed by another spike at the end of the study period.\nFollowing this, we investigate the composition of motifs over time, that is the counts of each category of motif as a proportion of all motif counts. In the Alphabay dataset, most of the motifs between 2013 and 2016 are two-node motifs representing transactions between peers (it is unlikely that these two-node motifs would involve the marketplace at this point as this would additionally invoke star motifs). At some point coinciding with the spike observed in all motif counts, this behaviour transitions to being all-outgoing stars which dominate, which could be the result of entities like the marketplace itself or an exchange facilitating transactions, or switching behaviour of users making purchases among different vendors. However, instead of the two-node motifs returning after Alphabay is shut down in 2017, the star motifs are still the most prevalent, fluctuating between all-incoming and all-outgoing. In the NFT dataset, apart from early 2020 where there is a spike in motif volume, most of the motifs are all-incoming stars. This could be due to the practice of sellers auctioning a number of their NFTs with the same closing period (and hence the transactions from winning bidders come in at the same time).\n###figure_19### ###figure_20### ###figure_21### ###figure_22### ###figure_23### ###figure_24###"
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "3.6",
|
| 73 |
+
"parent_section_id": "3",
|
| 74 |
+
"section_name": "III.6 Choosing and examining the effects of time period",
|
| 75 |
+
"text": "Obviously, the choice of the time period is important. In this section we justify the choice of one hour for Alphabay and Hydra and one day for NFT. We do this by asking how many extra motifs are introduced by increasing the time window by a single hour. This highlights time periods of particular importance. By subtracting the motifs obtained with time window plus one hour from those with time window we can investigate the new motifs introduced as the time window used is increased. Moreover we can assess the robustness of the parameter as a measure of the number of motifs. Figure 8 ###reference_### shows the effect of increasing from one hour up to just over one week (168 hours) by one hour increments. First, we see that for Alphabay, our chosen size for most of the analysis hour captures the vast majority of short-range motifs, seen by the dip from one to two hours across the most common motifs (those which are three nodes outgoing and three nodes mixed directions). Apart from the two nodes mixed directions motif, the increase in motifs is then stable up to seven days. The graph for two nodes mixed directions shows a variety of peaks at between one hour and twelve hours; the reason for this is not completely certain but these could be human-moderated trades between two individuals done on the same day. Finally, all motifs show a peak at one week, this is due to an autofinalisation escrow mechanism in Alphabay where trades are cleared if not confirmed by the buyer within seven days. This leads to a peak at precisely seven days where a transaction directly causes another exactly seven days later. This is not a particularly useful timescale for analysis though, firstly because it is very long and would lead to extremely large motif counts but mainly because the motifs are a result of a \u201crefund\" mechanism rather than genuine trades.\nFor the NFT trading the three node motif counts all show large peaks at intervals of one day. The peaks increase in size as the number of days increases. A possible explanation is that the motif counts are related to the human diurnal cycle and people tend to trade with people in the same geographic area. Whatever the mechanism, this makes of 1 day the most obvious time period to study. In the case of motifs involving only two nodes there is no clear peak in the motif counts but these motifs are relatively rare compared with three node motifs (less than 1% of the number of three node motifs). It is also interesting to note that the time-shuffled null model produces as many or more triangles than the real data showing that triangular trade patterns between three individuals are rare in the NFT marketplace within the one week time scale.\n###figure_25### ###figure_26###"
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "4",
|
| 79 |
+
"parent_section_id": null,
|
| 80 |
+
"section_name": "IV Discussion and conclusion",
|
| 81 |
+
"text": "This paper investigated the use of temporal network motifs to explain trade in cryptocurrency networks. We showed how to optimise the time period (the maximum time for a temporal motif) and how varying it gave insight into the behaviour of the studied systems. Efficient temporal graph computation in the Raphtory software allowed us to investigate multiple values of and multiple time windows. This was impossible using existing bespoke temporal motifs software. It allowed us to identify behaviours we that we would not have been able to see from a single or small number of values. We could see both human (diurnal) and mechanism (e.g. escrow timeouts) induced behaviours in the graphs.\nIn all three datasets a naive analysis of graph-wide temporal motif counts would be misleading. It is vital to consider the distribution of motifs over the time dimension and individual nodes (with particular attention to \u201cheavy-hitters\"). All the networks studied had extreme nodes that contributed a high proportion of the motifs. Simply counting motifs for the Alphabay network, for example, would not tell you that a high proportion of all motifs were from a single node in a short (six month) period. For the networks with observations centered around dark markets (Alphabay and Hydra) it was unexpected that the nodes representing the markets themselves did not top the list of nodes by motif count.\nIn the Hydra network we could clearly see evidence of synchronisation between multiple nodes likely indicating coordinated, automated behaviour. The top ten nodes by motif count could be grouped into distinct classes: mainly buying behaviour (multiple transfers to one or more nodes); mainly selling behaviour (multiple transfers from one or more other nodes) and a small number exhibiting a mixture of sell/buy behaviours. Triangular motifs were rare in all cases and lower than the null model (chance) rate for the NFT data. Done with care this form of analysis of networks leads to insights into trade behaviours not obtainable from other analysis tools.\nGiven the diverse motif profiles of heavy-hitting users and the ability to uncover events not observable by simply counting transactions, we believe local temporal motifs may prove useful as features for classification or clustering tasks, such as in the domains of anti-money laundering or fraud detection."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "5",
|
| 85 |
+
"parent_section_id": null,
|
| 86 |
+
"section_name": "Methods",
|
| 87 |
+
"text": "In this paper, we study three-edge, up-to-three node, temporal motifs, defined in section II.1 ###reference_###. Three is chosen for the following reasons. Firstly, it lies in the middle of a tradeoff between descriptive power and computational complexity; there are only six possible two-edge temporal motifs but an algorithm for counting four-edge, up-to-four node, temporal motifs is not feasible to run on this scale of data because of the combinatorial explosion. In addition, with so many possible motifs interpretability becomes a problem. Secondly, a number of datasets have been studied using this technique [21 ###reference_b21###, 16 ###reference_b16###, 15 ###reference_b15###], hence our work is more comparable with that of other researchers."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "5.1",
|
| 91 |
+
"parent_section_id": "5",
|
| 92 |
+
"section_name": "Counting local and global motifs",
|
| 93 |
+
"text": "In this paper, as well as studying global motif counts over the whole graph, we study motif counts from the perspective of the nodes involved in them. We refer to these as the local motif counts and we also study them as they change in time. We first define a temporal graph where is a set of vertices and is a sequence of tuples , with and which we will refer to as temporal edges, representing an event (e.g. a trade of currency) from node to node at time . In our data the event often also has a weight representing the amount of currency involved in the transaction. The local motif count of vertex for a given motif (motif definition in section II.1 ###reference_###) refers to the number of times vertex has participated in motif . To count the local motifs, we implement a modified version of the algorithm by Paranjape et al [21 ###reference_b21###] in our temporal graph library Raphtory [24 ###reference_b24###, 23 ###reference_b23###] which performs the counts in parallel across the vertices. Raphtory was built from the ground up to study large-scale temporal networks efficiently, including the possibility to use \u2018windowed\u2019 approaches which enabled the study of temporal motifs over time (section III.5 ###reference_###). Global temporal motifs counting code from SNAP [14 ###reference_b14###] was tested on the Alphabay dataset but didn\u2019t complete due to out-of-memory issues, whereas the implementation in Raphtory took 3 minutes on the same hardware (2021 MacBook Pro with 16GB RAM). Note that our motif counting algorithm is fundamentally the same; in both implementations, motifs are counted for each vertex using the Paranjape et al counting algorithm [21 ###reference_b21###]. In their work, these local counts are then summed to obtain graph-wide motif counts whereas we also study the local counts. Differences in runtime/memory usage are therefore likely due to differences in the underlying graph storage/low level functions of SNAP and Raphtory. With this in mind, we point the readers to their work [21 ###reference_b21###] for pseudocode of the algorithm. We instead focus here on the details of what is meant by a local motif in terms of which nodes in a motif count that motif. A speed comparison with [21 ###reference_b21###] is in section V.3 ###reference_###."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "5.1.1",
|
| 97 |
+
"parent_section_id": "5.1",
|
| 98 |
+
"section_name": "V.1.1 Two-node motifs",
|
| 99 |
+
"text": "There are eight () possible two-node, three-edge, motifs when measured from the perspective of an individual node and this corresponds to three links in temporal order with two possible directions. When viewed globally, this collapses to the four possibilities shown in the bottom left of fig. 2 ###reference_### when not viewed from the perspective of a single node. Consider the following example: from the point of view of a node pair it makes sense for the node to count a three edge temporal motif with that as \u201coutgoing, incoming, outgoing\" and from the perspective of this same motif would be counted as \u201cincoming, outgoing, incoming\". From a global perspective this is a single motif with transactions in alternating directions. For plotting, we often group these into all-outgoing, all-incoming and mixed directions. The time-complexity of counting two-node motifs is where is the number of temporal edges."
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "5.1.2",
|
| 103 |
+
"parent_section_id": "5.1",
|
| 104 |
+
"section_name": "V.1.2 Three-node stars",
|
| 105 |
+
"text": "The star motifs (all motifs in fig. 2 ###reference_### excluding the two-node motifs and triangle motifs) involve one central node having transactions with two other leaf nodes. In our work, we include an instance of this motif in a node \u2019s local motif count only if is the central node of this motif. One might argue that the two leaf nodes should also count this motif instance. However, we argue that (i) the central node is more of interest in such a motif, (ii) having all three nodes count the motif would result in more noise, and (iii) would be more computationally intensive to run, as it may instead involve running the global motifs count across the two-hop neighbourhood of each node. As with two-node motifs, the time-complexity of counting star motifs is .\nIt is useful to aggregate these star motifs into different sub-categories of all-outgoing, all-incoming and mixed-direction stars since they represent different behavioural patterns: it is more likely to see three outgoing transactions happen at a similar time than three incoming transactions since they originate from the same source, and mixed direction stars may involve some dependent sequence of events. This classification is used in some of the figures for clarity instead of showing all 36 global/40 local motifs."
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "5.1.3",
|
| 109 |
+
"parent_section_id": "5.1",
|
| 110 |
+
"section_name": "V.1.3 Triangles",
|
| 111 |
+
"text": "Triangle motifs are counted locally for each of the nodes involved in a triangle since, for each of those motifs, it is hard to make an argument for one of its constituent nodes being the most important. Additionally, there is little extra difficulty counting triangle motifs for each node (compared to leaf nodes of the star motifs for example), they are simply over-counted once for each node, with respect to the global counting algorithm which counts each triangle once. The time-complexity of counting triangle motifs in Raphtory is where is the number of static triangles in the graph. The first term relates to the enumeration of static triangles in the graph, the second to counting the number of triadic temporal motifs given the edges involved in each static triangle. We argue that in cryptocurrency transaction networks, the first term tends to dominate since the number of triangles in such a network is fairly small; this may not be the case in social networks."
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"section_id": "5.2",
|
| 115 |
+
"parent_section_id": "5",
|
| 116 |
+
"section_name": "Choice of timescale",
|
| 117 |
+
"text": "Naturally, the choice of timescale plays a role in what motifs are discovered. Human communication patterns have been known to have characteristic timescales [1 ###reference_b1###] based on memory/attention [28 ###reference_b28###] and diurnal behaviour [2 ###reference_b2###]. In transaction networks, we may also expect to see timescales resulting from pre-programmed rules such as preset durations for NFT auctions, transaction clearing times or payroll. While we perform a deeper study of which motifs arise at which timescale in section III.6 ###reference_###, for most of the results we choose a timescale of one hour for the Alphabay/Hydra datasets and one day for the NFT datasets, corresponding to the first peak for a number of the motifs in fig. 8 ###reference_###. One observation which may be useful to practitioners is that one can study motifs which have a duration between values and by subtracting the motifs obtained with from those obtained with . In some scenarios for example, motifs from transactions that all occur within the same block may be indicative to an algorithmic mechanism. If it is preferable to exclude such motifs in an effort to exclude pre-programmed behaviour, this can therefore be done by subtracting motifs with set to some time period much less than the block processing time e.g. 1 second.\nTo encourage researchers to explore this important effect of timescale choice on results using experiments like fig. 8 ###reference_###, our implementation of the motif mining algorithm in Raphtory includes the possibility to provide a range of delta values as input for the algorithm. This makes use of the observation that enumerating the static triangles in the network is typically the bottleneck of the algorithm\u2019s runtime. Having identified the edges which participate in these static motifs, running the motif counting procedure on these edges for each delta value becomes significantly faster than re-running the full algorithm for each delta value."
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"section_id": "5.3",
|
| 121 |
+
"parent_section_id": "5",
|
| 122 |
+
"section_name": "Speed of execution compared to state-of-the-art",
|
| 123 |
+
"text": "Figure 9 ###reference_### shows the time to count motifs for a range of values in the Raphtory implementation compared with what is arguably the most competitive alternative, the SNAP temporal motifs implementation. For each value on the -axis, the global motifs algorithm is run for an array of timescales: 1 hour, 2 hours, 3 hours, \u2026 , hours with the corresponding value on the axis showing the total runtime in seconds. Therefore, here represents both the number of timescales processed and the size of the largest timescale in hours. For fewer than 6 windows, SNAP is faster, which is likely because Raphtory as a more general-purpose library incurs a slightly higher initial overhead for building the graph. However, the time increment for adding more windows in Raphtory is much smaller than the alternative in SNAP which can be seen by Raphtory\u2019s smaller slope. Note that in both implementations, the time is linear in the number of values processed and that the effect of the size of timescale itself on the time taken to process is negligible.\n###figure_27###"
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"section_id": "5.4",
|
| 127 |
+
"parent_section_id": "5",
|
| 128 |
+
"section_name": "Null model",
|
| 129 |
+
"text": "To investigate whether motifs arise from temporally correlated behaviour or are simply a result of a large number of transactions we use a null model. In this case we use a randomised reference model where for our set of transactions the nodes and are kept the same and the are shuffled randomly among the transactions. This has the effect that between any node pair the same absolute number of transactions occurs over the whole study period and for any chosen time period the same number of transactions occurs in the network. We will refer to this model as the timestamp-shuffled model [8 ###reference_b8###]. For this paper, we produce 10 shuffled versions of each dataset and use the term \u201crelative to the null model\" to mean that we divide the value as found in the real dataset by the mean of the values found on the 10 shuffled versions for that dataset."
|
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"section_id": "5.5",
|
| 133 |
+
"parent_section_id": "5",
|
| 134 |
+
"section_name": "Transactions where time order is uncertain",
|
| 135 |
+
"text": "In a blockchain setting, transactions are released in blocks according to when they are successfully verified by a miner, typically in time intervals of around 8-10 minutes. This means that while a transaction can be initiated by a user/service at any continuous time, it is only possible to observe the block-wide timestamp of each timestamp. The order in which they occur in the block is the order in which they were verified by a miner which comes with a topological order guarantee that if transaction spends an output of then must come before in the block [20 ###reference_b20###], however there is no other guarantee on time ordering within the block. In fact, miners are incentivised to verify the transactions with the highest transaction fee first. For this work we take the block order to be the order of the transactions, but show in our supporting information that randomising intra-block orderings causes a deviation in the motif counts with a maximum at around 3%."
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"section_id": "5.6",
|
| 139 |
+
"parent_section_id": "5",
|
| 140 |
+
"section_name": "Data and code availability",
|
| 141 |
+
"text": "Each of the algorithms (a local motif counting algorithm, a global motif counting algorithm, and a global motif algorithm optimised for multiple timescales) has been implemented in the open-source library Raphtory [23 ###reference_b23###] in Rust and Python. The Alphabay and Hydra datasets are proprietary but the NFT dataset is publicly available [18 ###reference_b18###] and scripts for reproducing the NFT results can be found at https://github.com/narnolddd/motif-paper-reproduce ###reference_eproduce###. All plots were generated using Matplotlib 3.8.2 (https://matplotlib.org/stable/ ###reference_matplotlib.org/stable/###) and Seaborn 0.13.2 (https://seaborn.pydata.org/ ###reference_seaborn.pydata.org/###)."
|
| 142 |
+
}
|
| 143 |
+
],
|
| 144 |
+
"appendix": [],
|
| 145 |
+
"tables": {
|
| 146 |
+
"1": {
|
| 147 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S2.T1\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S2.T1.8\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S2.T1.8.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S2.T1.8.1.1.1\" style=\"padding-bottom:2.15277pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.8.1.1.1.1\">Dataset</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S2.T1.8.1.1.2\" style=\"padding-bottom:2.15277pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.8.1.1.2.1\">Time range(mm/yy)</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S2.T1.8.1.1.3\" style=\"padding-bottom:2.15277pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.8.1.1.3.1\">Number of nodes</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S2.T1.8.1.1.4\" style=\"padding-bottom:2.15277pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.8.1.1.4.1\">Number of directed edges</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S2.T1.8.1.1.5\" style=\"padding-bottom:2.15277pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.8.1.1.5.1\">Number of transactions</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S2.T1.8.2.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T1.8.2.1.1\">Alphabay</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T1.8.2.1.2\">09/2010\u201312/2020</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T1.8.2.1.3\">11,021,692</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T1.8.2.1.4\">15,890,235</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T1.8.2.1.5\">33,588,967</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.8.3.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.8.3.2.1\">Hydra</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.8.3.2.2\">07/2010\u201312/2020</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.8.3.2.3\">20,137,164</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.8.3.2.4\">35,765,997</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S2.T1.8.3.2.5\">69,811,848</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S2.T1.8.4.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S2.T1.8.4.3.1\">NFT</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S2.T1.8.4.3.2\">11/2017\u201304/2021</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S2.T1.8.4.3.3\">532,945</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S2.T1.8.4.3.4\">2,991,602</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S2.T1.8.4.3.5\">6,071,027</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S2.T1.10.4.1\" style=\"font-size:90%;\">Table 1</span>: </span><span class=\"ltx_text\" id=\"S2.T1.6.6.3\" style=\"font-size:90%;\">Dataset summary. Time range (earliest to latest recorded transaction), number of nodes, number of directed edges and number of transactions. Note the distinction between <em class=\"ltx_emph ltx_font_italic\" id=\"S2.T1.6.6.3.1\">edges</em>, the number of unique pairs such that there is at least one transaction from to , and transactions. </span></figcaption>\n</figure>",
|
| 148 |
+
"capture": "Table 1: Dataset summary. Time range (earliest to latest recorded transaction), number of nodes, number of directed edges and number of transactions. Note the distinction between edges, the number of unique pairs such that there is at least one transaction from to , and transactions. "
|
| 149 |
+
}
|
| 150 |
+
},
|
| 151 |
+
"image_paths": {
|
| 152 |
+
"1(a)": {
|
| 153 |
+
"figure_path": "2402.09272v2_figure_1(a).png",
|
| 154 |
+
"caption": "(a)\nFigure 1: An example of extracting a particular temporal motif from a temporal graph. (a) is an example of \u03b4\ud835\udeff\\deltaitalic_\u03b4-temporal motif M\ud835\udc40Mitalic_M with a given \u03b4=10\ud835\udeff10\\delta=10italic_\u03b4 = 10; (b) is a temporal graph with edges appearing at the times shown on each edge; (c) shows an instance of \u03b4\ud835\udeff\\deltaitalic_\u03b4-temporal motifs in the temporal graph; (d) is not a \u03b4\ud835\udeff\\deltaitalic_\u03b4-temporal motif because the difference between the timestamp of the first temporal edge and the timestamp of the last temporal edge exceeds the given \u03b4\ud835\udeff\\deltaitalic_\u03b4.",
|
| 155 |
+
"url": "http://arxiv.org/html/2402.09272v2/x1.png"
|
| 156 |
+
},
|
| 157 |
+
"1(b)": {
|
| 158 |
+
"figure_path": "2402.09272v2_figure_1(b).png",
|
| 159 |
+
"caption": "(b)\nFigure 1: An example of extracting a particular temporal motif from a temporal graph. (a) is an example of \u03b4\ud835\udeff\\deltaitalic_\u03b4-temporal motif M\ud835\udc40Mitalic_M with a given \u03b4=10\ud835\udeff10\\delta=10italic_\u03b4 = 10; (b) is a temporal graph with edges appearing at the times shown on each edge; (c) shows an instance of \u03b4\ud835\udeff\\deltaitalic_\u03b4-temporal motifs in the temporal graph; (d) is not a \u03b4\ud835\udeff\\deltaitalic_\u03b4-temporal motif because the difference between the timestamp of the first temporal edge and the timestamp of the last temporal edge exceeds the given \u03b4\ud835\udeff\\deltaitalic_\u03b4.",
|
| 160 |
+
"url": "http://arxiv.org/html/2402.09272v2/x2.png"
|
| 161 |
+
},
|
| 162 |
+
"1(c)": {
|
| 163 |
+
"figure_path": "2402.09272v2_figure_1(c).png",
|
| 164 |
+
"caption": "(c)\nFigure 1: An example of extracting a particular temporal motif from a temporal graph. (a) is an example of \u03b4\ud835\udeff\\deltaitalic_\u03b4-temporal motif M\ud835\udc40Mitalic_M with a given \u03b4=10\ud835\udeff10\\delta=10italic_\u03b4 = 10; (b) is a temporal graph with edges appearing at the times shown on each edge; (c) shows an instance of \u03b4\ud835\udeff\\deltaitalic_\u03b4-temporal motifs in the temporal graph; (d) is not a \u03b4\ud835\udeff\\deltaitalic_\u03b4-temporal motif because the difference between the timestamp of the first temporal edge and the timestamp of the last temporal edge exceeds the given \u03b4\ud835\udeff\\deltaitalic_\u03b4.",
|
| 165 |
+
"url": "http://arxiv.org/html/2402.09272v2/x3.png"
|
| 166 |
+
},
|
| 167 |
+
"1(d)": {
|
| 168 |
+
"figure_path": "2402.09272v2_figure_1(d).png",
|
| 169 |
+
"caption": "(d)\nFigure 1: An example of extracting a particular temporal motif from a temporal graph. (a) is an example of \u03b4\ud835\udeff\\deltaitalic_\u03b4-temporal motif M\ud835\udc40Mitalic_M with a given \u03b4=10\ud835\udeff10\\delta=10italic_\u03b4 = 10; (b) is a temporal graph with edges appearing at the times shown on each edge; (c) shows an instance of \u03b4\ud835\udeff\\deltaitalic_\u03b4-temporal motifs in the temporal graph; (d) is not a \u03b4\ud835\udeff\\deltaitalic_\u03b4-temporal motif because the difference between the timestamp of the first temporal edge and the timestamp of the last temporal edge exceeds the given \u03b4\ud835\udeff\\deltaitalic_\u03b4.",
|
| 170 |
+
"url": "http://arxiv.org/html/2402.09272v2/x4.png"
|
| 171 |
+
},
|
| 172 |
+
"2": {
|
| 173 |
+
"figure_path": "2402.09272v2_figure_2.png",
|
| 174 |
+
"caption": "Figure 2: Possible three-edge up-to-three node motifs considered in this work, using the same enumeration as in Paranjape et al [21] with added colouring to indicate sub-types. For local motif counts, we consider an additional four two-node motifs which are exactly those pictured but with directions reversed (the direction becomes important when counting from the perspective of a node).",
|
| 175 |
+
"url": "http://arxiv.org/html/2402.09272v2/extracted/5901772/images/Figure_2.png"
|
| 176 |
+
},
|
| 177 |
+
"3(a)": {
|
| 178 |
+
"figure_path": "2402.09272v2_figure_3(a).png",
|
| 179 |
+
"caption": "(a) Alphabay\nFigure 3: Local motif counts for nodes in the Alphabay network with \u03b4\ud835\udeff\\deltaitalic_\u03b4 set to one hour. The plot shows total motif count versus total transactions (both incoming and outgoing). The nodes are coloured by their wallet balance (total incoming transaction value minus total outgoing transaction value). Note the scale is logarithmic in both axes and in the colouring.",
|
| 180 |
+
"url": "http://arxiv.org/html/2402.09272v2/extracted/5901772/images/Figure_3a.png"
|
| 181 |
+
},
|
| 182 |
+
"3(b)": {
|
| 183 |
+
"figure_path": "2402.09272v2_figure_3(b).png",
|
| 184 |
+
"caption": "(b) Hydra\nFigure 3: Local motif counts for nodes in the Alphabay network with \u03b4\ud835\udeff\\deltaitalic_\u03b4 set to one hour. The plot shows total motif count versus total transactions (both incoming and outgoing). The nodes are coloured by their wallet balance (total incoming transaction value minus total outgoing transaction value). Note the scale is logarithmic in both axes and in the colouring.",
|
| 185 |
+
"url": "http://arxiv.org/html/2402.09272v2/extracted/5901772/images/Figure_3b.png"
|
| 186 |
+
},
|
| 187 |
+
"3(c)": {
|
| 188 |
+
"figure_path": "2402.09272v2_figure_3(c).png",
|
| 189 |
+
"caption": "(c) NFTs\nFigure 3: Local motif counts for nodes in the Alphabay network with \u03b4\ud835\udeff\\deltaitalic_\u03b4 set to one hour. The plot shows total motif count versus total transactions (both incoming and outgoing). The nodes are coloured by their wallet balance (total incoming transaction value minus total outgoing transaction value). Note the scale is logarithmic in both axes and in the colouring.",
|
| 190 |
+
"url": "http://arxiv.org/html/2402.09272v2/extracted/5901772/images/Figure_3c.png"
|
| 191 |
+
},
|
| 192 |
+
"4(a)": {
|
| 193 |
+
"figure_path": "2402.09272v2_figure_4(a).png",
|
| 194 |
+
"caption": "(a) Alphabay, \u03b4=1\ud835\udeff1\\delta=1italic_\u03b4 = 1 hour\nFigure 4: Global motif counts for each dataset (top) compared with a null time shuffled model (bottom) where the lower graphs show the ratio of motif count in the unshuffled versus shuffled data. (See text for axes interpretation.)",
|
| 195 |
+
"url": "http://arxiv.org/html/2402.09272v2/x5.png"
|
| 196 |
+
},
|
| 197 |
+
"4(b)": {
|
| 198 |
+
"figure_path": "2402.09272v2_figure_4(b).png",
|
| 199 |
+
"caption": "(b) Hydra, \u03b4=1\ud835\udeff1\\delta=1italic_\u03b4 = 1 hour\nFigure 4: Global motif counts for each dataset (top) compared with a null time shuffled model (bottom) where the lower graphs show the ratio of motif count in the unshuffled versus shuffled data. (See text for axes interpretation.)",
|
| 200 |
+
"url": "http://arxiv.org/html/2402.09272v2/x6.png"
|
| 201 |
+
},
|
| 202 |
+
"4(c)": {
|
| 203 |
+
"figure_path": "2402.09272v2_figure_4(c).png",
|
| 204 |
+
"caption": "(c) NFTs, \u03b4=1\ud835\udeff1\\delta=1italic_\u03b4 = 1 day\nFigure 4: Global motif counts for each dataset (top) compared with a null time shuffled model (bottom) where the lower graphs show the ratio of motif count in the unshuffled versus shuffled data. (See text for axes interpretation.)",
|
| 205 |
+
"url": "http://arxiv.org/html/2402.09272v2/x7.png"
|
| 206 |
+
},
|
| 207 |
+
"4(d)": {
|
| 208 |
+
"figure_path": "2402.09272v2_figure_4(d).png",
|
| 209 |
+
"caption": "(d) Alphabay vs time-shuffled, \u03b4=1\ud835\udeff1\\delta=1italic_\u03b4 = 1 hour\nFigure 4: Global motif counts for each dataset (top) compared with a null time shuffled model (bottom) where the lower graphs show the ratio of motif count in the unshuffled versus shuffled data. (See text for axes interpretation.)",
|
| 210 |
+
"url": "http://arxiv.org/html/2402.09272v2/extracted/5901772/images/Figure_4d.png"
|
| 211 |
+
},
|
| 212 |
+
"4(e)": {
|
| 213 |
+
"figure_path": "2402.09272v2_figure_4(e).png",
|
| 214 |
+
"caption": "(e) Hydra vs time-shuffled, \u03b4=1\ud835\udeff1\\delta=1italic_\u03b4 = 1 hour\nFigure 4: Global motif counts for each dataset (top) compared with a null time shuffled model (bottom) where the lower graphs show the ratio of motif count in the unshuffled versus shuffled data. (See text for axes interpretation.)",
|
| 215 |
+
"url": "http://arxiv.org/html/2402.09272v2/extracted/5901772/images/Figure_4e.png"
|
| 216 |
+
},
|
| 217 |
+
"4(f)": {
|
| 218 |
+
"figure_path": "2402.09272v2_figure_4(f).png",
|
| 219 |
+
"caption": "(f) NFTs vs time-shuffled, \u03b4=1\ud835\udeff1\\delta=1italic_\u03b4 = 1 day\nFigure 4: Global motif counts for each dataset (top) compared with a null time shuffled model (bottom) where the lower graphs show the ratio of motif count in the unshuffled versus shuffled data. (See text for axes interpretation.)",
|
| 220 |
+
"url": "http://arxiv.org/html/2402.09272v2/extracted/5901772/images/Figure_4f.png"
|
| 221 |
+
},
|
| 222 |
+
"5(a)": {
|
| 223 |
+
"figure_path": "2402.09272v2_figure_5(a).png",
|
| 224 |
+
"caption": "(a) Alphabay (\u03b4\ud835\udeff\\deltaitalic_\u03b4=1 hour)\nFigure 5: Complementary cumulative distribution function of the motifs each node participates in, grouped into categories of similar motifs for Alphabay and Hydra and NFT. Both axes are log-scaled.",
|
| 225 |
+
"url": "http://arxiv.org/html/2402.09272v2/extracted/5901772/images/Figure_5a.png"
|
| 226 |
+
},
|
| 227 |
+
"5(b)": {
|
| 228 |
+
"figure_path": "2402.09272v2_figure_5(b).png",
|
| 229 |
+
"caption": "(b) Hydra (\u03b4\ud835\udeff\\deltaitalic_\u03b4=1 hour)\nFigure 5: Complementary cumulative distribution function of the motifs each node participates in, grouped into categories of similar motifs for Alphabay and Hydra and NFT. Both axes are log-scaled.",
|
| 230 |
+
"url": "http://arxiv.org/html/2402.09272v2/extracted/5901772/images/Figure_5b.png"
|
| 231 |
+
},
|
| 232 |
+
"5(c)": {
|
| 233 |
+
"figure_path": "2402.09272v2_figure_5(c).png",
|
| 234 |
+
"caption": "(c) NFT (\u03b4\ud835\udeff\\deltaitalic_\u03b4 = 1 day)\nFigure 5: Complementary cumulative distribution function of the motifs each node participates in, grouped into categories of similar motifs for Alphabay and Hydra and NFT. Both axes are log-scaled.",
|
| 235 |
+
"url": "http://arxiv.org/html/2402.09272v2/extracted/5901772/images/Figure_5c.png"
|
| 236 |
+
},
|
| 237 |
+
"6": {
|
| 238 |
+
"figure_path": "2402.09272v2_figure_6.png",
|
| 239 |
+
"caption": "Figure 6: Motif \u2018signatures\u2019 among the top 10 entities in Alphabay, Hydra and NFTs. Each column is obtained by taking an entity\u2019s motif vector and dividing by the total number of motifs for that entity, relative to the motif split in the timestamp-shuffled model. The entities are displayed left to right in descending order of total motif counts.",
|
| 240 |
+
"url": "http://arxiv.org/html/2402.09272v2/x8.png"
|
| 241 |
+
},
|
| 242 |
+
"7(a)": {
|
| 243 |
+
"figure_path": "2402.09272v2_figure_7(a).png",
|
| 244 |
+
"caption": "(a) Monthly graph-wide motif counts over time for the Alphabay dataset separated by type. The dashed line shows the monthly number of transactions.\nFigure 7: Results on 1-hour motif counts for both datasets on a month-by-month basis. The left hand plots show the total monthly motif counts for each dataset grouped by motif types; the right hand shows these counts as a proportion of all motifs present.",
|
| 245 |
+
"url": "http://arxiv.org/html/2402.09272v2/x9.png"
|
| 246 |
+
},
|
| 247 |
+
"7(b)": {
|
| 248 |
+
"figure_path": "2402.09272v2_figure_7(b).png",
|
| 249 |
+
"caption": "(b) The monthly counts of different motif types in Alphabay as a proportion of all motifs. The classification of motifs and colour coding is the same as that used in (a).\nFigure 7: Results on 1-hour motif counts for both datasets on a month-by-month basis. The left hand plots show the total monthly motif counts for each dataset grouped by motif types; the right hand shows these counts as a proportion of all motifs present.",
|
| 250 |
+
"url": "http://arxiv.org/html/2402.09272v2/x10.png"
|
| 251 |
+
},
|
| 252 |
+
"7(c)": {
|
| 253 |
+
"figure_path": "2402.09272v2_figure_7(c).png",
|
| 254 |
+
"caption": "(c) Monthly graph-wide motif counts over time for the Hydra dataset separated by type. The dashed line shows the monthly number of transactions.\nFigure 7: Results on 1-hour motif counts for both datasets on a month-by-month basis. The left hand plots show the total monthly motif counts for each dataset grouped by motif types; the right hand shows these counts as a proportion of all motifs present.",
|
| 255 |
+
"url": "http://arxiv.org/html/2402.09272v2/x11.png"
|
| 256 |
+
},
|
| 257 |
+
"7(d)": {
|
| 258 |
+
"figure_path": "2402.09272v2_figure_7(d).png",
|
| 259 |
+
"caption": "(d) The monthly counts of different motif types in Hydra as a proportion of all motifs. The classification of motifs and colour coding is the same as that used in (a).\nFigure 7: Results on 1-hour motif counts for both datasets on a month-by-month basis. The left hand plots show the total monthly motif counts for each dataset grouped by motif types; the right hand shows these counts as a proportion of all motifs present.",
|
| 260 |
+
"url": "http://arxiv.org/html/2402.09272v2/x12.png"
|
| 261 |
+
},
|
| 262 |
+
"7(e)": {
|
| 263 |
+
"figure_path": "2402.09272v2_figure_7(e).png",
|
| 264 |
+
"caption": "(e) Monthly graph-wide motif counts over time for the NFT trade dataset separated by type. The dashed line shows the monthly number of transactions.\nFigure 7: Results on 1-hour motif counts for both datasets on a month-by-month basis. The left hand plots show the total monthly motif counts for each dataset grouped by motif types; the right hand shows these counts as a proportion of all motifs present.",
|
| 265 |
+
"url": "http://arxiv.org/html/2402.09272v2/x13.png"
|
| 266 |
+
},
|
| 267 |
+
"7(f)": {
|
| 268 |
+
"figure_path": "2402.09272v2_figure_7(f).png",
|
| 269 |
+
"caption": "(f) The monthly counts of different motif types in the NFT market as a proportion of all motifs. The classification of motifs and colour coding is the same as that used in (a).\nFigure 7: Results on 1-hour motif counts for both datasets on a month-by-month basis. The left hand plots show the total monthly motif counts for each dataset grouped by motif types; the right hand shows these counts as a proportion of all motifs present.",
|
| 270 |
+
"url": "http://arxiv.org/html/2402.09272v2/x14.png"
|
| 271 |
+
},
|
| 272 |
+
"8(a)": {
|
| 273 |
+
"figure_path": "2402.09272v2_figure_8(a).png",
|
| 274 |
+
"caption": "(a) Alphabay transactions\nFigure 8: The number of each type of motif as \u03b4\ud835\udeff\\deltaitalic_\u03b4 the timescale is increased a single hour at a time for (a) Alphabay (b) NFT purchases. A peak in the graph at a timescale of t\ud835\udc61titalic_t hours shows that a large number of motifs are introduced by moving the timescale from t\u22121\ud835\udc611t-1italic_t - 1 to t\ud835\udc61titalic_t hours. In each graph, the blue line shows the count for the real dataset and the red line shows the mean value for the timestamp shuffled data calculated over 10 realisations, with standard deviation error bars.",
|
| 275 |
+
"url": "http://arxiv.org/html/2402.09272v2/x15.png"
|
| 276 |
+
},
|
| 277 |
+
"8(b)": {
|
| 278 |
+
"figure_path": "2402.09272v2_figure_8(b).png",
|
| 279 |
+
"caption": "(b) NFT purchases\nFigure 8: The number of each type of motif as \u03b4\ud835\udeff\\deltaitalic_\u03b4 the timescale is increased a single hour at a time for (a) Alphabay (b) NFT purchases. A peak in the graph at a timescale of t\ud835\udc61titalic_t hours shows that a large number of motifs are introduced by moving the timescale from t\u22121\ud835\udc611t-1italic_t - 1 to t\ud835\udc61titalic_t hours. In each graph, the blue line shows the count for the real dataset and the red line shows the mean value for the timestamp shuffled data calculated over 10 realisations, with standard deviation error bars.",
|
| 280 |
+
"url": "http://arxiv.org/html/2402.09272v2/x16.png"
|
| 281 |
+
},
|
| 282 |
+
"9": {
|
| 283 |
+
"figure_path": "2402.09272v2_figure_9.png",
|
| 284 |
+
"caption": "Figure 9: The time taken to count motifs for a range of \u03b4\ud835\udeff\\deltaitalic_\u03b4 values. For each value \u03b4\ud835\udeff\\deltaitalic_\u03b4 on the x\ud835\udc65xitalic_x-axis, the corresponding y\ud835\udc66yitalic_y value is the time taken to count motifs for the input array [1\u2062 hour,2\u2062 hours,\u2026,\u03b4\u2062 hours]1 hour2 hours\u2026\ud835\udeff hours\\left[1\\text{ hour},2\\text{ hours},\\dots,\\delta\\text{ hours}\\right][ 1 hour , 2 hours , \u2026 , italic_\u03b4 hours ]. The result is averaged over 10 experiments with shaded 95% confidence intervals.",
|
| 285 |
+
"url": "http://arxiv.org/html/2402.09272v2/x17.png"
|
| 286 |
+
}
|
| 287 |
+
},
|
| 288 |
+
"validation": true,
|
| 289 |
+
"references": [
|
| 290 |
+
{
|
| 291 |
+
"1": {
|
| 292 |
+
"title": "Input-output relationship in social communications characterized by\nspike train analysis.",
|
| 293 |
+
"author": "T. Aoki, T. Takaguchi, R. Kobayashi, and R. Lambiotte.",
|
| 294 |
+
"venue": "Physical Review E, 94(4):042313, 2016.",
|
| 295 |
+
"url": null
|
| 296 |
+
}
|
| 297 |
+
},
|
| 298 |
+
{
|
| 299 |
+
"2": {
|
| 300 |
+
"title": "Moving with the times: investigating the alt-right network Gab with\ntemporal interaction graphs.",
|
| 301 |
+
"author": "N. A. Arnold, B. Steer, I. Hafnaoui, H. A. Parada G, R. J. Mondrag\u00f3n,\nF. Cuadrado, and R. G. Clegg.",
|
| 302 |
+
"venue": "Proceedings of the ACM on Human-Computer Interaction,\n5(CSCW2):1\u201317, 2021.",
|
| 303 |
+
"url": null
|
| 304 |
+
}
|
| 305 |
+
},
|
| 306 |
+
{
|
| 307 |
+
"3": {
|
| 308 |
+
"title": "Characterizing growth in decentralized socio-economic networks\nthrough triadic closure-related network motifs.",
|
| 309 |
+
"author": "C. T. Ba, M. Zignani, and S. Gaito.",
|
| 310 |
+
"venue": "Online Social Networks and Media, 37-38:100266, 2023.",
|
| 311 |
+
"url": null
|
| 312 |
+
}
|
| 313 |
+
},
|
| 314 |
+
{
|
| 315 |
+
"4": {
|
| 316 |
+
"title": "Temporal network analysis of email communication patterns in a long\nstanding hierarchy.",
|
| 317 |
+
"author": "M. R. Barnes, M. Karan, S. McQuistin, C. Perkins, G. Tyson, M. Purver,\nI. Castro, and R. G. Clegg.",
|
| 318 |
+
"venue": "arXiv preprint arXiv:2311.13442, 2023.",
|
| 319 |
+
"url": null
|
| 320 |
+
}
|
| 321 |
+
},
|
| 322 |
+
{
|
| 323 |
+
"5": {
|
| 324 |
+
"title": "Deep learning Ethereum token price prediction with network motif\nanalysis.",
|
| 325 |
+
"author": "Y. Chen and H. K. T. Ng.",
|
| 326 |
+
"venue": "In 2019 International Conference on Data Mining Workshops\n(ICDMW), pages 232\u2013237. IEEE, 2019.",
|
| 327 |
+
"url": null
|
| 328 |
+
}
|
| 329 |
+
},
|
| 330 |
+
{
|
| 331 |
+
"6": {
|
| 332 |
+
"title": "Collective dynamics of dark web marketplaces.",
|
| 333 |
+
"author": "A. ElBahrawy, L. Alessandretti, L. Rusnac, D. Goldsmith, A. Teytelboym, and\nA. Baronchelli.",
|
| 334 |
+
"venue": "Scientific reports, 10(1):1\u20138, 2020.",
|
| 335 |
+
"url": null
|
| 336 |
+
}
|
| 337 |
+
},
|
| 338 |
+
{
|
| 339 |
+
"7": {
|
| 340 |
+
"title": "Disentangling the growth of blockchain-based networks by graph\nevolution rule mining.",
|
| 341 |
+
"author": "A. Galdeman, M. Zignani, and S. Gaito.",
|
| 342 |
+
"venue": "In 2022 IEEE 9th International Conference on Data Science and\nAdvanced Analytics (DSAA), pages 1\u201310, 2022.",
|
| 343 |
+
"url": null
|
| 344 |
+
}
|
| 345 |
+
},
|
| 346 |
+
{
|
| 347 |
+
"8": {
|
| 348 |
+
"title": "Randomized reference models for temporal networks.",
|
| 349 |
+
"author": "L. Gauvin, M. G\u00e9nois, M. Karsai, M. Kivel\u00e4, T. Takaguchi, E. Valdano,\nand C. L. Vestergaard.",
|
| 350 |
+
"venue": "SIAM Review, 64(4):763\u2013830, 2022.",
|
| 351 |
+
"url": null
|
| 352 |
+
}
|
| 353 |
+
},
|
| 354 |
+
{
|
| 355 |
+
"9": {
|
| 356 |
+
"title": "Graphsense: A general-purpose cryptoasset analytics platform.",
|
| 357 |
+
"author": "B. Haslhofer, R. St\u00fctz, M. Romiti, and R. King.",
|
| 358 |
+
"venue": "arXiv preprint arXiv:2102.13613, 2021.",
|
| 359 |
+
"url": null
|
| 360 |
+
}
|
| 361 |
+
},
|
| 362 |
+
{
|
| 363 |
+
"10": {
|
| 364 |
+
"title": "Local structure in social networks.",
|
| 365 |
+
"author": "P. W. Holland and S. Leinhardt.",
|
| 366 |
+
"venue": "Sociological methodology, 7:1\u201345, 1976.",
|
| 367 |
+
"url": null
|
| 368 |
+
}
|
| 369 |
+
},
|
| 370 |
+
{
|
| 371 |
+
"11": {
|
| 372 |
+
"title": "Temporal motifs in time-dependent networks.",
|
| 373 |
+
"author": "L. Kovanen, M. Karsai, K. Kaski, J. Kert\u00e9sz, and J. Saram\u00e4ki.",
|
| 374 |
+
"venue": "Journal of Statistical Mechanics: Theory and Experiment,\n2011(11):P11005, 2011.",
|
| 375 |
+
"url": null
|
| 376 |
+
}
|
| 377 |
+
},
|
| 378 |
+
{
|
| 379 |
+
"12": {
|
| 380 |
+
"title": "Temporal motifs reveal homophily, gender-specific patterns, and group\ntalk in call sequences.",
|
| 381 |
+
"author": "L. Kovanen, K. Kaski, J. Kert\u00e9sz, and J. Saram\u00e4ki.",
|
| 382 |
+
"venue": "Proceedings of the National Academy of Sciences,\n110(45):18070\u201318075, 2013.",
|
| 383 |
+
"url": null
|
| 384 |
+
}
|
| 385 |
+
},
|
| 386 |
+
{
|
| 387 |
+
"13": {
|
| 388 |
+
"title": "Measurements, analyses, and insights on the entire Ethereum\nblockchain network.",
|
| 389 |
+
"author": "X. T. Lee, A. Khan, S. Sen Gupta, Y. H. Ong, and X. Liu.",
|
| 390 |
+
"venue": "In Proceedings of The Web Conference 2020, pages 155\u2013166,\n2020.",
|
| 391 |
+
"url": null
|
| 392 |
+
}
|
| 393 |
+
},
|
| 394 |
+
{
|
| 395 |
+
"14": {
|
| 396 |
+
"title": "Snap: A general-purpose network analysis and graph-mining library.",
|
| 397 |
+
"author": "J. Leskovec and R. Sosi\u010d.",
|
| 398 |
+
"venue": "ACM Transactions on Intelligent Systems and Technology (TIST),\n8(1):1, 2016.",
|
| 399 |
+
"url": null
|
| 400 |
+
}
|
| 401 |
+
},
|
| 402 |
+
{
|
| 403 |
+
"15": {
|
| 404 |
+
"title": "Temporal motifs for financial networks: A study on Mercari, JPMC,\nand Venmo platforms.",
|
| 405 |
+
"author": "P. Liu, R. Acharyya, R. E. Tillman, S. Kimura, N. Masuda, and A. E.\nSar\u0131y\u00fcce.",
|
| 406 |
+
"venue": "arXiv preprint arXiv:2301.07791, 2023.",
|
| 407 |
+
"url": null
|
| 408 |
+
}
|
| 409 |
+
},
|
| 410 |
+
{
|
| 411 |
+
"16": {
|
| 412 |
+
"title": "Temporal motifs in patent opposition and collaboration networks.",
|
| 413 |
+
"author": "P. Liu, N. Masuda, T. Kito, and A. E. Sar\u0131y\u00fcce.",
|
| 414 |
+
"venue": "Scientific reports, 12(1):1917, 2022.",
|
| 415 |
+
"url": null
|
| 416 |
+
}
|
| 417 |
+
},
|
| 418 |
+
{
|
| 419 |
+
"17": {
|
| 420 |
+
"title": "Network motifs: Simple building blocks of complex networks.",
|
| 421 |
+
"author": "R. Milo, S. Shen-Orr, S. Itzkovitz, N. Kashtan, D. Chklovskii, and U. Alon.",
|
| 422 |
+
"venue": "Science, 298(5594):824\u2013827, 2002.",
|
| 423 |
+
"url": null
|
| 424 |
+
}
|
| 425 |
+
},
|
| 426 |
+
{
|
| 427 |
+
"18": {
|
| 428 |
+
"title": "Mapping the NFT revolution: Market trends, trade networks and\nvisual features, Sept. 2021.",
|
| 429 |
+
"author": "M. Nadini, L. Alessandretti, F. Di Giacinto, M. Martino, L. M. Aiello, and\nA. Baronchelli.",
|
| 430 |
+
"venue": null,
|
| 431 |
+
"url": null
|
| 432 |
+
}
|
| 433 |
+
},
|
| 434 |
+
{
|
| 435 |
+
"19": {
|
| 436 |
+
"title": "Emergence and structure of decentralised trade networks around dark\nweb marketplaces.",
|
| 437 |
+
"author": "M. Nadini, A. Bracci, A. ElBahrawy, P. Gradwell, A. Teytelboym, and\nA. Baronchelli.",
|
| 438 |
+
"venue": "Scientific reports, 12(1):1\u20139, 2022.",
|
| 439 |
+
"url": null
|
| 440 |
+
}
|
| 441 |
+
},
|
| 442 |
+
{
|
| 443 |
+
"20": {
|
| 444 |
+
"title": "Bitcoin: A peer-to-peer electronic cash system.",
|
| 445 |
+
"author": "S. Nakamoto.",
|
| 446 |
+
"venue": "Decentralized business review, page 21260, 2008.",
|
| 447 |
+
"url": null
|
| 448 |
+
}
|
| 449 |
+
},
|
| 450 |
+
{
|
| 451 |
+
"21": {
|
| 452 |
+
"title": "Motifs in temporal networks.",
|
| 453 |
+
"author": "A. Paranjape, A. R. Benson, and J. Leskovec.",
|
| 454 |
+
"venue": "In Proceedings of the tenth ACM international conference on web\nsearch and data mining, pages 601\u2013610, 2017.",
|
| 455 |
+
"url": null
|
| 456 |
+
}
|
| 457 |
+
},
|
| 458 |
+
{
|
| 459 |
+
"22": {
|
| 460 |
+
"title": "Motifs in brain networks.",
|
| 461 |
+
"author": "O. Sporns and R. K\u00f6tter.",
|
| 462 |
+
"venue": "PLoS biology, 2(11):e369, 2004.",
|
| 463 |
+
"url": null
|
| 464 |
+
}
|
| 465 |
+
},
|
| 466 |
+
{
|
| 467 |
+
"23": {
|
| 468 |
+
"title": "Raphtory: The temporal graph engine for Rust and Python.",
|
| 469 |
+
"author": "B. Steer, N. A. Arnold, C. Tidiane, R. Lambiotte, H. Yousaf, L. Jeub,\nF. Murariu, S. Kapoor, P. Rico, R. Chan, L. Chan, J. Alford, R. G. Clegg,\nF. Cuadrado, M. R. Barnes, P. Zhong, J. Pougu\u00e9-Biyong, and A. Alnaimi.",
|
| 470 |
+
"venue": "Journal of Open Source Software, 9(95):5940, 2024.",
|
| 471 |
+
"url": null
|
| 472 |
+
}
|
| 473 |
+
},
|
| 474 |
+
{
|
| 475 |
+
"24": {
|
| 476 |
+
"title": "Raphtory: Streaming analysis of distributed temporal graphs.",
|
| 477 |
+
"author": "B. Steer, F. Cuadrado, and R. Clegg.",
|
| 478 |
+
"venue": "Future Generation Computer Systems, 102:453\u2013464, 2020.",
|
| 479 |
+
"url": null
|
| 480 |
+
}
|
| 481 |
+
},
|
| 482 |
+
{
|
| 483 |
+
"25": {
|
| 484 |
+
"title": "NFT wash trading: Quantifying suspicious behaviour in NFT\nmarkets.",
|
| 485 |
+
"author": "V. von Wachter, J. R. Jensen, F. Regner, and O. Ross.",
|
| 486 |
+
"venue": "arXiv preprint arXiv:2202.03866, 2022.",
|
| 487 |
+
"url": null
|
| 488 |
+
}
|
| 489 |
+
},
|
| 490 |
+
{
|
| 491 |
+
"26": {
|
| 492 |
+
"title": "The topological relationship between the large-scale attributes and\nlocal interaction patterns of complex networks.",
|
| 493 |
+
"author": "A. V\u00e1zquez, R. Dobrin, D. Sergi, J.-P. Eckmann, Z. N. Oltvai, and A.-L.\nBarab\u00e1si.",
|
| 494 |
+
"venue": "Proceedings of the National Academy of Sciences,\n101(52):17940\u201317945, 2004.",
|
| 495 |
+
"url": null
|
| 496 |
+
}
|
| 497 |
+
},
|
| 498 |
+
{
|
| 499 |
+
"27": {
|
| 500 |
+
"title": "Detecting ethereum phishing scams with temporal motif features of\nsubgraph.",
|
| 501 |
+
"author": "Y. Wang, H. Wang, X. Lu, L. Zhou, and L. Liu.",
|
| 502 |
+
"venue": "In 2023 IEEE Symposium on Computers and Communications (ISCC),\npages 631\u2013636. IEEE, 2023.",
|
| 503 |
+
"url": null
|
| 504 |
+
}
|
| 505 |
+
},
|
| 506 |
+
{
|
| 507 |
+
"28": {
|
| 508 |
+
"title": "The shape of memory in temporal networks.",
|
| 509 |
+
"author": "O. E. Williams, L. Lacasa, A. P. Mill\u00e1n, and V. Latora.",
|
| 510 |
+
"venue": "Nature communications, 13(1):499, 2022.",
|
| 511 |
+
"url": null
|
| 512 |
+
}
|
| 513 |
+
},
|
| 514 |
+
{
|
| 515 |
+
"29": {
|
| 516 |
+
"title": "Detecting mixing services via mining Bitcoin transaction network\nwith hybrid motifs.",
|
| 517 |
+
"author": "J. Wu, J. Liu, W. Chen, H. Huang, Z. Zheng, and Y. Zhang.",
|
| 518 |
+
"venue": "IEEE Transactions on Systems, Man, and Cybernetics: Systems,\n52(4):2237\u20132249, 2021.",
|
| 519 |
+
"url": null
|
| 520 |
+
}
|
| 521 |
+
},
|
| 522 |
+
{
|
| 523 |
+
"30": {
|
| 524 |
+
"title": "Know your transactions: Real-time and generic transaction semantic\nrepresentation on blockchain & web3 ecosystem.",
|
| 525 |
+
"author": "Z. Wu, J. Liu, J. Wu, Z. Zheng, X. Luo, and T. Chen.",
|
| 526 |
+
"venue": "In Proceedings of the ACM Web Conference 2023, pages\n1918\u20131927, 2023.",
|
| 527 |
+
"url": null
|
| 528 |
+
}
|
| 529 |
+
},
|
| 530 |
+
{
|
| 531 |
+
"31": {
|
| 532 |
+
"title": "Revealing the hidden language of complex networks.",
|
| 533 |
+
"author": "\u00d6. N. Yavero\u011flu, N. Malod-Dognin, D. Davis, Z. Levnajic, V. Janjic,\nR. Karapandza, A. Stojmirovic, and N. Pr\u017eulj.",
|
| 534 |
+
"venue": "Scientific reports, 4(1):4547, 2014.",
|
| 535 |
+
"url": null
|
| 536 |
+
}
|
| 537 |
+
},
|
| 538 |
+
{
|
| 539 |
+
"32": {
|
| 540 |
+
"title": "Non-Markovian paths and cycles in NFT trades.",
|
| 541 |
+
"author": "H. Yousaf, N. A. Arnold, R. Lambiotte, T. LaRock, R. G. Clegg, P. Zhong,\nA. Alnaimi, and B. Steer.",
|
| 542 |
+
"venue": "arXiv preprint arXiv:2303.11181, 2023.",
|
| 543 |
+
"url": null
|
| 544 |
+
}
|
| 545 |
+
}
|
| 546 |
+
],
|
| 547 |
+
"url": "http://arxiv.org/html/2402.09272v2"
|
| 548 |
+
}
|
20241004/2402.17512v4.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2403.07721v7.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2403.12025v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2403.15744v6.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2404.13477v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2404.17451v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2404.18533v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2405.07649v2.json
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Efficient Matrix Factorization Via Householder Reflections",
|
| 3 |
+
"abstract": "Motivated by orthogonal dictionary learning problems, we propose a novel method for matrix factorization, where the data matrix is a product of a Householder matrix and a binary matrix . First, we show that the exact recovery of the factors and from is guaranteed with columns in . Next, we show approximate recovery (in the sense) can be done in polynomial time() with columns in . We hope the techniques in this work help in developing alternate algorithms for orthogonal dictionary learning.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "The orthogonal dictionary factorization problem is posed as follows: Given a matrix , can we find an orthogonal matrix and a coefficient matrix such that . Variants of this problem appear in standard sparse signal processing literature [4 ###reference_b4###] and signal processing-based graph learning approaches [3 ###reference_b3###], [8 ###reference_b8###]. In the latter, the orthogonal matrix is known to be an eigenvector matrix of a suitable graph; in particular, we note that the orthogonal matrix has some additional structure. The goal of this work is to investigate recovery guarantees on and under strong structural assumptions on the orthogonal matrix.\nThe standard unstructured dictionary learning problem () has been well investigated in literature. The seminal work of Olshausen et. al in 1997 [9 ###reference_b9###] involved recovering sparse representations of an image. Since then, extensive research has led to several fascinating methods to solve the aforementioned problem. Engan et al. [10 ###reference_b10###] proposed the method of optimal directions (MOD), while Aharon et al. [11 ###reference_b11###] proposed the K-SVD method for sparse representations of signals. Mairal et. al provided an online dictionary learning method for sparse coding [12 ###reference_b12###], which was further improved by adding sparsity constraints. It has been shown [20 ###reference_b20###] that recovering and under the assumption of a Bernoulli-Gaussian model on for an arbitrary dictionary can be reduced to the orthogonal dictionary learning problem ().\nNew algorithms for orthogonal dictionary learning, based on alternate minimization, were proposed by Arora et. al [14 ###reference_b14###], [13 ###reference_b13###], while some results obtained on studying local identifiability [15 ###reference_b15###], [16 ###reference_b16###] have also been in the mix. Li et. al [19 ###reference_b19###] used alternating gradient descent for non-negative matrix factorization with strong correlations. Traditionally, minimization and its convex relaxation- minimization have been widely used in this field (Spielman et al. [22 ###reference_b22###]). Recently, Zhai et al. [21 ###reference_b21###] developed an iterative approach for complete dictionary learning via minimization over an orthogonal group.\nTheoretical results pertinent to the above problem are usually of two kinds: proving the validity of proposed algorithms, and identifying fundamental conditions (i.e. number of columns required) for any algorithm to recover the factors and .\nThis work focuses on the problem of orthogonal dictionary learning and is motivated by the following observations:\nSome applications, specifically graph learning, place additional structural assumptions on the orthogonal dictionary\nMost of the existing work is on unstructured orthogonal dictionary learning\nMost of the existing techniques are iterative, and are sensitive to initialization [23 ###reference_b23###]\nThis naturally motivates us to investigate the effect of significant structural constraints on the orthogonal matrix. We start the above investigation by assuming that the orthogonal matrix is a Householder matrix. We note that every orthogonal matrix can be expressed as a product of Householder matrices [5 ###reference_b5###, 6 ###reference_b6###], thus allowing for the development of a new procedure to solve the orthogonal dictionary factorization problem in the future. 
Solutions to this problem could potentially lead to a new set of non-iterative and initialization-free algorithms to solve the above problem.\nIn this paper, we analyze the fundamental unit of orthogonal matrices, Householder matrices, as a first step to solving the general orthogonal dictionary factorization problem. We utilize the correlation between the entries of a Householder matrix to develop an algorithm which can recover the factor matrices completely deterministically. As compared to the [22 ###reference_b22###] bound on the number of columns in the coefficient matrix for orthogonal dictionary learning, we show that recovery of Householder matrices is possible in columns; albeit with exponential time. To recover the matrices in polynomial time, we allow for some small errors. We show that (with a Bernoulli model on the coefficient matrix), not only is the recovery possible in polynomial time, but it achieves the columns bound in the sense. It is also an \u2019all at a time\u2019 recovery, i.e., it doesn\u2019t find the matrices one column at a time. This paves the way for a new approach to possibly solve the orthogonal dictionary factorization problem. As compared to previous methods, our technique is completely free of initialization and is a non-iterative approach with theoretical guarantees for recovery. The computational complexity involved in the calculation of the factors is , which is substantially smaller than previous methods such as [21 ###reference_b21###]."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Problem Formulation",
|
| 15 |
+
"text": "We describe the 2 primary scenarios under which we solve our problem."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "Matrix Recovery for General Binary Matrix",
|
| 21 |
+
"text": "Consider the following setup:\nFollowing up on the introduction, consider the product\n, where is an orthogonal matrix for some (unknown) unit vector . We refer by the entries of . Given the data matrix , we want to try and estimates (and consequently the vector ) and . We also want to establish why this is tractable for a binary matrix but not for a general matrix. Our error metric is . Some of our results pertain to perfect recovery, i.e. . \nNote that we say if for some constant depending only on ; for all large enough."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "Matrix Recovery for Bernoulli Matrix",
|
| 27 |
+
"text": "We make the following changes to the previous case:\nDefine a sparsity model on , where elements are filled by drawing from an iid Bernoulli distribution with a parameter ."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "Main Results",
|
| 33 |
+
"text": ""
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3.1",
|
| 37 |
+
"parent_section_id": "3",
|
| 38 |
+
"section_name": "Matrix Recovery for General Binary Matrix",
|
| 39 |
+
"text": "(Zero error achievability) For the general model, , where and is an arbitrary binary matrix, can be uniquely recovered with columns in (In fact, just two (distinct) columns suffice). Note that there is no assumption of a probabilistic model for the entries of .\nIf is an arbitrary (non-binary) matrix, cannot be uniquely recovered (even up to permutation and sign) with any\nnumber of columns . Thus, the assumption that is a binary matrix is justified for recovery of the Householder dictionary."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.2",
|
| 43 |
+
"parent_section_id": "3",
|
| 44 |
+
"section_name": "Matrix Recovery for Bernoulli Matrix",
|
| 45 |
+
"text": "(Parameter Recovery): Consider the parametric model on the coefficient matrix described in (1 ###reference_###). For any orthogonal matrix , the Bernoulli parameter can be recovered with the probability of error upper bounded by the following relation:\nThus, recovery with high accuracy is possible in columns. The computational complexity of calculating is .\n(Householder Recovery) Consider the parametric model described in (1 ###reference_###). Then, the unit vector defining the Householder matrix can be recovered with high accuracy with the following recovery guarantee\nwhere and . Thus for\nThe computational complexity involved in calculating is ."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4",
|
| 49 |
+
"parent_section_id": null,
|
| 50 |
+
"section_name": "Algorithms",
|
| 51 |
+
"text": "The first algorithm pertains to zero error recovery of the Householder matrix and the corresponding unit vector .We show that a brute-force elimination of possibilities on the columns of (see Algorithm 1 ###reference_###) uniquely identifies the vector (Theorem 1 ###reference_orem1###). We show that any solution to the problem is unique if it exists.\nInput: \nOutput:\nThe rest of our algorithms operate by exploiting the correlation in the entries of that manifest due to the Householder structure of the matrix (see Algorithm 3 ###reference_###). The proofs involve using suitable concentrations on the empirical correlations.\nInput: \nOutput:\nInput: \nOutput:\nRemark In Algorithm 3 ###reference_###, is the hard threshold operator, with . This value has been chosen heuristically. Furthermore, we are implicitly using the following: if is a solution, then is also a solution, as both produce the same householder matrix."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "5",
|
| 55 |
+
"parent_section_id": null,
|
| 56 |
+
"section_name": "Simulations",
|
| 57 |
+
"text": "Figure 1 ###reference_### illustrates the error in obtained from experimental results. The ground truth and matrices are generated arbitrarily as per the conditions specified above. The number of rows is set to and the number of columns is varied from onwards. The -axis denotes the error in , and the -axis represents the number of columns in . The plots are generated for two values of , and . The error decreases with an increase in the number of columns, as expected theoretically. The error when is slightly higher than the corresponding error for , which is consistent with Theorem 3 ###reference_orem3###.\n###figure_1### ###figure_2###"
|
| 58 |
+
}
|
| 59 |
+
],
|
| 60 |
+
"appendix": [],
|
| 61 |
+
"tables": {},
|
| 62 |
+
"image_paths": {
|
| 63 |
+
"1(a)": {
|
| 64 |
+
"figure_path": "2405.07649v2_figure_1(a).png",
|
| 65 |
+
"caption": "Figure 1: Infinity norm error for varying number of columns (\u03b8=0.1,\u03b8=0.4formulae-sequence\ud835\udf030.1\ud835\udf030.4\\theta=0.1,\\theta=0.4italic_\u03b8 = 0.1 , italic_\u03b8 = 0.4)",
|
| 66 |
+
"url": "http://arxiv.org/html/2405.07649v2/extracted/5901016/L_inf_error_diff_p_n1000_theta0.1.png"
|
| 67 |
+
},
|
| 68 |
+
"1(b)": {
|
| 69 |
+
"figure_path": "2405.07649v2_figure_1(b).png",
|
| 70 |
+
"caption": "Figure 1: Infinity norm error for varying number of columns (\u03b8=0.1,\u03b8=0.4formulae-sequence\ud835\udf030.1\ud835\udf030.4\\theta=0.1,\\theta=0.4italic_\u03b8 = 0.1 , italic_\u03b8 = 0.4)",
|
| 71 |
+
"url": "http://arxiv.org/html/2405.07649v2/extracted/5901016/L_inf_error_diff_p_n1000_theta0.4.png"
|
| 72 |
+
}
|
| 73 |
+
},
|
| 74 |
+
"validation": true,
|
| 75 |
+
"references": [],
|
| 76 |
+
"url": "http://arxiv.org/html/2405.07649v2"
|
| 77 |
+
}
|
20241004/2405.15067v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2406.06449v2.json
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"title": "Formatting Instructions for ICLR 2025 Conference Submissions",
|
| 3 |
+
"abstract": "The abstract paragraph should be indented 1/2 inch (3 picas) on both left and\nright-hand margins. Use 10 point type, with a vertical spacing of 11 points.\nThe word Abstract must be centered, in small caps, and in point size 12. Two\nline spaces precede the abstract. The abstract must be limited to one\nparagraph.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Submission of conference papers to ICLR 2025",
|
| 9 |
+
"text": "ICLR requires electronic submissions, processed by\nhttps://openreview.net/ ###reference_openreview.net/###. See ICLR\u2019s website for more instructions.\nIf your paper is ultimately accepted, the statement \\iclrfinalcopy should be inserted to adjust the\nformat to the camera ready requirements.\nThe format for the submissions is a variant of the NeurIPS format.\nPlease read carefully the instructions below, and follow them\nfaithfully."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "1.1",
|
| 13 |
+
"parent_section_id": "1",
|
| 14 |
+
"section_name": "Style",
|
| 15 |
+
"text": "Papers to be submitted to ICLR 2025 must be prepared according to the\ninstructions presented here.\nAuthors are required to use the ICLR LaTeX style files obtainable at the\nICLR website. Please make sure you use the current files and\nnot previous versions. Tweaking the style files may be grounds for rejection."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "1.2",
|
| 19 |
+
"parent_section_id": "1",
|
| 20 |
+
"section_name": "Retrieval of style files",
|
| 21 |
+
"text": "The style files for ICLR and other conference information are available online at:\nhttp://www.iclr.cc/ ###reference_ww.iclr.cc/###\nThe file iclr2025_conference.pdf contains these\ninstructions and illustrates the\nvarious formatting requirements your ICLR paper must satisfy.\nSubmissions must be made using LaTeX and the style files\niclr2025_conference.sty and iclr2025_conference.bst (to be used with LaTeX2e). The file\niclr2025_conference.tex may be used as a \u201cshell\u201d for writing your paper. All you\nhave to do is replace the author, title, abstract, and text of the paper with\nyour own.\nThe formatting instructions contained in these style files are summarized in\nsections 2 ###reference_###, 3 ###reference_###, and 4 ###reference_### below."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2",
|
| 25 |
+
"parent_section_id": null,
|
| 26 |
+
"section_name": "General formatting instructions",
|
| 27 |
+
"text": "The text must be confined within a rectangle 5.5 inches (33 picas) wide and\n9 inches (54 picas) long. The left margin is 1.5 inch (9 picas).\nUse 10 point type with a vertical spacing of 11 points. Times New Roman is the\npreferred typeface throughout. Paragraphs are separated by 1/2 line space,\nwith no indentation.\nPaper title is 17 point, in small caps and left-aligned.\nAll pages should start at 1 inch (6 picas) from the top of the page.\nAuthors\u2019 names are\nset in boldface, and each name is placed above its corresponding\naddress. The lead author\u2019s name is to be listed first, and\nthe co-authors\u2019 names are set to follow. Authors sharing the\nsame address can be on the same line.\nPlease pay special attention to the instructions in section 4 ###reference_###\nregarding figures, tables, acknowledgments, and references.\nThere will be a strict upper limit of 10 pages for the main text of the initial submission, with unlimited additional pages for citations."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "Headings: first level",
|
| 33 |
+
"text": "First level headings are in small caps,\nflush left and in point size 12. One line space before the first level\nheading and 1/2 line space after the first level heading."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3.1",
|
| 37 |
+
"parent_section_id": "3",
|
| 38 |
+
"section_name": "Headings: second level",
|
| 39 |
+
"text": "Second level headings are in small caps,\nflush left and in point size 10. One line space before the second level\nheading and 1/2 line space after the second level heading."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.1.1",
|
| 43 |
+
"parent_section_id": "3.1",
|
| 44 |
+
"section_name": "3.1.1 Headings: third level",
|
| 45 |
+
"text": "Third level headings are in small caps,\nflush left and in point size 10. One line space before the third level\nheading and 1/2 line space after the third level heading."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4",
|
| 49 |
+
"parent_section_id": null,
|
| 50 |
+
"section_name": "Citations, figures, tables, references",
|
| 51 |
+
"text": "These instructions apply to everyone, regardless of the formatter being used."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4.1",
|
| 55 |
+
"parent_section_id": "4",
|
| 56 |
+
"section_name": "Citations within the text",
|
| 57 |
+
"text": "Citations within the text should be based on the natbib package\nand include the authors\u2019 last names and year (with the \u201cet al.\u201d construct\nfor more than two authors). When the authors or the publication are\nincluded in the sentence, the citation should not be in parenthesis using \\citet{} (as\nin \u201cSee Hinton06 for more information.\u201d). Otherwise, the citation\nshould be in parenthesis using \\citep{} (as in \u201cDeep learning shows promise to make progress\ntowards AI (Bengio+chapter2007).\u201d).\nThe corresponding references are to be listed in alphabetical order of\nauthors, in the References section. As to the format of the\nreferences themselves, any style is acceptable as long as it is used\nconsistently."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.2",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "Footnotes",
|
| 63 |
+
"text": "Indicate footnotes with a number111Sample of the first footnote in the\ntext. Place the footnotes at the bottom of the page on which they appear.\nPrecede the footnote with a horizontal rule of 2 inches\n(12 picas).222Sample of the second footnote"
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.3",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "Figures",
|
| 69 |
+
"text": "All artwork must be neat, clean, and legible. Lines should be dark\nenough for purposes of reproduction; art work should not be\nhand-drawn. The figure number and caption always appear after the\nfigure. Place one line space before the figure caption, and one line\nspace after the figure. The figure caption is lower case (except for\nfirst word and proper nouns); figures are numbered consecutively.\nMake sure the figure caption does not get separated from the figure.\nLeave sufficient space to avoid splitting the figure and figure caption.\nYou may use color figures.\nHowever, it is best for the\nfigure captions and the paper body to make sense if the paper is printed\neither in black/white or in color."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.4",
|
| 73 |
+
"parent_section_id": "4",
|
| 74 |
+
"section_name": "Tables",
|
| 75 |
+
"text": "All tables must be centered, neat, clean and legible. Do not use hand-drawn\ntables. The table number and title always appear before the table. See\nTable 1 ###reference_###.\nPlace one line space before the table title, one line space after the table\ntitle, and one line space after the table. The table title must be lower case\n(except for first word and proper nouns); tables are numbered consecutively."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "5",
|
| 79 |
+
"parent_section_id": null,
|
| 80 |
+
"section_name": "Default Notation",
|
| 81 |
+
"text": "In an attempt to encourage standardized notation, we have included the\nnotation file from the textbook, Deep Learning\ngoodfellow2016deep available at\nhttps://github.com/goodfeli/dlbook_notation/ ###reference_n/###. Use of this style\nis not required and can be disabled by commenting out\nmath_commands.tex.\nNumbers and Arrays\nSets and Graphs\nIndexing\nCalculus\nProbability and Information Theory\nFunctions"
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "6",
|
| 85 |
+
"parent_section_id": null,
|
| 86 |
+
"section_name": "Final instructions",
|
| 87 |
+
"text": "Do not change any aspects of the formatting parameters in the style files.\nIn particular, do not modify the width or length of the rectangle the text\nshould fit into, and do not change font sizes (except perhaps in the\nReferences section; see below). Please note that pages should be\nnumbered."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "7",
|
| 91 |
+
"parent_section_id": null,
|
| 92 |
+
"section_name": "Preparing PostScript or PDF files",
|
| 93 |
+
"text": "Please prepare PostScript or PDF files with paper size \u201cUS Letter\u201d, and\nnot, for example, \u201cA4\u201d. The -t\nletter option on dvips will produce US Letter files.\nConsider directly generating PDF files using pdflatex\n(especially if you are a MiKTeX user).\nPDF figures must be substituted for EPS figures, however.\nOtherwise, please generate your PostScript and PDF files with the following commands:"
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "7.1",
|
| 97 |
+
"parent_section_id": "7",
|
| 98 |
+
"section_name": "Margins in LaTeX",
|
| 99 |
+
"text": "Most of the margin problems come from figures positioned by hand using\n\\special or other commands. We suggest using the command\n\\includegraphics\nfrom the graphicx package. Always specify the figure width as a multiple of\nthe line width as in the example below using .eps graphics\nor\nfor .pdf graphics.\nSee section 4.4 in the graphics bundle documentation (http://www.ctan.org/tex-archive/macros/latex/required/graphics/grfguide.ps ###reference_ex/required/graphics/grfguide.ps###)\nA number of width problems arise when LaTeX cannot properly hyphenate a\nline. Please give LaTeX hyphenation hints using the \\- command."
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "7.1.x",
|
| 103 |
+
"parent_section_id": "7.1",
|
| 104 |
+
"section_name": "Author Contributions",
|
| 105 |
+
"text": "If you\u2019d like to, you may include a section for author contributions as is done\nin many journals. This is optional and at the discretion of the authors."
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "7.1.x",
|
| 109 |
+
"parent_section_id": "7.1",
|
| 110 |
+
"section_name": "Acknowledgments",
|
| 111 |
+
"text": "Use unnumbered third level headings for the acknowledgments. All\nacknowledgments, including those to funding agencies, go at the end of the paper."
|
| 112 |
+
}
|
| 113 |
+
],
|
| 114 |
+
"appendix": [
|
| 115 |
+
{
|
| 116 |
+
"section_id": "Appendix 1",
|
| 117 |
+
"parent_section_id": null,
|
| 118 |
+
"section_name": "Appendix A Appendix",
|
| 119 |
+
"text": "You may include other additional sections here."
|
| 120 |
+
}
|
| 121 |
+
],
|
| 122 |
+
"tables": {
|
| 123 |
+
"1": {
|
| 124 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T1\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Sample table title</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S4.T1.1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.1.1\">PART</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column\" id=\"S4.T1.1.1.1.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.2.1\">DESCRIPTION</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T1.1.2.1\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T1.1.2.1.1\">Dendrite</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S4.T1.1.2.1.2\">Input terminal</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.3.2\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T1.1.3.2.1\">Axon</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T1.1.3.2.2\">Output terminal</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.4.3\">\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T1.1.4.3.1\">Soma</td>\n<td class=\"ltx_td ltx_align_left\" id=\"S4.T1.1.4.3.2\">Cell body (contains cell nucleus)</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 125 |
+
"capture": "Table 1: Sample table title"
|
| 126 |
+
}
|
| 127 |
+
},
|
| 128 |
+
"image_paths": {},
|
| 129 |
+
"validation": true,
|
| 130 |
+
"references": [],
|
| 131 |
+
"url": "http://arxiv.org/html/2406.06449v2"
|
| 132 |
+
}
|
20241004/2406.13444v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2406.17600v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2407.01687v2.json
ADDED
|
@@ -0,0 +1,549 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Deciphering the Factors Influencing the Efficacy of Chain-of-Thought: Probability, Memorization, and Noisy Reasoning",
|
| 3 |
+
"abstract": "Chain-of-Thought (CoT) prompting has been shown to enhance the multi-step reasoning capabilities of Large Language Models (LLMs).\nHowever,\ndebates persist about whether LLMs exhibit abstract generalization or rely on shallow heuristics when given CoT prompts.\nTo understand the factors influencing CoT reasoning we provide a detailed case study of the symbolic reasoning task of decoding shift ciphers Andress (2014), where letters are shifted forward some number of steps in the alphabet.\nWe analyze the pattern of results produced by three LLMs\u2014GPT-4, Claude 3, and Llama 3.1\u2014performing this task using CoT prompting.\nBy focusing on a single relatively simple task, we are able to identify three factors that systematically affect CoT performance: the probability of the task\u2019s expected output (probability), what the model has implicitly learned during pre-training (memorization), and the number of intermediate operations involved in reasoning (noisy reasoning). We show that these factors can drastically influence task accuracy across all three LLMs; e.g., when tested with GPT-4, varying the output\u2019s probability of occurrence shifts accuracy from to . Overall, we conclude that CoT prompting performance reflects both memorization and a probabilistic version of genuine reasoning.222Code and data are available at https://github.com/aksh555/deciphering_cot.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Reasoning, one of the key aspects of human intelligence, is the process of thinking about something logically and systematically using evidence and past experiences to make a decision Wason (1968 ###reference_b43###); Wason and Johnson-Laird (1972 ###reference_b44###); Fagin et al. (2004 ###reference_b8###). The impressive performance of Large Language Models (LLMs) across a wide range of tasks has spurred extensive research into their reasoning capabilities Huang and Chang (2023 ###reference_b13###); Qiao et al. (2023 ###reference_b31###). It remains unclear whether the behavior of these systems is based on true reasoning or on shallow heuristics.\nSome results provide evidence that LLMs are able to reason Suzgun et al. (2023 ###reference_b38###); Lampinen et al. (2024 ###reference_b18###); Saparov and He (2023 ###reference_b35###), while others show that they still struggle on tasks that humans can easily solve via reasoning Han et al. (2022 ###reference_b11###); Valmeekam et al. (2023 ###reference_b40###); McCoy et al. (2023 ###reference_b24###); Razeghi et al. (2022 ###reference_b34###); Cao et al. (2024 ###reference_b5###).\nThe Chain-of-Thought (CoT; Wei et al., 2022 ###reference_b45###) prompting strategy has played a significant role in this debate. CoT involves prompting an LLM to generate a sequence of intermediate reasoning steps before producing the final answer, given some in-context exemplar(s) of how to break the task into steps.\nCoT and its several variants Kojima et al. (2022 ###reference_b16###); Zhou et al. (2023 ###reference_b52###); Wang et al. (2023b ###reference_b42###) have been shown to substantially improve performance over standard prompting.\nRecent works have tried to identify which aspects of the demonstration contribute to CoT\u2019s enhanced performance Huang and Chang (2023 ###reference_b13###); Madaan and Yazdanbakhsh (2022 ###reference_b22###); Jin et al. (2024 ###reference_b15###), typically relying on assessing performance across a wide range of tasks.\nIn this work, we take a different approach: we present an extensive case study on a single task that allows us to disentangle reasoning from memorization. The task we selected is solving shift ciphers, a simple type of code in which each letter is shifted forward a certain number of positions in the alphabet (Figure 1 ###reference_###, panel 1). We choose this task because it allows us to independently manipulate several factors that could be relevant for characterizing how LLMs solve reasoning tasks when prompted with CoT: difficulty, frequency, and answer probability.\n###figure_1### Our results suggest that CoT performance reflects three factors: probability, memorization, and noisy reasoning.\nFirst, the accuracy of CoT is affected by the probability of the correct output, with more probable outputs resulting in a stronger effect of CoT. 
Second, performance is higher when memorization is possible, as indicated by the frequency of encountering different shift cipher variants during pre-training.\nThe effects of probability and memorization show that CoT performance is not fully systematic abstract reasoning.\nNonetheless, CoT performance is not solely driven by superficial heuristics: it also shows some hallmarks of true reasoning\u2014albeit a noisy version of true reasoning, in which the error rate increases along with task difficulty (where we quantify task difficulty by the number of implicit reasoning steps involved).\nIn addition, we find evidence that the effect of CoT fundamentally depends on generating sequences of words that increase the probability of the correct answer when conditioned upon; as long as this is the case, CoT can thus succeed even when the demonstrations in the prompt are invalid.\nIn the ongoing debate about whether LLMs reason or memorize Feldman (2020 ###reference_b9###); Zhang et al. (2023a ###reference_b50###); Magar and Schwartz (2022 ###reference_b23###); Srivastava et al. (2024 ###reference_b36###); Antoniades et al. (2024 ###reference_b3###), our results thus support a reasonable middle-ground: LLM behavior displays aspects of both memorization and reasoning, and also reflects the probabilistic origins of these models."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Related Work",
|
| 15 |
+
"text": ""
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Approach",
|
| 21 |
+
"text": "One challenge of evaluating the role of memorization and reasoning in the performance of LLMs is that these models are typically evaluated on a wide range of complex reasoning tasks, whose variety and complexity can obscure the factors that drive performance. By contrast, we propose to tease apart the factors behind the efficacy of CoT prompting by focusing on a single relatively simple task: deciphering text encoded with a shift cipher.\nEncoding a message with a shift cipher involves replacing every letter with another letter that is some fixed number of positions (called ) forward in the alphabet; decoding is the reverse (shifting backward) as shown in Figure 1 ###reference_###. These are also known as rotation ciphers since they rotate the alphabet forward some number of steps, and they are given the name rot- where corresponds to . For example, given the test word \u201cFDW\u201d and that rot-3 encryption has been used (), decoding involves shifting every letter steps backward\u2014i.e., F C, D A, and W T to obtain \u201cCAT\u201d as the output. In our experiments, we give an LLM a single word encoded with a shift cipher and ask it to decode this text to recover the original word."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "Motivation for using shift ciphers",
|
| 27 |
+
"text": "Our main reason for using shift ciphers is\nbecause they involve a sharp dissociation between task complexity\nand task frequency (a key factor in memorization). The complexity of the decipherment task is determined by the shift level\u2014ciphers that require more intermediate steps are more complex. Different shift levels also vary in their frequency in internet text McCoy et al. (2023 ###reference_b24###), and hence in the training data of large language models. Specifically, rot-13 is widely used in internet forums to conceal text such as puzzle solutions and spoilers, and rot-3 and rot-1 commonly appear in tutorials on decipherment (rot-3 is also known as the Caesar cipher, having apparently been used by the eponymous Caesar to encrypt his messages).\nIn addition, shift ciphers facilitate investigation of the effect of probability because the correct answer can be any string, allowing us to modulate the probability of that string easily.\nFurther, the systematic nature of the task makes it easy to generate examples and to verify correctness. Finally, decoding each letter in the message is an independent step, allowing us to easily analyze these individual steps.\nMcCoy et al. (2023 ###reference_b24###) previously evaluated GPT models on shift ciphers, focusing on standard prompting along with some initial results using CoT. They study the effect of only probability and memorization, while we conduct a more extensive investigation into LLM behavior when prompted with CoT by additionally studying the influence of complexity and analyzing more models. Importantly, we add nuance to their findings by arguing for a middle-ground viewpoint that acknowledges the LLM weaknesses identified by McCoy et al. (2023 ###reference_b24###) but also brings in novel observations that highlight the hallmarks of true reasoning that are present in these systems."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "The effect of CoT on shift ciphers",
|
| 33 |
+
"text": "We constructed a dataset comprising 7-letter words having exactly 2 tokens (measured using the tokenizer used by GPT-4) to control for confounding factors relating to tokenization. We found all 3-letter and 4-letter tokens from the lowercase English alphabet and formed words by considering possible combinations of 3-letter word-initial tokens followed by 4-letter non-word-initial tokens.\nFollowing McCoy et al. (2023 ###reference_b24###), we compute the log probability as the log probability that GPT-2 Radford et al. (2019 ###reference_b32###) assigns to the sentence \u2018The word is \"WORD\"\u2019, minus the log probability that it assigns to \u2018The word is \"\u2019; thus, this yields the log probability assigned to just the word and the following quotation mark in the context of \u2018The word is \"\u2019. The closing quotation mark is included because it indicates the end of the word. The words were scored by their log probability and arranged in descending order. Subsequently, five bins\nwere formed by selecting equidistant log probability values as centers, with bin1 having the highest probability and bin5 having the lowest probability. We manually checked the words in this dataset and filtered them to ensure there were no inappropriate words used to obtain 150 words for each bin. We partitioned the 150 examples into two subsets: a subset containing 100 words used to evaluate GPT-4, and a subset containing 50 words used to evaluate logistic regression models that were fitted to GPT-4\u2019s performance on the 100-word subset. We prepared the inputs for the models by producing the shift-cipher-encoded versions of the words from the 5 probability bins across 25 shift levels (1 to 25). We ran all evaluations a single time; the accuracies that we report are accuracies over these 100-example sets.\nWe then assessed performance on this dataset using a variety of different prompts:\nStandard. This is a prompt with just the description of the task and demonstration but no reasoning steps (Figure 2 ###reference_###).\nText-CoT. This prompt encourages the model to decode a message one letter at a time (Figure 3 ###reference_###). We chose this way of framing the CoT prompt following McCoy et al. (2023 ###reference_b24###), who tried several variants and found this to be the best. To get a reasoning step correct, the model must have learned the alphabet during pre-training.\nMath-CoT.\nThe prompt (Appendix A.1 ###reference_### Figure 9 ###reference_###) encourages a reasoning pipeline that involves translating each letter to a number, performing the shift by applying arithmetic to this number, then converting the result back to a letter. The prompt also specifies the mapping between letters and positions, eliminating the need for the model to have internalized the positions of the letters in the alphabet.\nNumber-sequence CoT (Number-CoT). This prompt (Appendix A.1 ###reference_### Figure 10 ###reference_###) makes use of an alternative task that is isomorphic to shift ciphers but based in the number domain\u2014the input and output are number sequences instead of letter sequences. 
Reasoning involves applying arithmetic to the input elements in the number sequence to get a corresponding output sequence.\nWe ran experiments using both open and closed source models: GPT-4 (gpt-4-0613) OpenAI (2023 ###reference_b28###), Claude 3 (claude-3-opus-20240229) Anthropic (2024 ###reference_b2###), and Llama-3.1-405B-Instruct MetaAI (2024 ###reference_b26###).\nThe reason for using such strong models is that their shift cipher performance is significantly improved by prompting with chain of thought. Additionally, this helps us to control several sources of extraneous errors making it easier to focus on the task itself and isolate the factors affecting CoT: It ensures that the format of the demonstration is closely followed, and that copy errors (errors in copying information from the prompt such as letters from encoded text and letter-position mappings) are rare.\nWe set temperature to and max_new_tokens to .\nFigure 1 ###reference_### provides some initial results for GPT-4. Using standard prompts,\nGPT-4 gets zero accuracy across most shift levels, but it improves substantially (to an average accuracy of 32%) when Text-CoT is used; this result replicates the finding in McCoy et al. (2023 ###reference_b24###) that CoT is helpful for shift ciphers\nbut still remains far from perfect. However, with Number-CoT, GPT-4\u2019s performance becomes nearly perfect (more details are in Appendix A.1 ###reference_###).\nThese results paint CoT prompting in a puzzling light. Prompting with Number-CoT showed that GPT-4 has the core reasoning abilities that would be needed to decode shift ciphers nearly perfectly. Thus, if CoT prompting led to symbolic reasoning, GPT-4 would score perfectly. The fact that it does not shows that CoT reasoning is not pure symbolic reasoning. Nonetheless, it is also clear that CoT does substantially improve over standard prompting, so it is unlikely that CoT reasoning can be explained away as simple memorization.\nIf CoT reasoning is neither simple memorization nor pure symbolic reasoning, what is it? This question motivates our experiments in the next section."
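As a rough illustration of the word-probability score used for binning, the sketch below follows the description above: the log probability GPT-2 assigns to 'The word is "WORD"' minus the log probability it assigns to 'The word is "'. It assumes the Hugging Face transformers GPT-2 checkpoint and is only an approximation of the authors' scoring script, whose exact details are not given here.

```python
# Hedged sketch of the GPT-2 word-probability score used to form the bins.
import torch
from transformers import GPT2LMHeadModel, GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2").eval()

def sentence_logprob(text: str) -> float:
    """Sum of next-token log probabilities GPT-2 assigns to `text`."""
    ids = tokenizer(text, return_tensors="pt").input_ids
    with torch.no_grad():
        logits = model(ids).logits
    logprobs = torch.log_softmax(logits[:, :-1], dim=-1)  # predictions for tokens 2..T
    target = ids[:, 1:]
    return logprobs.gather(-1, target.unsqueeze(-1)).sum().item()

def word_score(word: str) -> float:
    """Log prob of the word plus closing quote, in the context 'The word is "'."""
    prefix = 'The word is "'
    return sentence_logprob(f'{prefix}{word}"') - sentence_logprob(prefix)
```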
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Disentangling the\nfactors influencing CoT performance",
|
| 39 |
+
"text": "We consider four types of reasoning processes that LLMs might be adopting.\nSymbolic reasoning is the use of discrete, deterministic inference rules. Shift ciphers can be perfectly decoded with a simple symbolic algorithm, so a system using fully systematic reasoning should attain 100% accuracy.\nNoisy reasoning is like symbolic reasoning but with the addition of noise that introduces some possibility of each intermediate operation in a reasoning step being wrong. Thus, if the system uses noisy reasoning, we should see accuracy decrease as we increase the number of operations that need to be performed. Shift ciphers let us test this possibility: by varying , we can modulate the number of operations that need to be performed in every reasoning step and observe if accuracy varies accordingly.\nMemorization is a strategy in which a system memorizes the tasks it has encountered in pre-training but does not generalize to new tasks. If memorization is all that LLMs do, we should see higher performance in the cases that are frequently encountered during pre-training than the ones that are not. McCoy et al. (2023 ###reference_b24###) show that is by far the most common shift level in natural corpora because this shift level (sometimes called rot-13) is popular in some online communities. Thus, a hallmark of memorization would be much higher accuracy at than other shift levels.\nProbabilistic reasoning frames a task as choosing the output that is most probable given the input. Such reasoning would be influenced by the prior probability of the output: a probabilistic reasoner should show accuracy that increases as the prior probability of the correct answer increases.\nFigure 4 ###reference_### illustrates the hypothetical performance trends that would be observed in a system adopting each reasoning approach. These approaches are not mutually exclusive; e.g., a reasoner could be influenced by both probability and memorization. Indeed, as discussed below, we find that LLMs\u2019 performance when prompted with CoT displays hallmarks of several different types of reasoning; see Figure 1 ###reference_###, panel 4 for GPT-4 and Figure 11 ###reference_### in the Appendix for Claude 3 and Llama 3.1.\n###figure_2### First, accuracy generally decreases as the shift level increases, a hallmark of noisy reasoning. LLMs\u2019 performance is, in particular, indicative of a two-way version of noisy reasoning in which it can decode a message by shifting letters forward or backward (e.g., instead of decoding a shift of 25 by shifting back 25 letters, it could instead shift forward 1 letter, as doing so requires fewer steps); this two-way nature shows up in the way that accuracy increases as the shift level changes from 20 to 25.\nSecond, evidence of probabilistic reasoning can be seen in the fact that accuracy is substantially higher for high prob (the highest-probability bin, bin 1) than low prob (the lowest-probability bin, bin 5). High prob / low prob refer to the probability of the words that are the correct answers when our examples are decoded, where probability is quantified using GPT-2 as described in Section 3.2 ###reference_.SSS0.Px1###. 
The \u201chigh prob\u201d cases are common words such as {\u2019mariner\u2019, \u2019shrines\u2019, \u2019paywall\u2019, \u2026}, while the \u201clow prob\u201d cases are nonsense letter sequences such as {\u2019xcbrouw\u2019, \u2019jsxrouw\u2019, \u2019levjspx\u2019, \u2026}.\nFinally, although a shift level of 13 requires the most reasoning steps of any shift level (assuming decoding can be done forward or backward), there is a spike in accuracy at a shift level of 13. As discussed above, this spike is a hallmark of memorization since 13 is the most common shift level in natural corpora.\nFor the upcoming detailed analysis experiments, we use GPT-4 as the main reference model, since Claude 3 and Llama 3.1 exhibited similar trends as GPT-4 on the evaluations presented so far.***The detailed results of Llama-3.1-405B-Instruct and claude-3-opus-20240229 are shown in Appendix \u00a7A.2 ###reference_###."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4.1",
|
| 43 |
+
"parent_section_id": "4",
|
| 44 |
+
"section_name": "A simple probabilistic approach to modeling the reasoning process",
|
| 45 |
+
"text": "To make these intuitively-stated observations more rigorous, we perform a logistic regression to determine the statistical significance of several factors.\nThe outcome variable is a binary variable indicating whether GPT-4 got the correct answer on each example. We include the following predictors:\n: log probability of the encoded input text as measured by GPT-2 Radford et al. (2019 ###reference_b32###). The inputs tend to have a very low probability because they are enciphered.\n: log probability of the ground-truth output text as measured by GPT-2.\n: we used the frequency of occurrence of all shift levels that McCoy et al. (2023 ###reference_b24###) provided based on analysis of the C4 corpus Raffel et al. (2020 ###reference_b33###). The assumption is that the distribution of shifts in C4 is similar to the distribution in the training data for GPT-4.\n: the number of steps that must be performed to decode each letter; this feature is added to account for one-way reasoning.\n: this value is the minimum number of steps that must be performed to decode each letter, under the assumption that decoding can be done by moving steps backward or steps forward; as discussed above, GPT-4 indeed shows evidence of using both of these decoding directions.\nSeveral of these variables correspond to the critical properties that are indicative of our hypothesized reasoning processes: should have a significant effect if probabilistic reasoning is used, should have a significant effect if memorization is used, and quantifies the difficulty of the task, which should have a significant effect if noisy reasoning is used. The remaining factors are included as potential confounds to control for.\nThe overall logistic regression thus took the following form:\nThe following features had a statistically significant effect on model performance: , , , and \n( in all cases).\nThese results therefore quantitatively support the conclusion that GPT-4 incorporates processes based on probability, memorization, and noisy reasoning (both forward and backward).\n###figure_3### Figure 5 ###reference_### shows the predictions of the logistic regression compared to GPT-4\u2019s actual performance.\nThe logistic regression correctly predicts the main trends in GPT-4\u2019s behavior (as expected, it does not match the curve exactly due to the simplicity of the model).\nIn the next few subsections, we conduct some additional experiments that investigate each hypothesized reasoning type in more detail.\n###figure_4###"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4.2",
|
| 49 |
+
"parent_section_id": "4",
|
| 50 |
+
"section_name": "Analyzing the effect of probability",
|
| 51 |
+
"text": "If an LLM is influenced by probability, we would expect to occasionally observe unfaithfulness between the chain of reasoning steps produced by the LLM and the LLM\u2019s final answer. Specifically, if the individual reasoning steps would point to a final output that is low-probability, a probabilistic reasoner might instead produce a different final answer that has a higher probability. For example, in our CoT experiments, each step produces one letter, and these letters must be concatenated to form the final answer. If the individual step outputs are S, T, A, Z, the final answer should be STAZ, but a model might instead \u201cself-correct\u201d by producing the higher-probability word STAY.\nSuch unfaithfulness can help or hurt the model. When the correct answer truly is a low-probability word such as STAZ, then correcting to STAY would reduce the model\u2019s accuracy. However, if the model had made a mistake during the reasoning chain\u2014such as by producing S, T, A, Z when the chain should have been S, T, A, Y\u2014then correcting to STAY would rescue the model from its error.\nTo investigate unfaithfulness, we compare the faithful accuracy that would be obtained by concatenating GPT-4\u2019s step outputs to the actual overall accuracy.\nWe indeed observe that overall accuracy is generally lower than faithful accuracy, illustrating that unfaithfulness occurs.\nFurther, the drop in accuracy is more pronounced in the low-probability setting than the high-probability setting, which is consistent with the intuition that the lower the probability of a concatenated answer is, the more likely it will be that a probability-reliant model will be steered away from that answer. See Figure 6 ###reference_###\nfor the full results.\nTable 1 ###reference_### provides a more detailed view of unfaithfulness.\nIncorrect intermediate chains (i.e., concatenated step outputs) are followed by correct final answers much more often in the setting where the correct answer has a high probability (34% and 55% of the time for rot-4 and rot-13, respectively) than in the low probability setting (1% and 19% of the time for rot-4 and rot-13, respectively). On the other hand, correct intermediate chains are followed by incorrect final answers less often in the high probability setting (7% and 1% of the time for rot-4 and rot-13, respectively) than in the low-probability setting (14% and 9% of the time for rot-4 and rot-13, respectively).\nThese results support the hypothesis that GPT-4 over-relies on the prior probability of potential outputs (see Jang et al. (2023 ###reference_b14###) for some related observations).\nIf the answer has a high probability of occurrence, the model\u2019s priors favor generating it even if its intermediate reasoning steps suggest an alternative output. Conversely, if the answer is of lower probability, then even if the chain of reasoning is correct, the priors exert a detrimental influence leading to incorrect final answers.\nProb\n\n\n\nChain Steps\n\nOutput\n\nrot-4\nrot-13\n\nCorrect\nIncorrect\nCorrect\nIncorrect\n\n\n\nHigh\nCorrect\n19\n7\n15\n1\n\nIncorrect\n34\n40\n55\n29\n\nLow\nCorrect\n7\n14\n7\n9\n\nIncorrect\n1\n78\n19\n65"
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4.3",
|
| 55 |
+
"parent_section_id": "4",
|
| 56 |
+
"section_name": "Analyzing the effects of noise",
|
| 57 |
+
"text": "The statistically significant impact of is evidence that GPT-4\u2019s CoT behavior is in part a noisy version of symbolic reasoning.\nAccuracy falls as the shift level increases from 1 to 12 and then recovers at higher shift levels\n(Figure 8 ###reference_###), consistent with a noisy reasoning process in which deciphering each letter with a shift level of involves implicit steps, with noise that gives each step some probability of being performed incorrectly. Note that the implicit steps referred to here are different from the steps that are explicitly produced in the chain of thought: the chain of thought uses one explicit step per letter (Figure 3 ###reference_###), but here we are discussing the operations that must be implicitly carried out within each step of this chain in order to decode each letter.\n###figure_5### We have argued that the relation between accuracy and shift level is evidence that GPT-4 uses a two-way strategy. That is, accuracy is high for small shifts such as 1 but also for large shifts such as 25, which could plausibly be explained by GPT-4 implicitly selecting whichever direction will minimize the number of steps it needs to compute\u2014shifting letters backward for small shift levels or forward for large shift levels.\nThis two-way strategy is effective in that it supports strong performance on large shift levels such as 25.\nHowever, we also observe evidence that it contributes to the noise that causes accuracy to decline as the shift level increases.\nFigure 7 ###reference_### shows the actual shift level that GPT-4 produces for each letter in its chain of thought, for each of four intended shift levels. Across all four of these cases, GPT-4 shows peaks at both and .\nThus, while some of the noise affecting the reasoning process may be random, it appears that at least some of the noise can be attributed to confusion between possible shift levels. Decoding a shift level of can be done by shifting backward steps or forward steps, but it appears that GPT-4 sometimes mixes up these two strategies by shifting forward steps (or, equivalently, shifting backward steps), contributing to the overall noise.\nAnother factor that interacts with noise is temperature. In principle, if CoT entailed pure symbolic reasoning, it would assign 100% probability to the correct continuation (i.e., each predicted next token) and 0% probability to everything else. If so, the temperature used would not affect performance.\nHowever, we observe that CoT scores better with a low temperature. For instance, in rot-13, GPT-4\u2019s accuracy is 30.0% at temperature=0 and 0.33% at temperature=1. This shows that its predicted distribution over the vocabulary is not fully discrete\u2014it has some noise in it that a low temperature can remove. However, even a temperature of 0 does not make the performance perfect because noise does not solely arise in the final distribution over the vocabulary (which temperature modifies) but also influences the implicit intermediate steps used to produce that distribution (which temperature does not change).\n###figure_6###"
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.4",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "Analyzing the effect of memorization",
|
| 63 |
+
"text": "To further investigate memorization, we focus on rot-13, because frequency is generally confounded with simplicity for the other shift levels (e.g., rot-1 is simple as well as frequent).\n is the most frequent shift level McCoy et al. (2023 ###reference_b24###), and we observe in\nFigure 8 ###reference_###\nthat GPT-4 shows a spike in accuracy at this shift level in both Text-CoT and Math-CoT, providing strong evidence that memorization plays a role in GPT-4\u2019s CoT performance.\nWe also observe that memorization influences unfaithfulness. Consider the cells in Table 1 ###reference_### that involve incorrect chain steps outputs but correct final answers; such cases are much more common for rot-13 than for other levels, including rot-4 (the other shift level shown in that table); e.g., in the high-probability case, 55% of rot-13 examples fall in this category, while only 34% of rot-4 examples do.\nThis pattern also provides some evidence for memorization: for rot-13, the model may have two \u201cpaths\u201d for producing the final output\u2014it could use the chain of thought it has produced, or it could go directly from the input to the output due to memorization. Thus, when it produces the final output, it might implicitly weigh both of those paths, which helps it to correct faulty chains because it has a back-up path to consider. However, for rot-4, it may be that only the path involving the chain of thought is available, such that GPT-4 cannot fix incorrect chains as easily because it does not have this alternative path to fall back on."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.5",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "The role of intermediate reasoning steps",
|
| 69 |
+
"text": "Finally, we study the role of the intermediate reasoning steps that are involved with CoT prompting\u2014both the chain that GPT-4 produces and the chain provided in the demonstration.\nFirst we focus on the chain of thought that GPT-4 produces before providing its final answer. We consider two potential roles that this chain could have. First, it could be that the chain is helpful because it provides text that is useful for GPT-4 to condition on in later steps. Alternatively, it could be that the critical aspect of CoT reasoning is internal\u2014rather than depending on the text that is produced, CoT could be helpful because it gives the LLM the opportunity to internally perform additional reasoning steps.\nTo disentangle these possibilities, we modify the prompt so that GPT-4 is told to perform the same steps of reasoning as before, but to have the intermediate output that it produces be uninformative. Specifically, we used the Text-CoT prompt but instructed the model to not reveal step answers and instead output a *. The step answers in the demonstration were also replaced by \u2018*\u2019; thus we left the format of reasoning intact but the expected generation token was no longer a component of the final answer.\nNext, we asked the model to explicitly think about the correct letter that should go in the place of the * but just not write it down. In the demonstration, we first provided an example with all step answers and then repeated the same example but with a * in place of each output letter (see Appendix A.3 ###reference_### Figure 12 ###reference_### for prompts).\nIn both settings, performance\nwas similar to that of the standard prompting variant (shown in Figure 1 ###reference_###, panel 2). This is evidence that CoT depends on \u201cself-conditioning\u201d\u2014explicitly producing text that will be useful as context to condition on when producing the final answer. Merely instructing a model to \u201cthink\u201d silently is not helpful, suggesting that the reasoning does not occur internally.\nThese results corroborate prior work finding that CoT is unhelpful when the model is told to produce contentless \u201cfiller\u201d tokens instead of contentful text\n(Lanham et al., 2023 ###reference_b19###); models can reason internally when explicitly trained to do so\n(Pfau et al., 2024 ###reference_b30###), but current LLMs without this explicit training do not seem to have this ability.\nIn experiments described in Appendix A.3 ###reference_###, we also find that the validity of the reasoning shown in the prompt does not have a strong effect of CoT performance for shift ciphers. That is, even when the demonstration is perturbed such that it contains many errors, GPT-4\u2019s CoT performance remains approximately the same. This finding corroborates prior work showing that the validity of demonstrations did not matter much Wang et al. (2023a ###reference_b41###); Madaan and Yazdanbakhsh (2022 ###reference_b22###); Ye et al. (2023 ###reference_b49###); the demonstration seems to merely guide the model to solve the task by providing a format to generate accurate reasoning steps Min et al. (2022 ###reference_b27###)."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "5",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "Conclusion",
|
| 75 |
+
"text": "We have used the case study of shift ciphers to disentangle the factors that influence CoT reasoning, with a focus on characterizing what type of reasoning is used in models prompted with CoT. We found that CoT performance is statistically significantly influenced by the probability of occurrence of the expected task output, the frequency of the task in corpora, and the number of reasoning steps that must be (implicitly) performed. These results suggest that CoT reasoning can be characterized as probabilistic, memorization-influenced noisy reasoning, meaning that LLM behavior displays traits of both memorization and generalization."
|
| 76 |
+
}
|
| 77 |
+
],
|
| 78 |
+
"appendix": [
|
| 79 |
+
{
|
| 80 |
+
"section_id": "Appendix 1",
|
| 81 |
+
"parent_section_id": null,
|
| 82 |
+
"section_name": "Appendix A Additional Details & Experiments",
|
| 83 |
+
"text": "While the Standard prompt (Figure 2 ###reference_###) yields poor performance on most shift levels, the Number-CoT prompt (Figure 10 ###reference_###) in contrast gives nearly perfect scores across the shift levels. It is to be noted that some miscellaneous noise is captured in the Number-CoT case. This arises mostly due to incomplete generations/half-completed chains requiring more tokens than needed as the model does some additional sub-reasoning steps, and in very rare cases produces numbers greater than 25.\nThe Math-CoT prompt used is shown in Figure 9 ###reference_###.\n###figure_7### We display the results with Llama 3.1 and Claude 3 when prompted with standard prompts and Text-CoT in Figure 11 ###reference_###. Interestingly, Llama 3.1 was trained on CoT data, such that even when it is prompted with standard prompts it generates reasoning steps. However, as we can see from Figure 11 ###reference_###, with Text-CoT there is a further enhancement in scores across the shift levels.\nOverall, both models display trends similar to that of GPT-4 shown in Figure 1 ###reference_###.\nFigure 12 ###reference_### shows the prompts used to test the importance of producing explicit reasoning steps by forcing the model to \u201cthink\u201d silently."
|
| 84 |
+
}
|
| 85 |
+
],
|
| 86 |
+
"tables": {
|
| 87 |
+
"1": {
|
| 88 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T1\">\n<p class=\"ltx_p ltx_align_center\" id=\"S4.T1.1\"><span class=\"ltx_text\" id=\"S4.T1.1.1\">\n<span class=\"ltx_inline-block ltx_transformed_outer\" id=\"S4.T1.1.1.1\" style=\"width:299.6pt;height:108pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(0.0pt,0.0pt) scale(1,1) ;\">\n<span class=\"ltx_p\" id=\"S4.T1.1.1.1.1\"><span class=\"ltx_text\" id=\"S4.T1.1.1.1.1.1\">\n<span class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S4.T1.1.1.1.1.1.1\">\n<span class=\"ltx_thead\">\n<span class=\"ltx_tr\" id=\"S4.T1.1.1.1.1.1.1.1.1\">\n<span class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_r ltx_border_tt ltx_rowspan ltx_rowspan_2\" id=\"S4.T1.1.1.1.1.1.1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.1.1.1.1.1.1.1\">Prob</span></span>\n<span class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_r ltx_border_tt ltx_rowspan ltx_rowspan_2\" id=\"S4.T1.1.1.1.1.1.1.1.1.2\"><span class=\"ltx_text\" id=\"S4.T1.1.1.1.1.1.1.1.1.2.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S4.T1.1.1.1.1.1.1.1.1.2.1.1\">\n<span class=\"ltx_tr\" id=\"S4.T1.1.1.1.1.1.1.1.1.2.1.1.1\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.1.1.1.1.1.1.1.1.2.1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.1.1.1.1.1.2.1.1.1.1.1\">Chain Steps</span></span></span>\n<span class=\"ltx_tr\" id=\"S4.T1.1.1.1.1.1.1.1.1.2.1.1.2\">\n<span class=\"ltx_td ltx_nopad_r ltx_align_center\" id=\"S4.T1.1.1.1.1.1.1.1.1.2.1.1.2.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.1.1.1.1.1.2.1.1.2.1.1\">Output</span></span></span>\n</span></span></span>\n<span class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_tt ltx_colspan ltx_colspan_2\" id=\"S4.T1.1.1.1.1.1.1.1.1.3\"><span class=\"ltx_text ltx_font_typewriter ltx_font_bold\" id=\"S4.T1.1.1.1.1.1.1.1.1.3.1\">rot-4</span></span>\n<span class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt ltx_colspan ltx_colspan_2\" id=\"S4.T1.1.1.1.1.1.1.1.1.4\"><span class=\"ltx_text ltx_font_typewriter ltx_font_bold\" id=\"S4.T1.1.1.1.1.1.1.1.1.4.1\">rot-13</span></span></span>\n<span class=\"ltx_tr\" id=\"S4.T1.1.1.1.1.1.1.2.2\">\n<span class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T1.1.1.1.1.1.1.2.2.1\">Correct</span>\n<span class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T1.1.1.1.1.1.1.2.2.2\">Incorrect</span>\n<span class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T1.1.1.1.1.1.1.2.2.3\">Correct</span>\n<span class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T1.1.1.1.1.1.1.2.2.4\">Incorrect</span></span>\n</span>\n<span class=\"ltx_tbody\">\n<span class=\"ltx_tr\" id=\"S4.T1.1.1.1.1.1.1.3.1\">\n<span class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t ltx_rowspan ltx_rowspan_2\" id=\"S4.T1.1.1.1.1.1.1.3.1.1\"><span class=\"ltx_text\" id=\"S4.T1.1.1.1.1.1.1.3.1.1.1\">High</span></span>\n<span class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"S4.T1.1.1.1.1.1.1.3.1.2\">Correct</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.1.1.1.1.3.1.3\">19</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.1.1.1.1.1.1.3.1.4\"><span class=\"ltx_text\" id=\"S4.T1.1.1.1.1.1.1.3.1.4.1\" 
style=\"color:#00E000;\">7</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.1.1.1.1.3.1.5\">15</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.1.1.1.1.3.1.6\"><span class=\"ltx_text\" id=\"S4.T1.1.1.1.1.1.1.3.1.6.1\" style=\"color:#00E000;\">1</span></span></span>\n<span class=\"ltx_tr\" id=\"S4.T1.1.1.1.1.1.1.4.2\">\n<span class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"S4.T1.1.1.1.1.1.1.4.2.1\">Incorrect</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.1.1.1.1.4.2.2\"><span class=\"ltx_text\" id=\"S4.T1.1.1.1.1.1.1.4.2.2.1\" style=\"color:#00E000;\">34</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T1.1.1.1.1.1.1.4.2.3\">40</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.1.1.1.1.4.2.4\"><span class=\"ltx_text\" id=\"S4.T1.1.1.1.1.1.1.4.2.4.1\" style=\"color:#00E000;\">55</span></span>\n<span class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.1.1.1.1.1.4.2.5\">29</span></span>\n<span class=\"ltx_tr\" id=\"S4.T1.1.1.1.1.1.1.5.3\">\n<span class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_bb ltx_border_r ltx_border_t ltx_rowspan ltx_rowspan_2\" id=\"S4.T1.1.1.1.1.1.1.5.3.1\"><span class=\"ltx_text\" id=\"S4.T1.1.1.1.1.1.1.5.3.1.1\">Low</span></span>\n<span class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"S4.T1.1.1.1.1.1.1.5.3.2\">Correct</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.1.1.1.1.5.3.3\">7</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.1.1.1.1.1.1.5.3.4\"><span class=\"ltx_text\" id=\"S4.T1.1.1.1.1.1.1.5.3.4.1\" style=\"color:#FF0000;\">14</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.1.1.1.1.5.3.5\">7</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.1.1.1.1.1.5.3.6\"><span class=\"ltx_text\" id=\"S4.T1.1.1.1.1.1.1.5.3.6.1\" style=\"color:#FF0000;\">9</span></span></span>\n<span class=\"ltx_tr\" id=\"S4.T1.1.1.1.1.1.1.6.4\">\n<span class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_bb ltx_border_r\" id=\"S4.T1.1.1.1.1.1.1.6.4.1\">Incorrect</span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.1.1.1.1.1.1.6.4.2\"><span class=\"ltx_text\" id=\"S4.T1.1.1.1.1.1.1.6.4.2.1\" style=\"color:#FF0000;\">1</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r\" id=\"S4.T1.1.1.1.1.1.1.6.4.3\">78</span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.1.1.1.1.1.1.6.4.4\"><span class=\"ltx_text\" id=\"S4.T1.1.1.1.1.1.1.6.4.4.1\" style=\"color:#FF0000;\">19</span></span>\n<span class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.1.1.1.1.1.1.6.4.5\">65</span></span>\n</span>\n</span></span></span>\n</span></span></span></p>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Confusion matrices (100 examples; 2 probability bins {high, low}) for <span class=\"ltx_text ltx_font_typewriter\" id=\"S4.T1.12.1\">rot-4</span> and <span class=\"ltx_text ltx_font_typewriter\" id=\"S4.T1.13.2\">rot-13</span>. <span class=\"ltx_text ltx_font_italic\" id=\"S4.T1.14.3\">Effect of memorization</span>: incorrect step outputs lead to correct final answers more often for <span class=\"ltx_text ltx_font_typewriter\" id=\"S4.T1.15.4\">rot-13</span> than <span class=\"ltx_text ltx_font_typewriter\" id=\"S4.T1.16.5\">rot-4</span>. 
<span class=\"ltx_text ltx_font_italic\" id=\"S4.T1.17.6\">Effect of probability</span>: Unfaithfulness has a positive effect more often in <span class=\"ltx_text\" id=\"S4.T1.18.7\" style=\"color:#00E000;\">high-probability bins</span> than in <span class=\"ltx_text\" id=\"S4.T1.19.8\" style=\"color:#FF0000;\">low-probability bins</span>; Conversely, unfaithfulness has a negative effect more often in <span class=\"ltx_text\" id=\"S4.T1.20.9\" style=\"color:#FF0000;\">low-probability bins</span> than in <span class=\"ltx_text\" id=\"S4.T1.21.10\" style=\"color:#00E000;\">high-probability bins</span>.</figcaption>\n</figure>",
|
| 89 |
+
"capture": "Table 1: Confusion matrices (100 examples; 2 probability bins {high, low}) for rot-4 and rot-13. Effect of memorization: incorrect step outputs lead to correct final answers more often for rot-13 than rot-4. Effect of probability: Unfaithfulness has a positive effect more often in high-probability bins than in low-probability bins; Conversely, unfaithfulness has a negative effect more often in low-probability bins than in high-probability bins."
|
| 90 |
+
}
|
| 91 |
+
},
|
| 92 |
+
"image_paths": {
|
| 93 |
+
"1": {
|
| 94 |
+
"figure_path": "2407.01687v2_figure_1.png",
|
| 95 |
+
"caption": "Figure 1: Overview. (1) Task: We have LLMs decode messages written in a shift cipher, in which each letter is shifted a fixed number of positions forward in the alphabet. (2) With standard prompting, GPT-4 performs poorly across most shift levels. (3) However, GPT-4 scores nearly perfectly on an isomorphic task based on numbers rather than letters. (4) With CoT prompting, GPT-4 adopts probabilistic and memorization-influenced noisy reasoning. That is, its performance (right) combines the trends we have hypothesized for each of the three factors on the left.",
|
| 96 |
+
"url": "http://arxiv.org/html/2407.01687v2/x1.png"
|
| 97 |
+
},
|
| 98 |
+
"4": {
|
| 99 |
+
"figure_path": "2407.01687v2_figure_4.png",
|
| 100 |
+
"caption": "Figure 4: Hypothetical accuracy vs. shift-level for various types of reasoning. Under noisy one-way, the model only shifts letters backward; under noisy two-way, it adopts the shorter path between going forward and backward. The hypothetical memorization accuracy is based on shift level frequencies in internet corpora. Probabilistic would involve much higher scores on high prob than low prob.",
|
| 101 |
+
"url": "http://arxiv.org/html/2407.01687v2/x2.png"
|
| 102 |
+
},
|
| 103 |
+
"5": {
|
| 104 |
+
"figure_path": "2407.01687v2_figure_5.png",
|
| 105 |
+
"caption": "Figure 5: \nThe logistic regression curve captures the overall trend exhibited by GPT-4.",
|
| 106 |
+
"url": "http://arxiv.org/html/2407.01687v2/x3.png"
|
| 107 |
+
},
|
| 108 |
+
"6": {
|
| 109 |
+
"figure_path": "2407.01687v2_figure_6.png",
|
| 110 |
+
"caption": "Figure 6: Actual overall decoding accuracy vs. faithful accuracy across shift levels showing the effects of probability. The effect is amplified for low probability outputs as seen in the larger drop in accuracy between the orange and blue bin 5 (low probability) lines.",
|
| 111 |
+
"url": "http://arxiv.org/html/2407.01687v2/x4.png"
|
| 112 |
+
},
|
| 113 |
+
"7": {
|
| 114 |
+
"figure_path": "2407.01687v2_figure_7.png",
|
| 115 |
+
"caption": "Figure 7: Normalized frequency distribution vs. predicted s\u2062h\u2062i\u2062f\u2062t\u2062_\u2062l\u2062e\u2062v\u2062e\u2062l\ud835\udc60\u210e\ud835\udc56\ud835\udc53\ud835\udc61_\ud835\udc59\ud835\udc52\ud835\udc63\ud835\udc52\ud835\udc59shift\\_levelitalic_s italic_h italic_i italic_f italic_t _ italic_l italic_e italic_v italic_e italic_l of step answers for rot-20 to rot-23. The appearance of peaks at 26\u2212s\u2062h\u2062i\u2062f\u2062t\u2062_\u2062l\u2062e\u2062v\u2062e\u2062l26\ud835\udc60\u210e\ud835\udc56\ud835\udc53\ud835\udc61_\ud835\udc59\ud835\udc52\ud835\udc63\ud835\udc52\ud835\udc5926-shift\\_level26 - italic_s italic_h italic_i italic_f italic_t _ italic_l italic_e italic_v italic_e italic_l in Math-CoT and Text-CoT prompts showcases the model\u2019s noisy attempt in taking the shorter path\u2014i.e., moving 26\u2212x26\ud835\udc6526-x26 - italic_x shifts forward.",
|
| 116 |
+
"url": "http://arxiv.org/html/2407.01687v2/x5.png"
|
| 117 |
+
},
|
| 118 |
+
"8": {
|
| 119 |
+
"figure_path": "2407.01687v2_figure_8.png",
|
| 120 |
+
"caption": "Figure 8: Accuracy for Text- and Math- CoT prompt styles with GPT-4. Math-CoT performs better than Text-CoT, but both display evidence of memorization as accuracy is highest for shift level 13\u2014the most frequent shift in real-world corpora.",
|
| 121 |
+
"url": "http://arxiv.org/html/2407.01687v2/x6.png"
|
| 122 |
+
},
|
| 123 |
+
"11": {
|
| 124 |
+
"figure_path": "2407.01687v2_figure_11.png",
|
| 125 |
+
"caption": "Figure 11: Accuracies with Claude 3 and Llama 3.1. The top panel shows the performance when prompted with Standard prompting. The bottom panel shows the trend with Text-CoT. The shaded regions indicate the gap between the low and high probability bins.",
|
| 126 |
+
"url": "http://arxiv.org/html/2407.01687v2/x7.png"
|
| 127 |
+
}
|
| 128 |
+
},
|
| 129 |
+
"validation": true,
|
| 130 |
+
"references": [
|
| 131 |
+
{
|
| 132 |
+
"1": {
|
| 133 |
+
"title": "Chapter 5 - cryptography.",
|
| 134 |
+
"author": "Jason Andress. 2014.",
|
| 135 |
+
"venue": "In Jason Andress, editor, The Basics of Information Security (Second Edition), second edition, pages 69\u201388. Syngress, Boston.",
|
| 136 |
+
"url": "https://doi.org/10.1016/B978-0-12-800744-0.00005-1"
|
| 137 |
+
}
|
| 138 |
+
},
|
| 139 |
+
{
|
| 140 |
+
"2": {
|
| 141 |
+
"title": "Claude 3.",
|
| 142 |
+
"author": "Anthropic. 2024.",
|
| 143 |
+
"venue": "https://www.anthropic.com/news/claude-3-family.",
|
| 144 |
+
"url": null
|
| 145 |
+
}
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"3": {
|
| 149 |
+
"title": "Generalization vs. memorization: Tracing language models\u2019 capabilities back to pretraining data.",
|
| 150 |
+
"author": "Antonis Antoniades, Xinyi Wang, Yanai Elazar, Alfonso Amayuelas, Alon Albalak, Kexun Zhang, and William Yang Wang. 2024.",
|
| 151 |
+
"venue": "In ICML 2024 Workshop on Foundation Models in the Wild.",
|
| 152 |
+
"url": "https://openreview.net/forum?id=0LaybrPql4"
|
| 153 |
+
}
|
| 154 |
+
},
|
| 155 |
+
{
|
| 156 |
+
"4": {
|
| 157 |
+
"title": "Language models are few-shot learners.",
|
| 158 |
+
"author": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D. Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020.",
|
| 159 |
+
"venue": "In Advances in Neural Information Processing Systems, volume 33, pages 1877\u20131901. Curran Associates, Inc.",
|
| 160 |
+
"url": "https://proceedings.neurips.cc/paper_files/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf"
|
| 161 |
+
}
|
| 162 |
+
},
|
| 163 |
+
{
|
| 164 |
+
"5": {
|
| 165 |
+
"title": "Retentive or forgetful? Diving into the knowledge memorizing mechanism of language models.",
|
| 166 |
+
"author": "Boxi Cao, Qiaoyu Tang, Hongyu Lin, Shanshan Jiang, Bin Dong, Xianpei Han, Jiawei Chen, Tianshu Wang, and Le Sun. 2024.",
|
| 167 |
+
"venue": "In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 14016\u201314036, Torino, Italia. ELRA and ICCL.",
|
| 168 |
+
"url": "https://aclanthology.org/2024.lrec-main.1222"
|
| 169 |
+
}
|
| 170 |
+
},
|
| 171 |
+
{
|
| 172 |
+
"6": {
|
| 173 |
+
"title": "Data distributional properties drive emergent in-context learning in transformers.",
|
| 174 |
+
"author": "Stephanie Chan, Adam Santoro, Andrew Lampinen, Jane Wang, Aaditya Singh, Pierre Richemond, James McClelland, and Felix Hill. 2022.",
|
| 175 |
+
"venue": "In Advances in Neural Information Processing Systems, volume 35, pages 18878\u201318891. Curran Associates, Inc.",
|
| 176 |
+
"url": "https://proceedings.neurips.cc/paper_files/paper/2022/file/77c6ccacfd9962e2307fc64680fc5ace-Paper-Conference.pdf"
|
| 177 |
+
}
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"7": {
|
| 181 |
+
"title": "Understanding in-context learning with a pelican soup framework.",
|
| 182 |
+
"author": "Ting-Rui Chiang and Dani Yogatama. 2024.",
|
| 183 |
+
"venue": "arXiv preprint arXiv:2402.10424.",
|
| 184 |
+
"url": null
|
| 185 |
+
}
|
| 186 |
+
},
|
| 187 |
+
{
|
| 188 |
+
"8": {
|
| 189 |
+
"title": "Reasoning about Knowledge.",
|
| 190 |
+
"author": "Ronald Fagin, Joseph Y. Halpern, Yoram Moses, and Moshe Vardi. 2004.",
|
| 191 |
+
"venue": "MIT press.",
|
| 192 |
+
"url": null
|
| 193 |
+
}
|
| 194 |
+
},
|
| 195 |
+
{
|
| 196 |
+
"9": {
|
| 197 |
+
"title": "Does learning require memorization? A short tale about a long tail.",
|
| 198 |
+
"author": "Vitaly Feldman. 2020.",
|
| 199 |
+
"venue": "In Proceedings of the 52nd Annual ACM SIGACT Symposium on Theory of Computing, pages 954\u2013959.",
|
| 200 |
+
"url": null
|
| 201 |
+
}
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"10": {
|
| 205 |
+
"title": "Towards revealing the mystery behind chain of thought: A theoretical perspective.",
|
| 206 |
+
"author": "Guhao Feng, Bohang Zhang, Yuntian Gu, Haotian Ye, Di He, and Liwei Wang. 2023.",
|
| 207 |
+
"venue": "In Advances in Neural Information Processing Systems, volume 36, pages 70757\u201370798. Curran Associates, Inc.",
|
| 208 |
+
"url": "https://proceedings.neurips.cc/paper_files/paper/2023/file/dfc310e81992d2e4cedc09ac47eff13e-Paper-Conference.pdf"
|
| 209 |
+
}
|
| 210 |
+
},
|
| 211 |
+
{
|
| 212 |
+
"11": {
|
| 213 |
+
"title": "Folio: Natural language reasoning with first-order logic.",
|
| 214 |
+
"author": "Simeng Han, Hailey Schoelkopf, Yilun Zhao, Zhenting Qi, Martin Riddell, Wenfei Zhou, James Coady, David Peng, Yujie Qiao, Luke Benson, Lucy Sun, Alex Wardle-Solano, Hannah Szabo, Ekaterina Zubova, Matthew Burtell, Jonathan Fan, Yixin Liu, Brian Wong, Malcolm Sailor, Ansong Ni, Linyong Nan, Jungo Kasai, Tao Yu, Rui Zhang, Alexander R. Fabbri, Wojciech Kryscinski, Semih Yavuz, Ye Liu, Xi Victoria Lin, Shafiq Joty, Yingbo Zhou, Caiming Xiong, Rex Ying, Arman Cohan, and Dragomir Radev. 2022.",
|
| 215 |
+
"venue": "arXiv preprint arXiv:2209.00840.",
|
| 216 |
+
"url": null
|
| 217 |
+
}
|
| 218 |
+
},
|
| 219 |
+
{
|
| 220 |
+
"12": {
|
| 221 |
+
"title": "In-context learning creates task vectors.",
|
| 222 |
+
"author": "Roee Hendel, Mor Geva, and Amir Globerson. 2023.",
|
| 223 |
+
"venue": "In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 9318\u20139333, Singapore. Association for Computational Linguistics.",
|
| 224 |
+
"url": "https://doi.org/10.18653/v1/2023.findings-emnlp.624"
|
| 225 |
+
}
|
| 226 |
+
},
|
| 227 |
+
{
|
| 228 |
+
"13": {
|
| 229 |
+
"title": "Towards reasoning in large language models: A survey.",
|
| 230 |
+
"author": "Jie Huang and Kevin Chen-Chuan Chang. 2023.",
|
| 231 |
+
"venue": "In Findings of the Association for Computational Linguistics: ACL 2023, pages 1049\u20131065, Toronto, Canada. Association for Computational Linguistics.",
|
| 232 |
+
"url": "https://doi.org/10.18653/v1/2023.findings-acl.67"
|
| 233 |
+
}
|
| 234 |
+
},
|
| 235 |
+
{
|
| 236 |
+
"14": {
|
| 237 |
+
"title": "Can large language models truly understand prompts? A case study with negated prompts.",
|
| 238 |
+
"author": "Joel Jang, Seonghyeon Ye, and Minjoon Seo. 2023.",
|
| 239 |
+
"venue": "In Transfer Learning for Natural Language Processing Workshop, pages 52\u201362. PMLR.",
|
| 240 |
+
"url": null
|
| 241 |
+
}
|
| 242 |
+
},
|
| 243 |
+
{
|
| 244 |
+
"15": {
|
| 245 |
+
"title": "The impact of reasoning step length on large language models.",
|
| 246 |
+
"author": "Mingyu Jin, Qinkai Yu, Dong Shu, Haiyan Zhao, Wenyue Hua, Yanda Meng, Yongfeng Zhang, and Mengnan Du. 2024.",
|
| 247 |
+
"venue": "In Findings of the Association for Computational Linguistics ACL 2024, pages 1830\u20131842, Bangkok, Thailand and virtual meeting. Association for Computational Linguistics.",
|
| 248 |
+
"url": "https://doi.org/10.18653/v1/2024.findings-acl.108"
|
| 249 |
+
}
|
| 250 |
+
},
|
| 251 |
+
{
|
| 252 |
+
"16": {
|
| 253 |
+
"title": "Large language models are zero-shot reasoners.",
|
| 254 |
+
"author": "Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. 2022.",
|
| 255 |
+
"venue": "In Advances in Neural Information Processing Systems.",
|
| 256 |
+
"url": "https://openreview.net/forum?id=e2TBb5y0yFf"
|
| 257 |
+
}
|
| 258 |
+
},
|
| 259 |
+
{
|
| 260 |
+
"17": {
|
| 261 |
+
"title": "In-context learning learns label relationships but is not conventional learning.",
|
| 262 |
+
"author": "Jannik Kossen, Yarin Gal, and Tom Rainforth. 2024.",
|
| 263 |
+
"venue": "In The Twelfth International Conference on Learning Representations.",
|
| 264 |
+
"url": "https://openreview.net/forum?id=YPIA7bgd5y"
|
| 265 |
+
}
|
| 266 |
+
},
|
| 267 |
+
{
|
| 268 |
+
"18": {
|
| 269 |
+
"title": "Language models, like humans, show content effects on reasoning tasks.",
|
| 270 |
+
"author": "Andrew K. Lampinen, Ishita Dasgupta, Stephanie C. Y. Chan, Hannah R. Sheahan, Antonia Creswell, Dharshan Kumaran, James L. McClelland, and Felix Hill. 2024.",
|
| 271 |
+
"venue": "PNAS Nexus, 3(7):pgae233.",
|
| 272 |
+
"url": "https://doi.org/10.1093/pnasnexus/pgae233"
|
| 273 |
+
}
|
| 274 |
+
},
|
| 275 |
+
{
|
| 276 |
+
"19": {
|
| 277 |
+
"title": "Measuring faithfulness in chain-of-thought reasoning.",
|
| 278 |
+
"author": "Tamera Lanham, Anna Chen, Ansh Radhakrishnan, Benoit Steiner, Carson Denison, Danny Hernandez, Dustin Li, Esin Durmus, Evan Hubinger, Jackson Kernion, Kamil\u0117 Luko\u0161i\u016bt\u0117, Karina Nguyen, Newton Cheng, Nicholas Joseph, Nicholas Schiefer, Oliver Rausch, Robin Larson, Sam McCandlish, Sandipan Kundu, Saurav Kadavath, Shannon Yang, Thomas Henighan, Timothy Maxwell, Timothy Telleen-Lawton, Tristan Hume, Zac Hatfield-Dodds, Jared Kaplan, Jan Brauner, Samuel R. Bowman, and Ethan Perez. 2023.",
|
| 279 |
+
"venue": "Preprint, arXiv:2307.13702.",
|
| 280 |
+
"url": "https://arxiv.org/abs/2307.13702"
|
| 281 |
+
}
|
| 282 |
+
},
|
| 283 |
+
{
|
| 284 |
+
"20": {
|
| 285 |
+
"title": "Dissecting chain-of-thought: Compositionality through in-context filtering and learning.",
|
| 286 |
+
"author": "Yingcong Li, Kartik Sreenivasan, Angeliki Giannou, Dimitris Papailiopoulos, and Samet Oymak. 2023.",
|
| 287 |
+
"venue": "In Thirty-seventh Conference on Neural Information Processing Systems.",
|
| 288 |
+
"url": "https://openreview.net/forum?id=xEhKwsqxMa"
|
| 289 |
+
}
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"21": {
|
| 293 |
+
"title": "Chain of thought empowers transformers to solve inherently serial problems.",
|
| 294 |
+
"author": "Zhiyuan Li, Hong Liu, Denny Zhou, and Tengyu Ma. 2024.",
|
| 295 |
+
"venue": "In The Twelfth International Conference on Learning Representations.",
|
| 296 |
+
"url": "https://openreview.net/forum?id=3EWTEy9MTM"
|
| 297 |
+
}
|
| 298 |
+
},
|
| 299 |
+
{
|
| 300 |
+
"22": {
|
| 301 |
+
"title": "Text and patterns: For effective chain of thought, it takes two to tango.",
|
| 302 |
+
"author": "Aman Madaan and Amir Yazdanbakhsh. 2022.",
|
| 303 |
+
"venue": "arXiv preprint arXiv:2209.07686.",
|
| 304 |
+
"url": null
|
| 305 |
+
}
|
| 306 |
+
},
|
| 307 |
+
{
|
| 308 |
+
"23": {
|
| 309 |
+
"title": "Data contamination: From memorization to exploitation.",
|
| 310 |
+
"author": "Inbal Magar and Roy Schwartz. 2022.",
|
| 311 |
+
"venue": "In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 157\u2013165, Dublin, Ireland. Association for Computational Linguistics.",
|
| 312 |
+
"url": "https://doi.org/10.18653/v1/2022.acl-short.18"
|
| 313 |
+
}
|
| 314 |
+
},
|
| 315 |
+
{
|
| 316 |
+
"24": {
|
| 317 |
+
"title": "Embers of autoregression: Understanding large language models through the problem they are trained to solve.",
|
| 318 |
+
"author": "R. Thomas McCoy, Shunyu Yao, Dan Friedman, Mathew Hardy, and Thomas L. Griffiths. 2023.",
|
| 319 |
+
"venue": "arXiv preprint arXiv:2309.13638.",
|
| 320 |
+
"url": null
|
| 321 |
+
}
|
| 322 |
+
},
|
| 323 |
+
{
|
| 324 |
+
"25": {
|
| 325 |
+
"title": "The expressive power of transformers with chain of thought.",
|
| 326 |
+
"author": "William Merrill and Ashish Sabharwal. 2024.",
|
| 327 |
+
"venue": "In The Twelfth International Conference on Learning Representations.",
|
| 328 |
+
"url": "https://openreview.net/forum?id=NjNGlPh8Wh"
|
| 329 |
+
}
|
| 330 |
+
},
|
| 331 |
+
{
|
| 332 |
+
"26": {
|
| 333 |
+
"title": "The Llama 3 herd of models.",
|
| 334 |
+
"author": "MetaAI. 2024.",
|
| 335 |
+
"venue": "arXiv preprint arXiv:2407.21783.",
|
| 336 |
+
"url": null
|
| 337 |
+
}
|
| 338 |
+
},
|
| 339 |
+
{
|
| 340 |
+
"27": {
|
| 341 |
+
"title": "Rethinking the role of demonstrations: What makes in-context learning work?",
|
| 342 |
+
"author": "Sewon Min, Xinxi Lyu, Ari Holtzman, Mikel Artetxe, Mike Lewis, Hannaneh Hajishirzi, and Luke Zettlemoyer. 2022.",
|
| 343 |
+
"venue": "In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 11048\u201311064, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.",
|
| 344 |
+
"url": "https://doi.org/10.18653/v1/2022.emnlp-main.759"
|
| 345 |
+
}
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"28": {
|
| 349 |
+
"title": "GPT-4 technical report.",
|
| 350 |
+
"author": "OpenAI. 2023.",
|
| 351 |
+
"venue": "Preprint, arXiv:2303.08774.",
|
| 352 |
+
"url": "https://arxiv.org/abs/2303.08774"
|
| 353 |
+
}
|
| 354 |
+
},
|
| 355 |
+
{
|
| 356 |
+
"29": {
|
| 357 |
+
"title": "What in-context learning \u201clearns\u201d in-context: Disentangling task recognition and task learning.",
|
| 358 |
+
"author": "Jane Pan, Tianyu Gao, Howard Chen, and Danqi Chen. 2023.",
|
| 359 |
+
"venue": "In Findings of the Association for Computational Linguistics: ACL 2023, pages 8298\u20138319, Toronto, Canada. Association for Computational Linguistics.",
|
| 360 |
+
"url": "https://doi.org/10.18653/v1/2023.findings-acl.527"
|
| 361 |
+
}
|
| 362 |
+
},
|
| 363 |
+
{
|
| 364 |
+
"30": {
|
| 365 |
+
"title": "Let\u2019s think dot by dot: Hidden computation in transformer language models.",
|
| 366 |
+
"author": "Jacob Pfau, William Merrill, and Samuel R. Bowman. 2024.",
|
| 367 |
+
"venue": "In First Conference on Language Modeling.",
|
| 368 |
+
"url": "https://openreview.net/forum?id=NikbrdtYvG"
|
| 369 |
+
}
|
| 370 |
+
},
|
| 371 |
+
{
|
| 372 |
+
"31": {
|
| 373 |
+
"title": "Reasoning with language model prompting: A survey.",
|
| 374 |
+
"author": "Shuofei Qiao, Yixin Ou, Ningyu Zhang, Xiang Chen, Yunzhi Yao, Shumin Deng, Chuanqi Tan, Fei Huang, and Huajun Chen. 2023.",
|
| 375 |
+
"venue": "In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5368\u20135393, Toronto, Canada. Association for Computational Linguistics.",
|
| 376 |
+
"url": "https://doi.org/10.18653/v1/2023.acl-long.294"
|
| 377 |
+
}
|
| 378 |
+
},
|
| 379 |
+
{
|
| 380 |
+
"32": {
|
| 381 |
+
"title": "Language models are unsupervised multitask learners.",
|
| 382 |
+
"author": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019.",
|
| 383 |
+
"venue": "OpenAI Blog.",
|
| 384 |
+
"url": "https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf"
|
| 385 |
+
}
|
| 386 |
+
},
|
| 387 |
+
{
|
| 388 |
+
"33": {
|
| 389 |
+
"title": "Exploring the limits of transfer learning with a unified text-to-text transformer.",
|
| 390 |
+
"author": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020.",
|
| 391 |
+
"venue": "Journal of Machine Learning Research, 21(140):1\u201367.",
|
| 392 |
+
"url": null
|
| 393 |
+
}
|
| 394 |
+
},
|
| 395 |
+
{
|
| 396 |
+
"34": {
|
| 397 |
+
"title": "Impact of pretraining term frequencies on few-shot numerical reasoning.",
|
| 398 |
+
"author": "Yasaman Razeghi, Robert L. Logan IV, Matt Gardner, and Sameer Singh. 2022.",
|
| 399 |
+
"venue": "In Findings of EMNLP, pages 840\u2013854.",
|
| 400 |
+
"url": null
|
| 401 |
+
}
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"35": {
|
| 405 |
+
"title": "Language models are greedy reasoners: A systematic formal analysis of chain-of-thought.",
|
| 406 |
+
"author": "Abulhair Saparov and He He. 2023.",
|
| 407 |
+
"venue": "In The Eleventh International Conference on Learning Representations.",
|
| 408 |
+
"url": "https://openreview.net/forum?id=qFVVBzXxR2V"
|
| 409 |
+
}
|
| 410 |
+
},
|
| 411 |
+
{
|
| 412 |
+
"36": {
|
| 413 |
+
"title": "Functional benchmarks for robust evaluation of reasoning performance, and the reasoning gap.",
|
| 414 |
+
"author": "Saurabh Srivastava, Annarose M B, Anto P V, Shashank Menon, Ajay Sukumar, Adwaith Samod T, Alan Philipose, Stevin Prince, and Sooraj Thomas. 2024.",
|
| 415 |
+
"venue": "arXiv preprint arXiv:2402.19450.",
|
| 416 |
+
"url": null
|
| 417 |
+
}
|
| 418 |
+
},
|
| 419 |
+
{
|
| 420 |
+
"37": {
|
| 421 |
+
"title": "Chain of thoughtlessness? An analysis of CoT in planning.",
|
| 422 |
+
"author": "Kaya Stechly, Karthik Valmeekam, and Subbarao Kambhampati. 2024.",
|
| 423 |
+
"venue": "arXiv preprint arXiv:2405.04776.",
|
| 424 |
+
"url": null
|
| 425 |
+
}
|
| 426 |
+
},
|
| 427 |
+
{
|
| 428 |
+
"38": {
|
| 429 |
+
"title": "Challenging BIG-bench tasks and whether chain-of-thought can solve them.",
|
| 430 |
+
"author": "Mirac Suzgun, Nathan Scales, Nathanael Sch\u00e4rli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc Le, Ed Chi, Denny Zhou, and Jason Wei. 2023.",
|
| 431 |
+
"venue": "In Findings of the Association for Computational Linguistics: ACL 2023, pages 13003\u201313051, Toronto, Canada. Association for Computational Linguistics.",
|
| 432 |
+
"url": "https://doi.org/10.18653/v1/2023.findings-acl.824"
|
| 433 |
+
}
|
| 434 |
+
},
|
| 435 |
+
{
|
| 436 |
+
"39": {
|
| 437 |
+
"title": "Language models don\u2019t always say what they think: Unfaithful explanations in chain-of-thought prompting.",
|
| 438 |
+
"author": "Miles Turpin, Julian Michael, Ethan Perez, and Samuel Bowman. 2024.",
|
| 439 |
+
"venue": "Advances in Neural Information Processing Systems, 36.",
|
| 440 |
+
"url": null
|
| 441 |
+
}
|
| 442 |
+
},
|
| 443 |
+
{
|
| 444 |
+
"40": {
|
| 445 |
+
"title": "Planbench: An extensible benchmark for evaluating large language models on planning and reasoning about change.",
|
| 446 |
+
"author": "Karthik Valmeekam, Matthew Marquez, Alberto Olmo, Sarath Sreedharan, and Subbarao Kambhampati. 2023.",
|
| 447 |
+
"venue": "In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track.",
|
| 448 |
+
"url": "https://openreview.net/forum?id=YXogl4uQUO"
|
| 449 |
+
}
|
| 450 |
+
},
|
| 451 |
+
{
|
| 452 |
+
"41": {
|
| 453 |
+
"title": "Towards understanding chain-of-thought prompting: An empirical study of what matters.",
|
| 454 |
+
"author": "Boshi Wang, Sewon Min, Xiang Deng, Jiaming Shen, You Wu, Luke Zettlemoyer, and Huan Sun. 2023a.",
|
| 455 |
+
"venue": "In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2717\u20132739, Toronto, Canada. Association for Computational Linguistics.",
|
| 456 |
+
"url": "https://doi.org/10.18653/v1/2023.acl-long.153"
|
| 457 |
+
}
|
| 458 |
+
},
|
| 459 |
+
{
|
| 460 |
+
"42": {
|
| 461 |
+
"title": "Self-consistency improves chain of thought reasoning in language models.",
|
| 462 |
+
"author": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V. Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2023b.",
|
| 463 |
+
"venue": "In The Eleventh International Conference on Learning Representations.",
|
| 464 |
+
"url": "https://openreview.net/forum?id=1PL1NIMMrw"
|
| 465 |
+
}
|
| 466 |
+
},
|
| 467 |
+
{
|
| 468 |
+
"43": {
|
| 469 |
+
"title": "Reasoning about a rule.",
|
| 470 |
+
"author": "Peter C. Wason. 1968.",
|
| 471 |
+
"venue": "Quarterly Journal of Experimental Psychology, 20(3):273\u2013281.",
|
| 472 |
+
"url": null
|
| 473 |
+
}
|
| 474 |
+
},
|
| 475 |
+
{
|
| 476 |
+
"44": {
|
| 477 |
+
"title": "Psychology of reasoning: Structure and content, volume 86.",
|
| 478 |
+
"author": "Peter Cathcart Wason and Philip Nicholas Johnson-Laird. 1972.",
|
| 479 |
+
"venue": "Harvard University Press.",
|
| 480 |
+
"url": null
|
| 481 |
+
}
|
| 482 |
+
},
|
| 483 |
+
{
|
| 484 |
+
"45": {
|
| 485 |
+
"title": "Chain of thought prompting elicits reasoning in large language models.",
|
| 486 |
+
"author": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. 2022.",
|
| 487 |
+
"venue": "In Advances in Neural Information Processing Systems.",
|
| 488 |
+
"url": "https://openreview.net/forum?id=_VjQlMeSB_J"
|
| 489 |
+
}
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"46": {
|
| 493 |
+
"title": "Analyzing chain-of-thought prompting in large language models via gradient-based feature attributions.",
|
| 494 |
+
"author": "Skyler Wu, Eric Meng Shen, Charumathi Badrinath, Jiaqi Ma, and Himabindu Lakkaraju. 2023.",
|
| 495 |
+
"venue": "arXiv preprint arXiv:2307.13339.",
|
| 496 |
+
"url": null
|
| 497 |
+
}
|
| 498 |
+
},
|
| 499 |
+
{
|
| 500 |
+
"47": {
|
| 501 |
+
"title": "An explanation of in-context learning as implicit Bayesian inference.",
|
| 502 |
+
"author": "Sang Michael Xie, Aditi Raghunathan, Percy Liang, and Tengyu Ma. 2022.",
|
| 503 |
+
"venue": "In International Conference on Learning Representations.",
|
| 504 |
+
"url": "https://openreview.net/forum?id=RdJVFCHjUMI"
|
| 505 |
+
}
|
| 506 |
+
},
|
| 507 |
+
{
|
| 508 |
+
"48": {
|
| 509 |
+
"title": "The unreliability of explanations in few-shot prompting for textual reasoning.",
|
| 510 |
+
"author": "Xi Ye and Greg Durrett. 2022.",
|
| 511 |
+
"venue": "In Advances in Neural Information Processing Systems.",
|
| 512 |
+
"url": "https://openreview.net/forum?id=Bct2f8fRd8S"
|
| 513 |
+
}
|
| 514 |
+
},
|
| 515 |
+
{
|
| 516 |
+
"49": {
|
| 517 |
+
"title": "Complementary explanations for effective in-context learning.",
|
| 518 |
+
"author": "Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, Veselin Stoyanov, Greg Durrett, and Ramakanth Pasunuru. 2023.",
|
| 519 |
+
"venue": "In Findings of the Association for Computational Linguistics: ACL 2023, pages 4469\u20134484, Toronto, Canada. Association for Computational Linguistics.",
|
| 520 |
+
"url": "https://doi.org/10.18653/v1/2023.findings-acl.273"
|
| 521 |
+
}
|
| 522 |
+
},
|
| 523 |
+
{
|
| 524 |
+
"50": {
|
| 525 |
+
"title": "Counterfactual memorization in neural language models.",
|
| 526 |
+
"author": "Chiyuan Zhang, Daphne Ippolito, Katherine Lee, Matthew Jagielski, Florian Tram\u00e8r, and Nicholas Carlini. 2023a.",
|
| 527 |
+
"venue": "Advances in Neural Information Processing Systems, 36:39321\u201339362.",
|
| 528 |
+
"url": null
|
| 529 |
+
}
|
| 530 |
+
},
|
| 531 |
+
{
|
| 532 |
+
"51": {
|
| 533 |
+
"title": "What and how does in-context learning learn? Bayesian model averaging, parameterization, and generalization.",
|
| 534 |
+
"author": "Yufeng Zhang, Fengzhuo Zhang, Zhuoran Yang, and Zhaoran Wang. 2023b.",
|
| 535 |
+
"venue": "arXiv preprint arXiv:2305.19420.",
|
| 536 |
+
"url": null
|
| 537 |
+
}
|
| 538 |
+
},
|
| 539 |
+
{
|
| 540 |
+
"52": {
|
| 541 |
+
"title": "Large language models are human-level prompt engineers.",
|
| 542 |
+
"author": "Yongchao Zhou, Andrei Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba. 2023.",
|
| 543 |
+
"venue": "In The Eleventh International Conference on Learning Representations.",
|
| 544 |
+
"url": "https://openreview.net/forum?id=92gvk82DE-"
|
| 545 |
+
}
|
| 546 |
+
}
|
| 547 |
+
],
|
| 548 |
+
"url": "http://arxiv.org/html/2407.01687v2"
|
| 549 |
+
}
|
20241004/2407.07087v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2407.08495v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2407.11041v4.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2407.18215v2.json
ADDED
|
@@ -0,0 +1,76 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Tool-Assisted Learning of Computational Reductions",
|
| 3 |
+
"abstract": "Computational reductions are an important and powerful concept in computer science. However, they are difficult for many students to grasp. In this paper, we outline a concept for how the learning of reductions can be supported by educational support systems. We present an implementation of the concept within such a system, concrete web-based and interactive learning material for reductions, and report on our experiences using the material in a large introductory course on theoretical computer science.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "1. Introduction",
|
| 9 |
+
"text": "A computational reduction is a computable function mapping source instances of an algorithmic problem to target instances of an algorithmic problem with the property that\nis a positive instance of \n if and only if is a positive instance of\nIntuitively, if the algorithmic problem reduces to via such a function, then it is \u201cat most as hard\u201d as . Depending on the context, additional conditions are placed on reductions, such as polynomial-time computability.\nComputational reductions are an important concept that recurs in several areas of computer science, such as in algorithms (as an abstraction of calls to sub-algorithms), in computability theory (for establishing undecidability of algorithmic problems), in complexity theory (for establishing NP-hardness of properties), and in SAT solving (as an abstraction for encoding procedures) (GareyJ1979).\nBecause of their importance, computational reductions are often covered in mandatory or elective courses within undergraduate computer science curricula (see, for example, (ACM2013; GI2016)). At our university, for example, computational reductions are part of a broad introductory course to theoretical computer science in the second year of a bachelor\u2019s degree, which covers material on formal languages, foundations of computability theory, and foundations of complexity theory.\nIn our experience, many students find computational reductions difficult and struggle to understand them. This often results in students being reluctant to even attempt exercises involving reductions because they are perceived as too difficult.\nIn this paper we address the question:\nHow can tools support the learning of computational reductions in large introductory courses?\nTool support ideally addresses both (1) students\u2019 difficulties in understanding and applying the concept of reductions and (2) students\u2019 reluctance due to the (perceived) difficulty of the subject.\nIn classical (analogue) computational reduction assignments, several types of exercises are often used, including exercises for (A) understanding the algorithmic problems involved, (B) exploring existing reductions via examples, and (C) designing reductions between algorithmic problems. These exercises are often combined into multi-step exercises in which students first explore two concrete algorithmic problems and then reduce one to the other.\nExisting tool support for learning reductions focuses on exercises of types (A) and (B) (see related work below). This is not surprising, as exercises and solution attempts of these types are easy to represent graphically, and it is straightforward to assess the correctness of student attempts and compute feedback. While exercises of type (C) have been addressed by some tools \u2014 mainly by having students write reductions in a general-purpose programming language \u2014 none of the tools focus on how to actually support students in learning to design reductions. There is also no tool support for multi-step exercises for learning computational reductions.\nIn particular, the lack of support for exercises of type (C) is unfortunate, because such exercises are a challenge for most students. The likely reason for this is that the design of reductions does not usually follow a straightforward path, but requires some creativity on the part of the student. 
To teach such creatively demanding tasks, research in cognitive science suggests that it is helpful to teach novices how experts approach a problem, see (HendersonMS2015) for a discussion and further references.\nWhen searching for a reduction, a typical approach for experts is to try sequentially a number of building blocks that they have previously encountered in the context of other reductions (GareyJ1979).\nAn example of such a building block is provided by the standard reduction from the problem of finding a directed Hamiltonian cycle to finding an undirected Hamiltonian cycle. This reduction transforms a directed graph into an undirected graph by mapping each node \n\n to a small gadget \n\n.\nSuch small graph gadgets \u2014 such as replacing nodes or edges, or introducing a small global graph with some properties \u2014 are typical building blocks when designing reductions. We envision helping students to learn how to design reductions by exposing them to such gadgets in given reductions, and then having them design similar reductions themselves.111Gal-Ezer and Trakhtenbrot take a superficially related approach by trying to identify typical patterns in reductions in undecidability proofs (Gal-EzerT16b). Of course, in this approach, students do not directly see how to come up with new building blocks \u2014 a task required in a research environment. But they do learn how to construct reductions in a simplified framework."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "2. Exercises and Multi-step Exercises",
|
| 15 |
+
"text": "In this section we analyse the requirements for tool support for computational reductions; in the next section we report on our implementation of these requirements within the educational support system Iltis. We aim to support students with exercises for (A) understanding the algorithmic problems involved, (B) exploring existing reductions via examples, and (C) designing reductions between computational problems. We discuss these types of exercises and how they can be flexibly combined into multi-step exercises."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "3. Computational Reductions in Iltis",
|
| 21 |
+
"text": "In the previous section, we saw that for supporting students in their learning of computational reductions, educational support systems require (R1) support for multi-step exercises; (R2) a graph construction task; and (R3) a reduction design task. We implement these requirements within the educational support system Iltis.\nThe educational support system Iltis allows teachers to flexibly build multi-step exercises from a portfolio of small, easily composable educational tasks. Each task is configurable by inputs \u2013 either given explicitly or as the output of previous tasks \u2013 and provides objects created by students within that task as outputs. The outputs can then be used by subsequent tasks.\nThus, (R1) is a built-in feature of Iltis, and we focus on (R2) and (R3) in the following."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "4",
|
| 25 |
+
"parent_section_id": null,
|
| 26 |
+
"section_name": "4. Use in the classroom",
|
| 27 |
+
"text": "We used tool-based exercises for computational reductions described above in an introductory course on theoretical computer science. In the following, we describe the setting of the course (Section 4.1 ###reference_###), how reduction-related multi-step exercises have been used in the course (Section 4.2 ###reference_###), and how the usage of the material was perceived by the students (Section 4.3 ###reference_###). We emphasize that a sound study of the effectiveness of our material is out of scope for this paper, instead we report on data that hints at its usefulness."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "4.1",
|
| 31 |
+
"parent_section_id": "4",
|
| 32 |
+
"section_name": "4.1. Setting",
|
| 33 |
+
"text": "The course Foundations of Theoretical Computer Science at Ruhr University Bochum, Germany is an introductory course in formal languages, automata theory, computability and complexity theory in the third semester of a bachelor\u2019s degree in computer science and two related degree programmes. The course spans 12 weeks and is divided equally into four parts: The first two parts focus on regular and context-free languages, the third on decidability and computability, and the fourth on complexity theory, in particular on P and NP and the relevance of NP-completeness. Reductions are introduced as a formal foundation for (un)decidability in part three and specialized to polynomial reductions as a basis for NP-hardness results in part four.\nWe shortly describe the organization of this course in the winter terms 2022/23 and 2023/24: (i) The content of the course is provided in lectures with slides, (ii) for practising the content, weekly homework assignment sheets are provided. Assignments are to be submitted partly in analogue form in groups of up to three students (paper-based assignments) and partly individually via the educational support system Iltis (web-based assignments). For points achieved on the assignments, a (small) bonus on the grade in the final exam is provided. Students can also use (iii) further assignments in the support system for free practice (practice assignments).\nPer week, (I) there are two 90\u2009min lectures in which the lecture slides are presented, also (II) there are tutorial sessions in which solutions for the homework assignments (and also other assignments) are discussed. Additionally, (III) in the second half of the course, a help desk for asking questions is offered."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4.2",
|
| 37 |
+
"parent_section_id": "4",
|
| 38 |
+
"section_name": "4.2. Material",
|
| 39 |
+
"text": "In winter term 2023/24, in addition to paper-based assignments, web-based assignments for computational reductions as described in this paper are used for the first time, at three stages:\nIn Week 9, computational reductions are introduced in the context of computability. In the lecture, the first examples are for familiar graph problems, later reductions for problems involving Turing machines are introduced. In the web-based assignments, we focus on graph-based reductions. Students explore a reduction via a global gadget; they prove that a given (very similar) function is not a reduction; and they construct a global gadget reduction that is very similar to the one they explored, but uses different algorithmic problems. For each algorithmic problem, tasks for understanding the problem are offered.\nIn Week 11, polynomial reductions are introduced. In the web-based assignments, we replicate Assignment Workflow 1, but use reductions with an edge gadget. Figure 1 ###reference_### shows an excerpt of these assignments.\nIn a recap at the end of the course, we iterate the assignment workflows for non-edge gadget constructions. Here, designing a reduction is embedded into a multi-step exercise for proving NP-hardness of an algorithmic problem. This includes exercises for students to select the correct direction of the reduction and choosing the correct time complexity of the constructed reduction. All the algorithmic problems have been encountered before, so no exercises for understanding these problems are necessary.\nThe material for the workflows is provided under the link https://iltis.cs.tu-dortmund.de/computational-reductions/ ###reference_nal-reductions/###."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4.3",
|
| 43 |
+
"parent_section_id": "4",
|
| 44 |
+
"section_name": "4.3. Experience Report",
|
| 45 |
+
"text": "We first report on challenges when teaching reductions and then discuss our experiences on the usefulness of our system.\nInstructors who taught the course report that reductions are one of the hardest topics of the course. Even though several weeks are spent on reductions, many students struggle and give up on this topic. Interestingly, this is not limited to the task of designing reductions \u2014 which is difficult and sometimes frustrating as it depends on experience and oftentimes a certain intuition about the problems involved \u2014 but also effects tasks with significantly lower difficulty, e.\u2009g. applying a reduction to a given input \u2014 that is, applying a function to a given input.\nThis can also be witnessed in exams, where sub-assignments that ask to provide positive/negative instances for problems and to apply functions to inputs are attempted less frequently than expected.\nWe discuss our experiences and observations when using the Assignment Workflows 1 \u2013 3.\nThe material was used extensively. As a point of reference, the web-based assignments in multiple choice format, that were posed in the weeks surrounding the weeks with the reduction assignments, were completed by an average of 151 students (176 in Week 8, 162 in Week 10, 119 in Week 12).222The course was attended by students. At least one web-based assignment was completed by 311 students; 317 enrolled for the exam. Since assignments are not mandatory, the number of active students drops significantly over the semester.\nFor the reduction-related assignments, the single steps of the Assignment Workflows 1 and 2 in Weeks 9 and 11 were completed by between 100 and 140 students (as Workflow 3 was not part of the bonus point scheme, it was attempted much less often).\nFrom the students that completed the multi-step exercise for understanding the relevant problems, 88\u2009% (Workflow 1) to 92\u2009% (Workflow 2) also completed the assignments for designing a new reduction. These rates are considerably higher than rates estimated by teachers who grade paper-based assignments on reductions.\nThis suggests that our tool provides students with a sense of achievement.\nIn addition, students only needed an average of 2.0 (Workflow 1) to 2.2 (Workflow 2) attempts on the reduction design task.\nFrom our data we cannot conclude whether this effect manifested in more students than usual attempting the paper-based assignments on reductions, i.\u2009e. whether our material had an overall motivating effect. We leave this for future work.\nDuring the submission phase, after they designed a reduction, we asked students whether (i) they felt prepared to provide the reduction by the previous steps of the assignment workflows; (ii) they felt it was easy to input the reduction in the required format; and whether (iii) they felt prepared to write the reduction on paper after providing it in our task. For all questions, we used a\nsemantic differential\nscale from 1 to 5 with 1 meaning \u201cvery easy\u201d/\u201cvery well prepared\u201d and 5 meaning \u201cvery hard\u201d/\u201cvery badly prepared\u201d. 
As only 8 students answered the survey for Workflow 3 in the recap section, we focus on Workflows 1 and 2 for which we have \u00bf 90 answers each.\nFirst, we report on the answers to the survey after Workflow 1: For question (i), the average is 2.7, which means that on average, students felt only slightly prepared for the reduction by the previous workflow steps.\nFor question (ii), the average is 3.0, meaning that students found it moderately difficult to enter the reduction in the required format.\nFinally, for question (iii), the average is 3.3, meaning that the students did not really feel prepared to write the given reduction on paper in the usual notation.\nEven though the students were more familiar with the type of reduction exercises in the second workflow, the average of the answers after Workflows 1 and 2 was very close for all questions (differences of up to 0.3).\nFor all questions we observed a high variability of the answers (the standard deviation was between 1.0 and 1.3).\nAs designing reductions is inherently difficult, it is not surprising that students tend to feel that \u2014 even with tool support \u2014 coming up with reductions is difficult. Yet, these ratings can be taken as an indication to improve the wording of the exercises and to provide more information on how to transfer reductions to paper."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "5",
|
| 49 |
+
"parent_section_id": null,
|
| 50 |
+
"section_name": "5. Conclusion and Outlook",
|
| 51 |
+
"text": "We outlined how support for learning computational reductions can be integrated into educational support systems such as Iltis. We devised teaching material and used it in a large undergraduate course. An important direction for future research is to explore whether our material reduces students\u2019 reluctance and initial barriers to engaging with computational reductions in assignments. The observation that our material provides a sense of achievement can serve as a starting point. It would also be beneficial to assess whether our material is effective for learning algorithmic problems and specific computational reductions. Similarly, exploring the extent to which students can transfer their knowledge of specific reductions and building blocks to the design of other reductions would be a worthwhile direction."
|
| 52 |
+
}
|
| 53 |
+
],
|
| 54 |
+
"appendix": [],
|
| 55 |
+
"tables": {},
|
| 56 |
+
"image_paths": {
|
| 57 |
+
"1(a)": {
|
| 58 |
+
"figure_path": "2407.18215v2_figure_1(a).png",
|
| 59 |
+
"caption": "Figure 1. Illustration of a multi-step exercise for constructing computational reductions between two problems. Some steps are illustrated by screenshots from the system. Parts of this illustration are adopted from (SchmellenkampVZ24).",
|
| 60 |
+
"url": "http://arxiv.org/html/2407.18215v2/x1.png"
|
| 61 |
+
},
|
| 62 |
+
"1(b)": {
|
| 63 |
+
"figure_path": "2407.18215v2_figure_1(b).png",
|
| 64 |
+
"caption": "Figure 1. Illustration of a multi-step exercise for constructing computational reductions between two problems. Some steps are illustrated by screenshots from the system. Parts of this illustration are adopted from (SchmellenkampVZ24).",
|
| 65 |
+
"url": "http://arxiv.org/html/2407.18215v2/x2.png"
|
| 66 |
+
},
|
| 67 |
+
"1(c)": {
|
| 68 |
+
"figure_path": "2407.18215v2_figure_1(c).png",
|
| 69 |
+
"caption": "Figure 1. Illustration of a multi-step exercise for constructing computational reductions between two problems. Some steps are illustrated by screenshots from the system. Parts of this illustration are adopted from (SchmellenkampVZ24).",
|
| 70 |
+
"url": "http://arxiv.org/html/2407.18215v2/x3.png"
|
| 71 |
+
}
|
| 72 |
+
},
|
| 73 |
+
"validation": true,
|
| 74 |
+
"references": [],
|
| 75 |
+
"url": "http://arxiv.org/html/2407.18215v2"
|
| 76 |
+
}
|
20241004/2407.19000v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2408.04226v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2408.04391v2.json
ADDED
|
@@ -0,0 +1,135 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "An ASME Journal Article Created Using LaTeX2\u03f5 in ASME Format for Testing Your Figures",
|
| 3 |
+
"abstract": "This is the abstract.\nThis article illustrates preparation of ASME paper using\nLaTeX2.\nAn abstract for an ASME paper should be less than 150 words and is normally in italics.\nPlease use this template to test how your figures will look on the printed journal page of the Journal of Mechanical Design. The Journal will no longer publish papers that contain errors in figure resolution. These usually consist of unreadable or fuzzy text, and pixilation or rasterization of lines. This template identifies the specifications used by JMD some of which may not be easily duplicated; for example, ASME actually uses Helvetica Condensed Bold, but this is not generally available so for the purpose of this exercise Helvetica is adequate. However, reproduction of the journal page is not the goal, instead this exercise is to verify the quality of your figures. Notice that this abstract is to be set in 9pt Times Italic, single spaced and right justified.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "This article illustrates preparation of ASME paper using LaTeX2. The LaTeX macro asme2ej.cls, the BibTeX style file asmems4.bst, and the template asme2ej.tex that create this article are available on the WWW at the URL address http://iel.ucdavis.edu/code/ ###reference_el.ucdavis.edu/code/###. To ensure compliance with the 2003 ASME MS4 style guidelines [asmemanual], you should modify neither the LaTeX macro asme2ej.cls nor the BibTeX style file asmems4.bst. By comparing the output generated by typesetting this file and the LaTeX2 source file, you should find everything you need to help you through the preparation of ASME paper using LaTeX2. Details on using LaTeX can be found in [latex].\nIn order to get started in generating a two-column version of your paper, please format the document with 0.75in top margin, 1.5in bottom margin and 0.825in left and right margins. Break the text into two sections one for the title heading, and another for the body of the paper.\nThe format of the heading is not critical, on the other hand formatting of the body of the text is the primary goal of this exercise. This will allow you to see that the figures are matched to the column width and font size of the paper. The double column of the heading section is set to 1.85in for the first column, a 0.5in spacing, and 4.5in for the second column. For the body of the paper, set it to 3.34in for both columns with 0.17in spacing, both are right justified.\nThe information that is the focus of this exercise is found in\nsection 6 ###reference_###.\nPlease use this template to format your paper in a way that is similar to the printed form of the Journal of Mechanical Design. This will allow you to verify that the size and resolution of your figures match the page layout of the journal. The ASME Journal of Mechanical Design will no longer publish papers that have the errors demonstrated here.\nASME simply requires that the font should be the appropriate size and not be blurred or pixilated, and that lines should be the appropriate weight and have minimal, preferably no, pixilation or rasterization.\nThe journal uses 10pt Times Roman Bold for headings, but Times Bold is good enough for this effort. The text is set at 9pt Times Roman, and again Times will be fine. Insert a new line after the heading, and two lines after each section. This is not exactly right but it is close enough."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Very Very Very Very Very Very Very Very Very Very Very Long Heading",
|
| 15 |
+
"text": "The heading is boldface with upper and lower case letters.\nIf the heading should run into more than one line, the run-over is not left-flushed."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "Second-Level Heading",
|
| 21 |
+
"text": "The next level of heading is also boldface with upper and lower case letters.\nThe heading is flushed left with the left margin. The spacing to the next heading is two line spaces."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.1.1",
|
| 25 |
+
"parent_section_id": "2.1",
|
| 26 |
+
"section_name": "2.1.1 Third-Level Heading.",
|
| 27 |
+
"text": "The third-level of heading follows the style of the second-level heading."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "Use of SI Units",
|
| 33 |
+
"text": "An ASME paper should use SI units. When preference is given to SI units, the U.S. customary units may be given in parentheses or omitted. When U.S. customary units are given preference, the SI equivalent shall be provided in parentheses or in a supplementary table."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Footnotes11footnotemark: 1",
|
| 39 |
+
"text": "Footnotes are referenced with superscript numerals and are numbered consecutively from 1 to the end of the paper333Avoid footnotes if at all possible.. Footnotes should appear at the bottom of the column in which they are referenced."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "5",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "Mathematics",
|
| 45 |
+
"text": "Equations should be numbered consecutively beginning with (1) to the end of the paper, including any appendices. The number should be enclosed in parentheses and set flush right in the column on the same line as the equation. An extra line of space should be left above and below a displayed equation or formula. LaTeX can automatically keep track of equation numbers in the paper and format almost any equation imaginable. An example is shown in Eqn. (1 ###reference_###). The number of a referenced equation in the text should be preceded by Eqn. unless the reference starts a sentence in which case Eqn. should be expanded to Equation."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "6",
|
| 49 |
+
"parent_section_id": null,
|
| 50 |
+
"section_name": "Figures",
|
| 51 |
+
"text": "All figures should be positioned at the top of the page where possible. All figures should be numbered consecutively and centered under the figure as shown in Fig. 1 ###reference_###. All text within the figure should be no smaller than 7 pt. There should be a minimum two line spaces between figures and text. The number of a referenced figure or table in the text should be preceded by Fig. or Tab. respectively unless the reference starts a sentence in which case Fig. or Tab. should be expanded to Figure or Table.\nIn the following subsections, I have inserted figures that have been provided by authors in order to demonstrate what to avoid. In each case the authors provided figures that are 3.25in wide and 600dpi in the .tif graphics format. The papers containing these figures have been held from production due to their poor quality."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "6.1",
|
| 55 |
+
"parent_section_id": "6",
|
| 56 |
+
"section_name": "The 1st Example of Bad Figure",
|
| 57 |
+
"text": "###figure_1### In order to place the figure in this template using MSWord, select Insert Picture from File, and use wrapping that is top and bottom. Make sure the figure is 3.25in wide.\nFigure \u20182 ###reference_###\nwas taken from a recent paper that was held from publication, because the text is fuzzy and unreadable. It was probably obtained by taking a screen shot of the computer output of the authors software. This means the original figure was 72dpi (dots per inch) on a computer screen. There is no way to improve the quality such a low resolution figure.\nIn order to understand how poor the quality of this figure is, please zoom in slightly, say to 200%. Notice that while the font of the paper is clear at this size, the font in the figures is fuzzy and blurred. It is impossible to make out the small symbol beside the numbers along the abscissa of the graph. Now consider the labels Time and Cost. They are clearly in fonts larger that the text of the article, yet the pixilation or rasterization, associated with low resolution is obvious. This figure must be regenerated at higher resolution to ensure quality presentation.\nThe poor quality of this figure is immediately obvious on the printed page, and reduces the impact of the research contribution of the paper, and in fact detracts from the perceived quality of the journal itself."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "6.2",
|
| 61 |
+
"parent_section_id": "6",
|
| 62 |
+
"section_name": "The 2nd Example of Bad Figure",
|
| 63 |
+
"text": "###figure_2### Figure 3 ###reference_###\ndemonstrates a common problem that arises when a figure is scaled down fit a single column width of 3.25in. The original figure had labels that were readable at full size, but become unreadable when scaled to half size. This figure also suffers from poor resolution as is seen in the jagged lines the ovals that form the chain.\nThis problem can be addressed by increasing the size of the figure to a double column width of 6.5in, so the text is readable. But this will not improve the line pixilation, and a large low resolution figure is less desirable than a small one. This also significantly expands the length of the paper, and may cause it to exceed the JMD nine page limit. Additional pages require page charges of $200 per page. It is best to regenerate the figure at the resolution that ensures a quality presentation."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "6.3",
|
| 67 |
+
"parent_section_id": "6",
|
| 68 |
+
"section_name": "The 3rd Example of Bad Figure",
|
| 69 |
+
"text": "###figure_3### \n###figure_4### An author provided the high resolution image\nin Fig. 4 ###reference_###\nthat was sized to a single column width of 3.25in. Upon seeing the poor quality of the text, the publisher scaled the image to double column width as shown in Fig. 5 ###reference_###\nat which point it took half of a page. The publisher went on to do this for all eight figures generating four pages of figures that the author did not expect. ASME stopped production of the paper even with the larger figures due to the pixilation of the font.\nClearly the text in this figure is unreadable, and it is doubtful that the author can print the output in a way that it is readable. This is a problem that the author must solve, not the publisher.\nAs you might expect, I have many more examples, but in the end the author is the best judge of what is needed in each figure. ASME simply requires that the image meet a minimum standard for font and line quality, specifically the font should be the appropriate size and not be blurred or pixilated, and that lines should be the appropriate weight and have minimal, preferably no, pixilation or rasterization."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "7",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "Tables",
|
| 75 |
+
"text": "All tables should be numbered consecutively and centered above the table as shown in Table 1 ###reference_###. The body of the table should be no smaller than 7 pt. There should be a minimum two line spaces between tables and text."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "8",
|
| 79 |
+
"parent_section_id": null,
|
| 80 |
+
"section_name": "Citing References",
|
| 81 |
+
"text": "The ASME reference format is defined in the authors kit provided by the ASME. The format is:\nText Citation. Within the text, references should be cited in numerical order according to their order of appearance. The numbered reference citation should be enclosed in brackets.\nThe references must appear in the paper in the order that they were cited. In addition, multiple citations (3 or more in the same brackets) must appear as a \u201c [1-3]\u201d. A complete definition of the ASME reference format can be found in the ASME manual [asmemanual].\nThe bibliography style required by the ASME is unsorted with entries appearing in the order in which the citations appear. If that were the only specification, the standard BibTeX unsrt bibliography style could be used. Unfortunately, the bibliography style required by the ASME has additional requirements (last name followed by first name, periodical volume in boldface, periodical number inside parentheses, etc.) that are not part of the unsrt style. Therefore, to get ASME bibliography formatting, you must use the asmems4.bst bibliography style file with BibTeX. This file is not part of the standard BibTeX distribution so you\u2019ll need to place the file someplace where LaTeX can find it (one possibility is in the same location as the file being typeset).\nWith LaTeX/BibTeX, LaTeX uses the citation format set by the class file and writes the citation information into the .aux file associated with the LaTeX source. BibTeX reads the .aux file and matches the citations to the entries in the bibliographic data base file specified in the LaTeX source file by the \\bibliography command. BibTeX then writes the bibliography in accordance with the rules in the bibliography .bst style file to a .bbl file which LaTeX merges with the source text. A good description of the use of BibTeX can be found in [latex, goosens] (see how two references are handled?). The following is an example of how three or more references [latex, asmemanual, goosens] show up using the asmems4.bst bibliography style file in conjunction with the asme2ej.cls class file. Here are some more [art, blt, ibk, icn, ips, mts, mis, pro, pts, trt, upd] which can be used to describe almost any sort of reference."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "9",
|
| 85 |
+
"parent_section_id": null,
|
| 86 |
+
"section_name": "Conclusions",
|
| 87 |
+
"text": "The only way to ensure that your figures are presented in the ASME Journal of Mechanical Design in the way you feel is appropriate and meets the requirement for quality presentation is for you to prepare a double column version of the paper in a form similar to that used by the Journal.\nThis gives you the opportunity to ensure that the figures are sized appropriately, in particular that the labels are readable and match the size of the text in the journal, and that the line weights and resolutions have no pixilation or rasterization. Poor quality figures are immediately obvious on the printed page, and this detracts from the perceived quality of the journal.\nI am pleased to provide advice on how to improve any figure, but this effort must start with a two-column version of the manuscript. Thank you in advance for your patience with this effort, it will ensure quality presentation of your research contributions."
|
| 88 |
+
}
|
| 89 |
+
],
|
| 90 |
+
"appendix": [
|
| 91 |
+
{
|
| 92 |
+
"section_id": "Appendix x1",
|
| 93 |
+
"parent_section_id": null,
|
| 94 |
+
"section_name": "Appendix A: Head of First Appendix",
|
| 95 |
+
"text": "Avoid Appendices if possible."
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"section_id": "Appendix x2",
|
| 99 |
+
"parent_section_id": null,
|
| 100 |
+
"section_name": "Appendix B: Head of Second Appendix",
|
| 101 |
+
"text": "The equation counter is not reset in an appendix and the numbers will\nfollow one continual sequence from the beginning of the article to the very end as shown in the following example."
|
| 102 |
+
}
|
| 103 |
+
],
|
| 104 |
+
"tables": {
|
| 105 |
+
"1": {
|
| 106 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S7.T1\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Figure and table captions do not end with a period</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S7.T1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S7.T1.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_t\" id=\"S7.T1.1.1.1.1\">Example</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_t\" id=\"S7.T1.1.1.1.2\">Time</th>\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_t\" id=\"S7.T1.1.1.1.3\">Cost</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S7.T1.1.2.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_t\" id=\"S7.T1.1.2.1.1\">1</th>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S7.T1.1.2.1.2\">12.5</td>\n<td class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S7.T1.1.2.1.3\">$1,000</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S7.T1.1.3.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_b\" id=\"S7.T1.1.3.2.1\">2</th>\n<td class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S7.T1.1.3.2.2\">24</td>\n<td class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S7.T1.1.3.2.3\">$2,000</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 107 |
+
"capture": "Table 1: Figure and table captions do not end with a period"
|
| 108 |
+
}
|
| 109 |
+
},
|
| 110 |
+
"image_paths": {
|
| 111 |
+
"2": {
|
| 112 |
+
"figure_path": "2408.04391v2_figure_2.png",
|
| 113 |
+
"caption": "Figure 2: Example taken from a paper that was held from production because the image quality is poor. ASME sets figures captions in 8pt, Helvetica Bold.",
|
| 114 |
+
"url": "http://arxiv.org/html/2408.04391v2/figure/FMANU_MD_05_1107_11.jpg"
|
| 115 |
+
},
|
| 116 |
+
"3": {
|
| 117 |
+
"figure_path": "2408.04391v2_figure_3.png",
|
| 118 |
+
"caption": "Figure 3: While this figures is easily readable at a double column width of 6.5in, when it is shrunk to 3.25in column width the text is unreadable. This paper was held from production.",
|
| 119 |
+
"url": "http://arxiv.org/html/2408.04391v2/figure/FMANU_MD_05_1272_5.jpg"
|
| 120 |
+
},
|
| 121 |
+
"4": {
|
| 122 |
+
"figure_path": "2408.04391v2_figure_4.png",
|
| 123 |
+
"caption": "Figure 4: Another example of a figure with unreadable text. Even when the paper was expanded to double column width the text as shown in Fig. 5 was of such low quality that the paper was held from production.",
|
| 124 |
+
"url": "http://arxiv.org/html/2408.04391v2/figure/FMANU_MD_04_1274_13.jpg"
|
| 125 |
+
},
|
| 126 |
+
"5": {
|
| 127 |
+
"figure_path": "2408.04391v2_figure_5.png",
|
| 128 |
+
"caption": "Figure 5: A figure expanded to double column width the text from Figure 4",
|
| 129 |
+
"url": "http://arxiv.org/html/2408.04391v2/figure/FMANU_MD_04_1274_13.jpg"
|
| 130 |
+
}
|
| 131 |
+
},
|
| 132 |
+
"validation": true,
|
| 133 |
+
"references": [],
|
| 134 |
+
"url": "http://arxiv.org/html/2408.04391v2"
|
| 135 |
+
}
|
20241004/2408.06520v2.json
ADDED
|
@@ -0,0 +1,141 @@
| 1 |
+
{
|
| 2 |
+
"title": "Retrieval-Augmented Hierarchical in-Context Reinforcement Learning and Hindsight Modular Reflections for Task Planning with LLMs",
|
| 3 |
+
"abstract": "Large Language Models (LLMs) have demonstrated remarkable abilities in various language tasks, making them promising candidates for decision-making in robotics. Inspired by Hierarchical Reinforcement Learning (HRL), we propose Retrieval-Augmented in-context reinforcement Learning (RAHL), a novel framework that decomposes complex tasks into sub-tasks using an LLM-based high-level policy, in which a complex task is decomposed into sub-tasks by a high-level policy on-the-fly. The sub-tasks, defined by goals, are assigned to the low-level policy to complete.\nTo improve the agent\u2019s performance in multi-episode execution, we propose Hindsight Modular Reflection (HMR), where, instead of reflecting on the full trajectory, we\nlet the agent reflect on shorter sub-trajectories to improve reflection efficiency. We evaluated the decision-making ability of the proposed RAHL in three benchmark environments\u2013ALFWorld, Webshop, and HotpotQA. The results show that RAHL can achieve an improvement in performance in 9%, 42%, and 10% in 5 episodes of execution in strong baselines.\nFurthermore, we also implemented RAHL on the Boston Dynamics SPOT robot. The experiment shows that the robot can scan the environment, find entrances, and navigate to new rooms controlled by the LLM policy.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "The recent advent of Large Language Models (LLMs) [1 ###reference_b1###, 2 ###reference_b2###, 3 ###reference_b3###, 4 ###reference_b4###, 5 ###reference_b5###] has revolutionized Artificial Intelligence (AI), prompting researchers to re-examine existing algorithms and applications within the context of these powerful models. LLMs have demonstrated remarkable few-shot in-context learning capabilities through prompts [1 ###reference_b1###, 6 ###reference_b6###], even surpassing traditional gradient-based approaches.\nAs a result, AI models built upon LLMs can be tailored to user needs without expensive fine-tuning or retraining, while still achieving competitive performance.\nOne particularly exciting area of research is the application of LLMs to robotic applications, including path planning [7 ###reference_b7###, 8 ###reference_b8###], grasping [9 ###reference_b9###, 10 ###reference_b10###], task planning [11 ###reference_b11###, 12 ###reference_b12###, 13 ###reference_b13###, 14 ###reference_b14###, 15 ###reference_b15###, 16 ###reference_b16###], skill sythesis [17 ###reference_b17###, 18 ###reference_b18###], scene understanding [19 ###reference_b19###], manipulation [20 ###reference_b20###, 21 ###reference_b21###, 22 ###reference_b22###, 23 ###reference_b23###, 24 ###reference_b24###], etc.\nIn these works, LLMs serve as policies\nand sometimes as evaluators.\n###figure_1### ###figure_2### In this work, we consider LLM-based task planning, where we transform LLMs into a Reinforcement Learning (RL) policy. Although several LLM-based task planning frameworks have been proposed, most of them do not close the loop, and, as a result, there is no policy improvement over multi-episodic execution.\nIn this work, we propose Retrieval-Augmented Hierarchical in-context reinforcement Learning (RAHL), a simple yet effective framework that combines Retrieval Augmented Generation (RAG) and goal-conditioned Hierarchical Reinforcement Learning (HRL) to enhance the performance of in-context RL. In RAHL, a high-level policy proposes sub-goals for a low-level policy to accomplish, enabling the decomposition of complex tasks into manageable sub-tasks. During this process, experiences summarized from previous sub-tasks will be retrieved from the memory as augmentation.\nFurthermore, we propose Hindsight Modular Reflection (HMR) to facilitate multi-episode learning. HMR decomposes the reflection process into two components: (1) low-level reflection, which focuses on the actions taken to achieve each sub-goal, and (2) high-level reflection, which considers the overall sequence of proposed sub-goals. By providing hierarchical reflections, HMR enables the agent to identify areas for improvement and refine its strategies accordingly.\nFig. 1(a) ###reference_sf1### presents a visual demonstration of the proposed RAHL framework with an example from the ALFWorld environment with the task: cool some mug and put it in cabinet. 
The generated action, which can be regarded as a plan because it is not directly mapped to the robot control signal, is sent to a lower-level executor to perform bottom-level control, i.e., a sequence of control signals.\nUnlike traditional HRL methods, such as Hierarchical Actor Critic (HAC) [25 ###reference_b25###] and option-critic [26 ###reference_b26###], the proposed method focuses more on language-integrated learning, that is, our method transforms a general LLM to an LLM that can perform decision-making and improve itself in a multi-episode execution process.\nTo evaluate the effectiveness of RAHL, we conduct experiments on three diverse decision-making datasets/environments: ALFWorld [27 ###reference_b27###], an indoor household environment for robotic tasks; Webshop [28 ###reference_b28###], an online shopping environment where the agent acts as a shopping assistant; and HotpotQA [29 ###reference_b29###], a database of search-based questions and answers. Our results demonstrate that RAHL significantly outperforms existing in-context learning methods. Specifically, RAHL achieves a 9% improvement in the success rate in ALFWorld, a 42% improvement in Webshop (establishing a new state-of-the-art) and a 10% improvement on HotpotQA in five episodes. Moreover, we also demonstrate the performance of RAHL using a robot experiment using Boston Dynamics SPOT.\nOur contribution can be summarized as follows.\nWe propose RAHL, a decision-making framework where the high-level policy decomposes the task into sub-tasks for the low-level policy to complete.\nWe propose HMR to promote the performance of verbal reflection. Instead of reflecting on the full trajectory, which could be inefficient because of the length of the trajectories, we propose two levels of reflection. In the low-level reflection, the low-level policy\nreflects on sub-trajectories separated by goals, while in the high-level reflection, the high-level policy reflects on the sequence of proposed goals.\nWe perform evaluations in three different benchmark environments and show that RAHL can achieve improvements over strong baselines. We also performed an ablation study to further quantify the contribution to the performance of each part of the framework.\nWe conducted hardware experiments to demonstrate the hardware integration capability of the proposed method. The robot used is a Boston Dynamics SPOT with two cameras mounted on the robot head facing front left and front right, respectively."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "II Related Work",
|
| 15 |
+
"text": "LLM-based task planning:\nThere are some works that study LLM-based task planning. However, some of these works [16 ###reference_b16###] directly combine LLMs with task-planning languages without further improvements. Other frameworks [11 ###reference_b11###, 12 ###reference_b12###, 13 ###reference_b13###, 14 ###reference_b14###, 15 ###reference_b15###] have more sophisticated designs, but do not incorporate environmental feedback for policy improvement. Although ISR-LLM [13 ###reference_b13###] adopts a feedback loop to improve the LLM-generated plan before execution, the feedback is based on an LLM evaluator, which might be prone to errors.\nIn-context RL:\nAs opposed to gradient-based RL, in-context RL does not train the model directly, but guides the model with contexts (e.g., instruction or few-shot examples). Yao et al. [30 ###reference_b30###] proposed ReAct, in which LLMs are guided to generate tractable thought traces to solve the problem. ReAct is an open-loop approach because there is no feedback involved. Building upon ReAct, Shinn et al. [31 ###reference_b31###] proposed Reflexion as a closed-loop solution, in which the reflection on the past episode is generated by the LLMs and used as part of the context for the next episode. Another work, ADaPT [32 ###reference_b32###], also adopts the open-loop design and generates a plan to decompose a task into smaller sub-tasks at the beginning of execution. In addition, ADaPT employs a task completion heuristic to determine whether a task is achievable. If not, they will break the task again into smaller sub-tasks.\nBrook et al. [33 ###reference_b33###] proposed model-based in-context learning to implement policy iteration without training the LLM parameters. The LLMs are used to predict the next state given the current state and action and are also used as the action value function.\nHao et al. [34 ###reference_b34###] proposed RAP, where they regard the LLM as the world model and the decision-maker with a reasoning tree as the backbone. Murthy et al. [35 ###reference_b35###] proposed REX, in which a Monte Carlo tree is used as the backbone to guide agent exploration combined with Upper Confidence Bound (UCB) to balance exploration and exploitation. Zhao et al. proposed ExpeL [36 ###reference_b36###], which first collects a few episodes of trajectories and uses them to generate a set of rules as insights to guide future executions. Belkhale et al. [37 ###reference_b37###] proposed RT-H, a hierarchical planning framework for robotic manipulation. Their framework is closely integrated with the Vision Language Model (VLM), which first predicts an action in natural language, and then translates this action into robot actions.\nAmong these methods,\nRT-H and ADaPT also adopt a hierarchical structure, but there is no policy improvement after the first episode."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "III Proposed Work",
|
| 21 |
+
"text": "In this section, we first define the problem for our work in Sect. III-A ###reference_###. We then introduce the proposed RAHL in Sect. III-B ###reference_###."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "III-A Problem Definition",
|
| 27 |
+
"text": "RL algorithms can solve problems modeled as a Markov Decision Process (MDP). An MDP is defined by a state space , which characterizes the system\u2019s properties, and an action space . The core component of an RL agent is the policy , which maps states to actions. After executing an action, the environment transitions to a new state according to a possibly unknown state transition function and generates a reward for the agent, defined by a reward function . The goal of RL algorithms is to maximize the expected return , where is the maximum number of steps, is the discount factor, and is the reward.\nIn our work, we consider a retrieval-augmented goal-conditioned MDP based on the conventional MDP introduced above, where the action depends not only on the observation but also on a goal, , generated by a high-level policy, , where is the state space augmented with high-level memory. Given the goal, the low-level policy , where is the low-level memory-augmented state space, makes decisions to achieve the goal.\nIn practice, since the state space actually consists of texts, we concatenate the retrieved reflections generated by HMR with the original state as input to the LLM policy.\nTo determine whether a goal has been reached, we introduce the finisher, .\nThe finisher determines whether a goal has been achieved by examining the execution history of the sub-task. It outputs \u201cYes\u201d if the goal is completed and \u201cNo\u201d otherwise. In particular, the finisher only takes the trajectory after the last completed goal as input. If the finisher determines that the current goal is achieved, the high-level policy is queried to propose a new goal to progress toward completing the main task. If the goal has not yet been achieved, the low-level policy is queried to generate another action."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "III-B RAHL",
|
| 33 |
+
"text": "RAHL leverages the in-context learning ability of LLMs by formatting relevant information as prompts. The prompt in our work consists of three parts\u2013i) the few-shot examples, ii) the retrieved high- and low-level reflections from the memory, and iii) the tags to guide the generation process. In the rest of this section, we will introduce these components and the decision-making workflow. The structure of the proposed framework is shown in Fig. 1(b) ###reference_sf2###.\nHierarchical in-context decision-making:\nThe generation process can be viewed as a hierarchical decision-making process. The high-level policy in this process generates goals, whereas the low-level policy generates actions to achieve those goals. Unlike ADaPT, which devises sub-tasks in the planning phase before execution, RAHL generates goals step by step based on the task and the history of the current episode. This online goal generation allows the agent to correct errors made in previous sub-tasks. The low-level policy focuses on achieving the given goal, regardless of the main task, effectively decomposing the complex task into smaller, more manageable sub-tasks that the language agent can solve. Following the approach of ReAct, we allow the agent to \u201cthink\u201d before acting, making the agent\u2019s behavior more tractable and explainable. This hierarchical process is performed by tag-guided prompting.\nTo mimic the human decision-making process, we inject prior knowledge into the process by guiding the agent with tags. Specifically, we define four kinds of tags\u2013[Goal], [Think], [Action], and [Finish]. The execution starts with a [Goal] tag followed by a [Think] tag to devise a plan for completing the goal. Then, a [Action] tag will be prompted to generate an action to interact with the environment. After each action, [Finish] will be asked to determine whether the goal is achieved. The input prompts to the LLM consist of three parts\u2013reflections from previous trajectories, which are stored in the long-term memory; the trajectory so far, which is stored in the short-term memory; and few-shot examples. The few-shot examples contain the entire decision-making process (i.e., full trajectories) from which the agent can learn. The intuition behind using full-trajectory examples instead of modular examples is that the agent needs to consider the entire trajectory to correct its mistakes and make more logical decisions. Fig. 2 ###reference_### shows an example process of generating goals and actions. Note that goals are not updated at every step; instead, they are only updated (i.e., regenerated) when the finisher returns 1.\n###figure_3### Hindsight modular reflection:\nThe hierarchical decision-making process might fail. As opposed to parameter-based methods, which update the model parameters when there is a failure,\nwe adopt modular verbal feedback to improve the agent\u2019s performance over multiple episodes, where the failed trials are summarized into one or two sentences and stored in the long-term memory for future trials. However, reflecting on the full trajectory, as adopted in Reflection, is not effective because the agent may have difficulty identifying where it went wrong due to the length of the trajectory.\nTo mitigate this problem, we propose HMR,\nwhere we directly regard the goals generated by the high-level policy as the intermediate goals and encourage the low-level policy to complete this goal, even if the goal is wrong. 
That is, the low-level policy needs to complete the goals proposed by the high-level policy, regardless of their correctness. In other words, the action generation process can be written as $a = \\pi_{l}(s_{l}, g)$ with $g = \\pi_{h}(s_{h})$,\nwhere $s_{l}$ and $s_{h}$ are the low- and high-level reflection-augmented states.\nIn this way, the reflection process is divided into two smaller but more specific reflection processes, corresponding to the low-level and high-level policies. The reason behind this is that shorter reflection inputs (i.e., sub-trajectories as opposed to full trajectories) lead to better reflection performance because irrelevant information is filtered out. As a result, the full trajectory is decomposed into one goal trajectory, which maintains the history of proposed goals, and multiple sub-trajectories, each corresponding to a sequence of actions taken to complete a goal.\nMemory and retrieval process:\nIn RAHL, we maintain a short-term memory that stores the trajectory so far in the current episode and a long-term memory that stores reflections from past experiences. The long-term memory is divided into two parts: high- and low-level memories, which store reflections from the high- and low-level policies, respectively. The long-term memory stores reflections as key-value pairs, with keys being the task or sub-task descriptions and values being the corresponding reflections.\nWhen a new query arrives at the high- or low-level memory, the retrieval process computes its embedding and then calculates the distance between the query\u2019s embedding and the keys\u2019 embeddings to find the top 2 matches. The corresponding values of these 2 matches, i.e., the reflections, are then used as part of the prompt."
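As an illustration of the key-value reflection memory and the top-2 retrieval step described in this paragraph, the sketch below stores reflections keyed by (sub-)task descriptions and ranks them by embedding distance. The embed_fn argument and the use of Euclidean distance are assumptions made for the example; the paper does not specify the embedding model or distance metric at this level of detail.

```python
import numpy as np

class ReflectionMemory:
    """Sketch of a long-term memory: keys are task or sub-task descriptions,
    values are the corresponding verbal reflections."""

    def __init__(self, embed_fn):
        # embed_fn: any text -> vector function (e.g., a sentence-embedding
        # model); it is an assumed component for this illustration.
        self.embed_fn = embed_fn
        self.keys, self.values, self.key_vecs = [], [], []

    def add(self, task_description, reflection):
        self.keys.append(task_description)
        self.values.append(reflection)
        self.key_vecs.append(np.asarray(self.embed_fn(task_description), dtype=float))

    def retrieve(self, query, top_k=2):
        # Rank stored keys by distance between the query embedding and the
        # key embeddings, and return the top-k reflections.
        if not self.keys:
            return []
        q = np.asarray(self.embed_fn(query), dtype=float)
        dists = [np.linalg.norm(q - k) for k in self.key_vecs]
        best = np.argsort(dists)[:top_k]
        return [self.values[i] for i in best]
```

The retrieved reflections would then be concatenated with the current state to form part of the prompt for the corresponding high- or low-level policy.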
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "IV Performance Evaluation",
|
| 39 |
+
"text": "In this section, we will introduce the three environments for evaluation in Sect. IV-A ###reference_###, and the experiment/simulation setup and comparison plan in Sect. IV-B ###reference_###. Then we will discuss the results obtained and the results of the ablation study in Sect. IV-C ###reference_### and IV-D ###reference_###, respectively."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4.1",
|
| 43 |
+
"parent_section_id": "4",
|
| 44 |
+
"section_name": "IV-A Environments",
|
| 45 |
+
"text": "We conducted experiments in three environments\u2013ALFWorld [27 ###reference_b27###], Webshop [28 ###reference_b28###], and HotpotQA [29 ###reference_b29###]. These environments are not typical machine learning datasets; rather, they are interactive environments that generate observations and receive actions in the form of texts. Although these may seem to be decoupled from robotic planning, we argue that the action space in these environments can be regarded as plans. With a plan, a low-level executor (e.g., a robot hand) can ground this plan to a sequence of control signals (e.g., move the index finger to the left by 1cm). Our work mainly focuses on the planning part, where we generate orders/commands for the robot to perform, and a low-level algorithm will translate these orders or commands to the robots\u2019 real action spaces. Therefore, the purpose of these environments is only to test the planning capacity of the proposed RAHL.\nALFWorld: ALFWorld is a household environment that requires an agent to make decisions over multiple steps to complete a task, such as putting a hot apple on the countertop. We follow the setup in Reflexion and run 134 scenarios across six different tasks, including moving a hidden object, cleaning an object, heating or cooling an object, etc. Typically, a task can be decomposed into several sequential sub-tasks.\nWebshop:\nWebshop is an online benchmark environment that tests the agent\u2019s ability to navigate through websites to locate items, select the correct options, and purchase items. There are two types of actions\u2013search for an item on the search page and click a button on the page. The agent will need to extract useful information from the observation and choose the correct option according to the instructions.\n###figure_4### ###figure_5### ###figure_6### HotpotQA: HotpotQA is a Wikipedia-based search simulator with 113K question-answer pairs. The agent needs to search, extract information from the search results, and combine information from multiple searches to obtain the final answer. There are three kinds of actions\u2013search an entity, look up a keyword in the searched content, and finish with an answer."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4.2",
|
| 49 |
+
"parent_section_id": "4",
|
| 50 |
+
"section_name": "IV-B Setup and Comparison",
|
| 51 |
+
"text": "To show the performance of the proposed RAHL, we compare the proposed RAHL with the following frameworks.\nRetroformer [38 ###reference_b38###]: Retroformer is a gradient-based framework, where the LLM is frozen and is used as the policy, while another smaller LM is trained to provide verbal feedback on the decisions based on the reward. The authors used Low Rank (LoRA) fine-tuning [39 ###reference_b39###] to reduce the number of parameters to be fine-tuned. In our comparison, we use for LoRA.\nADaPT [32 ###reference_b32###]: ADaPT will first try to solve the task, and if it fails, it will decompose the task into sub-tasks and try to solve these sub-tasks. The algorithm stops when the number of maximum recursions, , is reached. Although ADaPT does not execute in multiple episodes, it tries to solve the task times. Therefore, we do not count ADaPT\u2019s results as Pass@1 results.\nReflexion [31 ###reference_b31###]: Reflexion adopts ReAct [30 ###reference_b30###] as the bottom-level actor, which can reason about the current situation and generate tractable reasoning traces. At the end of each episode, Reflexion will generate reflections for the full episode following a few reflection examples.\nExpeL [36 ###reference_b36###]: ExpeL learns from past experiences by storing them in memory and retrieving similar experiences from the memory during execution. The retrieval is based on the vector distance between the query and key embeddings. ExpeL also maintains a rule set storing the rules summarized from past experiences.\nUnless otherwise specified, we use GPT-3.5-turbo as the LLM back-end. For Webshop and HotpotQA, we provide two full trajectories of successful execution to the agent as context. For ALFWorld, following Yao et al. [31 ###reference_b31###], we provide two task-specific examples to the agent for each of the six tasks. We also provide examples for the hindsight reflection. Specifically, we provide two examples of the goal-trajectory reflection and one example of the full-trajectory reflection.\nSince ADaPT retries to execute the task when failed, we do not count its performance at #Epi=1 as Pass@1 results."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4.3",
|
| 55 |
+
"parent_section_id": "4",
|
| 56 |
+
"section_name": "IV-C Results and Analysis",
|
| 57 |
+
"text": "The evaluation results of the three environments are presented in Table I ###reference_###.\nWe can observe that RAHL can achieve decent Pass@1 performance (i.e., the performance at the first episode without any experiences in the past), with only ADaPT achieving a slightly better performance in ALFWorld. The reason is that ADaPT tries to approach a problem multiple times, and if it fails in the first attempt, it will decompose the task into sub-tasks and try to approach it by completing the sub-tasks. Although this seems reasonable, a complex task structure might confuse the LLM and complicate the reasoning process. Therefore, RAHL can outperform ADaPT in the Webshop.\nAnother observation from the results is that Retroformer can achieve decent performances when #Epi=5, which owes to its gradient-based nature. However, since the gradient is not directly applied to the LLM agent but to the reflection agent, the Retroformer is still constrained by the ability of the reflection agent.\n###figure_7### We can also observe that RAHL can outperform Reflexion in both Pass@1 and Pass@4 performances, especially in ALFWorld and Webshop, which typically have long decision-making trajectories. This demonstrates the advantage of the hierarchical structure that RAHL has. We cannot directly observe which reflection approach is better, but we will present more insight on this in Sect. IV-D ###reference_###.\n###figure_8### ###figure_9###"
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.4",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "IV-D Ablation Study",
|
| 63 |
+
"text": "To quantify the contribution to the performance of each part of RAHL, we performed an ablation study by comparing RAHL with three different variants of RAHL.\nRAHL-Retry: We provide past failed full trajectories to the agent without any summarization or reflection. Since the trajectories are long, RAHL-Retry will terminate after a few episodes because of exceeding the maximum sequence length of the LLM.\nRAHL-Reflexion: We adopt Reflexion as the reflection technique where the reflection is on the full trajectory.\nRAHL w/o Tag: We let the LLM decide what the next step should be instead of guiding it with tags. In this way, in addition to generating actions in the environment\u2019s action spaces, it can also generate three types of actions: think, propose a goal, and finish a goal. Consequently, the reflection examples have been redesigned to remove the tags.\nNote that in the experiment, RAHL-Retry, RAHL-Reflexion, and RAHL-HMR have the same first episode because they all have RAHL as the decision maker, and the difference among them is the reflection technique. After we obtain the trajectory of the first episode using RAHL, we start directly from the second episode but with different long-term memories.\nThe results are presented in Fig. 3 ###reference_###. We can observe that the RAHL w/o Tag does not achieve comparable performances with other tag-guided methods, indicating that injecting the prior knowledge of humans\u2019 thought processes is helpful for LLM agents\u2019 decision-making. Moreover, RAHL-Reflexion and RAHL-HMR can outperform RAHL-Retry because experiences in human language can be perceived better by the LLM agent than plain trajectories, not to mention that retry-based methods are not scalable because of the possibility of exceeding the maximum sequence length of the LLMs. Another notable observation is that RAHL-HMR can outperform RAHL-Reflexion, because modular reflections are more efficient in identifying errors agents made, especially in long trajectories and text-heavy environments like ALFWorld (Fig. 3(a) ###reference_sf1###) and HotpotQA (Fig. 3(c) ###reference_sf3###).\nWe also present the results obtained using different LLM backends in Fig. 4 ###reference_###. It can be observed that GPT-4 can indeed bring about a significant performance improvement for simpler environments such as ALFWorld and Webshop. The reason for this improvement is that GPT-4 has better knowledge about the world because it is trained on more and newer data than GPT-3.5. In other words, tasks in ALFWorld, a household environment, and Webshop, an online shopping environment, can be completed with general human skills and knowledge, while questions in HotpotQA require the agent to search for the information. Furthermore, we can observe that GPT-3.5 with GPT-4 HMR can outperform GPT-3.5, which further demonstrates the importance of reflection.\nAnother interesting finding is that\nGPT-3.5 with GPT-4 HMR can outperform GPT-4 in HotpotQA, indicating that better LLMs do not necessarily lead to better performances in decision-making tasks."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "5",
|
| 67 |
+
"parent_section_id": null,
|
| 68 |
+
"section_name": "Hardware Experiment",
|
| 69 |
+
"text": "Besides the results obtained on the three benchmark environments, we also performed experiments with the Boston Dynamics SPOT robot dog. The main focus of this experiment is to show the ability of the proposed RAHL framework to be implemented in reality. The task in the experiment is navigation, where the robot needs to navigate to a person in the room next to it. To go to the destination room, the robot needs to look for a door and then look for the person after going through the door.\nThe system used in the experiment and the picture of the SPOT robot are shown in Fig. 5(a) ###reference_sf1###.\nWe design two experiments. In the first test, the robot faces the door in the beginning. The goal of this trial is to test the compatibility of GPT-4-turbo as the image analyst and GPT-4o as the decision-maker. As a result, the robot was able to recognize the door and go through the door to look for the person.\nThe second test increases the difficulty by positioning the robot in a corner of the room. To reach the person, one needs to scan the first room to find the door and then go through the door to find the person. The trial focuses more on the ability of the GPT-4o as the decision-maker. The robot was successful in finding the person in the end after exploring the first room. The trajectory of the second trial is presented in Fig. 5(b) ###reference_sf2###."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "6",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "VI Limitation and Discussion",
|
| 75 |
+
"text": "The proposed RAHL is essentially a deterministic policy for decision-making, with HMR being the update of the policy gradient. Though it can achieve decent performance and enhance interpretability, it still underperforms trained large models and has performance bottlenecks\nin complex tasks such as HotpotQA.\nThe reason for this is that LLMs rely heavily on the knowledge base acquired during their training on a large human language corpus. However, this knowledge base might be biased, causing LLMs to be stubborn in certain scenarios. This stubbornness is deep rooted in the knowledge of LLM. To tackle this, we will work on better reflection techniques or methods beyond reflections in the future to conquer this issue."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "7",
|
| 79 |
+
"parent_section_id": null,
|
| 80 |
+
"section_name": "VII Conclusion",
|
| 81 |
+
"text": "In this paper, we introduced Retrieval-Augmented in-context reinforcement Learning (RAHL), an in-context learning framework that decomposes complex tasks into simpler sub-tasks. A high-level policy is responsible for proposing goals that define sub-tasks, while a low-level policy makes decisions to achieve the goals. To enable the framework to improve over multiple episodes, we proposed Hindsight Modular Reflection (HMR), where we introduced low-level reflection and high-level reflection. The proposed framework was evaluated in three environments and the results showed that RAHL can outperform existing frameworks in both Pass@1 performances and multi-episode performances.\nIn the future, we plan to embed RAHL on drones as a planner, with a trained executor to map the plans to robot control signals and evaluate the system\u2019s performance as a whole."
|
| 82 |
+
}
|
| 83 |
+
],
|
| 84 |
+
"appendix": [],
|
| 85 |
+
"tables": {
|
| 86 |
+
"1": {
|
| 87 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T1\">\n<figcaption class=\"ltx_caption ltx_centering\" style=\"font-size:80%;\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S4.T1.4.1.1\" style=\"font-size:113%;\">TABLE I</span>: </span><span class=\"ltx_text\" id=\"S4.T1.5.2\" style=\"font-size:113%;\">Success rate in percentage for three datasets\u2013ALFWorld, Webshop, and HotpotQA. The highlighted results represent the best results in that row. #Epi represents the number of episodes.\n</span></figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.T1.6\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T1.6.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_r ltx_border_tt\" id=\"S4.T1.6.1.1.1\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.6.1.1.1.1\" style=\"font-size:80%;\">Datasets</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_r ltx_border_tt\" id=\"S4.T1.6.1.1.2\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.1.1.2.1\" style=\"font-size:80%;\">#Epi</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T1.6.1.1.3\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.1.1.3.1\" style=\"font-size:80%;\">Retroformer</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T1.6.1.1.4\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.1.1.4.1\" style=\"font-size:80%;\">ADaPT</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T1.6.1.1.5\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.1.1.5.1\" style=\"font-size:80%;\">Reflexion</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T1.6.1.1.6\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.1.1.6.1\" style=\"font-size:80%;\">ExpeL</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.T1.6.1.1.7\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.1.1.7.1\" style=\"font-size:80%;\">RAHL</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T1.6.2.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"S4.T1.6.2.1.1\" rowspan=\"2\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.6.2.1.1.1\" style=\"font-size:80%;\">Alfworld</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"S4.T1.6.2.1.2\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.2.1.2.1\" style=\"font-size:80%;\">1</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.6.2.1.3\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.2.1.3.1\" style=\"font-size:80%;\">62%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.6.2.1.4\" rowspan=\"2\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.2.1.4.1\" style=\"font-size:80%;\">71%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.6.2.1.5\" 
style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.2.1.5.1\" style=\"font-size:80%;\">54%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.6.2.1.6\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.2.1.6.1\" style=\"font-size:80%;\">59%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.6.2.1.7\" style=\"background-color:#D1D1FF;padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.2.1.7.1\" style=\"font-size:80%;background-color:#D1D1FF;\">67%</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.6.3.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" id=\"S4.T1.6.3.2.1\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.3.2.1.1\" style=\"font-size:80%;\">5</span></th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.6.3.2.2\" style=\"background-color:#D1D1FF;padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.3.2.2.1\" style=\"font-size:80%;background-color:#D1D1FF;\">100%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.6.3.2.3\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.3.2.3.1\" style=\"font-size:80%;\">78%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.6.3.2.4\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.3.2.4.1\" style=\"font-size:80%;\">64%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.6.3.2.5\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.3.2.5.1\" style=\"font-size:80%;\">87%</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.6.4.3\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"S4.T1.6.4.3.1\" rowspan=\"2\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.6.4.3.1.1\" style=\"font-size:80%;\">Webshop</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"S4.T1.6.4.3.2\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.4.3.2.1\" style=\"font-size:80%;\">1</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.6.4.3.3\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.4.3.3.1\" style=\"font-size:80%;\">33%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.6.4.3.4\" rowspan=\"2\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.4.3.4.1\" style=\"font-size:80%;\">45%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.6.4.3.5\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.4.3.5.1\" style=\"font-size:80%;\">19%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.6.4.3.6\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.4.3.6.1\" style=\"font-size:80%;\">35%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.6.4.3.7\" style=\"background-color:#D1D1FF;padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.4.3.7.1\" style=\"font-size:80%;background-color:#D1D1FF;\">69%</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.6.5.4\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r\" 
id=\"S4.T1.6.5.4.1\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.5.4.1.1\" style=\"font-size:80%;\">5</span></th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.6.5.4.2\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.5.4.2.1\" style=\"font-size:80%;\">36%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.6.5.4.3\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.5.4.3.1\" style=\"font-size:80%;\">28%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.6.5.4.4\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.5.4.4.1\" style=\"font-size:80%;\">41%</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.6.5.4.5\" style=\"background-color:#D1D1FF;padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.5.4.5.1\" style=\"font-size:80%;background-color:#D1D1FF;\">83%</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.6.6.5\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_bb ltx_border_r ltx_border_t\" id=\"S4.T1.6.6.5.1\" rowspan=\"2\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.6.6.5.1.1\" style=\"font-size:80%;\">HotpotQA</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_r ltx_border_t\" id=\"S4.T1.6.6.5.2\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.6.5.2.1\" style=\"font-size:80%;\">1</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.6.6.5.3\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.6.5.3.1\" style=\"font-size:80%;\">34%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.6.6.5.4\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.6.5.4.1\" style=\"font-size:80%;\">-</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.6.6.5.5\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.6.5.5.1\" style=\"font-size:80%;\">35%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.6.6.5.6\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.6.5.6.1\" style=\"font-size:80%;\">28%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.6.6.5.7\" style=\"background-color:#D1D1FF;padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.6.5.7.1\" style=\"font-size:80%;background-color:#D1D1FF;\">37%</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.6.7.6\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_row ltx_border_bb ltx_border_r\" id=\"S4.T1.6.7.6.1\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.7.6.1.1\" style=\"font-size:80%;\">5</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.6.7.6.2\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.7.6.2.1\" style=\"font-size:80%;\">53%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.6.7.6.3\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.7.6.3.1\" style=\"font-size:80%;\">-</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.6.7.6.4\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span 
class=\"ltx_text\" id=\"S4.T1.6.7.6.4.1\" style=\"font-size:80%;\">47%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.6.7.6.5\" style=\"padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.7.6.5.1\" style=\"font-size:80%;\">39%</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.6.7.6.6\" style=\"background-color:#D1D1FF;padding-left:3.0pt;padding-right:3.0pt;\"><span class=\"ltx_text\" id=\"S4.T1.6.7.6.6.1\" style=\"font-size:80%;background-color:#D1D1FF;\">57%</span></td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 88 |
+
"capture": "TABLE I: Success rate in percentage for three datasets\u2013ALFWorld, Webshop, and HotpotQA. The highlighted results represent the best results in that row. #Epi represents the number of episodes.\n"
|
| 89 |
+
}
|
| 90 |
+
},
|
| 91 |
+
"image_paths": {
|
| 92 |
+
"1(a)": {
|
| 93 |
+
"figure_path": "2408.06520v2_figure_1(a).png",
|
| 94 |
+
"caption": "(a)\nFigure 1: (a) Demonstration of a typical task in ALFWorld. The high-level reflection attempts to correct the errors made by the high-level policy, while the low-level reflection corrects the errors from the low-level policy. (b) Flow diagram of the proposed Retrieval-Augmented Hierarchical in-context reinforcement Learning (RAHL) and Hindsight Modular Reflection (HMR).",
|
| 95 |
+
"url": "http://arxiv.org/html/2408.06520v2/x1.png"
|
| 96 |
+
},
|
| 97 |
+
"1(b)": {
|
| 98 |
+
"figure_path": "2408.06520v2_figure_1(b).png",
|
| 99 |
+
"caption": "(b)\nFigure 1: (a) Demonstration of a typical task in ALFWorld. The high-level reflection attempts to correct the errors made by the high-level policy, while the low-level reflection corrects the errors from the low-level policy. (b) Flow diagram of the proposed Retrieval-Augmented Hierarchical in-context reinforcement Learning (RAHL) and Hindsight Modular Reflection (HMR).",
|
| 100 |
+
"url": "http://arxiv.org/html/2408.06520v2/x2.png"
|
| 101 |
+
},
|
| 102 |
+
"2": {
|
| 103 |
+
"figure_path": "2408.06520v2_figure_2.png",
|
| 104 |
+
"caption": "Figure 2: The decision-making process of RAHL.",
|
| 105 |
+
"url": "http://arxiv.org/html/2408.06520v2/x3.png"
|
| 106 |
+
},
|
| 107 |
+
"3(a)": {
|
| 108 |
+
"figure_path": "2408.06520v2_figure_3(a).png",
|
| 109 |
+
"caption": "(a) ALFWorld success rate.\nFigure 3: Success rate over five episodes in three datasets/environments using GPT-3.5-turbo. The results and confidence intervals are obtained over ten runs.",
|
| 110 |
+
"url": "http://arxiv.org/html/2408.06520v2/x4.png"
|
| 111 |
+
},
|
| 112 |
+
"3(b)": {
|
| 113 |
+
"figure_path": "2408.06520v2_figure_3(b).png",
|
| 114 |
+
"caption": "(b) Webshop success rate.\nFigure 3: Success rate over five episodes in three datasets/environments using GPT-3.5-turbo. The results and confidence intervals are obtained over ten runs.",
|
| 115 |
+
"url": "http://arxiv.org/html/2408.06520v2/x5.png"
|
| 116 |
+
},
|
| 117 |
+
"3(c)": {
|
| 118 |
+
"figure_path": "2408.06520v2_figure_3(c).png",
|
| 119 |
+
"caption": "(c) HotpotQA success rate.\nFigure 3: Success rate over five episodes in three datasets/environments using GPT-3.5-turbo. The results and confidence intervals are obtained over ten runs.",
|
| 120 |
+
"url": "http://arxiv.org/html/2408.06520v2/x6.png"
|
| 121 |
+
},
|
| 122 |
+
"4": {
|
| 123 |
+
"figure_path": "2408.06520v2_figure_4.png",
|
| 124 |
+
"caption": "Figure 4: Success rates in percentage obtained with different LLMs. GPT-3.5 and GPT-4 indicate the decision-making and reflection are performed with the same type of LLMs, while GPT-3.5 with GPT-4 HMR indicates GPT-3.5 is used for decision-making while GPT-4 is used for reflection. The results and confidence intervals are obtained over ten runs.",
|
| 125 |
+
"url": "http://arxiv.org/html/2408.06520v2/x7.png"
|
| 126 |
+
},
|
| 127 |
+
"5(a)": {
|
| 128 |
+
"figure_path": "2408.06520v2_figure_5(a).png",
|
| 129 |
+
"caption": "(a)\nFigure 5: (a) The diagram of the system designed for the experiment with Boston Dynamics SPOT. (b) Robot (green spearhead with the head pointing in the camera\u2019s direction) trajectory in the rooms to locate the target person (red dot).",
|
| 130 |
+
"url": "http://arxiv.org/html/2408.06520v2/x8.png"
|
| 131 |
+
},
|
| 132 |
+
"5(b)": {
|
| 133 |
+
"figure_path": "2408.06520v2_figure_5(b).png",
|
| 134 |
+
"caption": "(b)\nFigure 5: (a) The diagram of the system designed for the experiment with Boston Dynamics SPOT. (b) Robot (green spearhead with the head pointing in the camera\u2019s direction) trajectory in the rooms to locate the target person (red dot).",
|
| 135 |
+
"url": "http://arxiv.org/html/2408.06520v2/x9.png"
|
| 136 |
+
}
|
| 137 |
+
},
|
| 138 |
+
"validation": true,
|
| 139 |
+
"references": [],
|
| 140 |
+
"url": "http://arxiv.org/html/2408.06520v2"
|
| 141 |
+
}
|
20241004/2408.10902v3.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2409.04005v2.json
ADDED
|
@@ -0,0 +1,608 @@
| 1 |
+
{
|
| 2 |
+
"title": "Qihoo-T2X: An Efficient Proxy-Tokenized Diffusion Transformer for Text-to-Any-Task",
|
| 3 |
+
"abstract": "The global self-attention mechanism in diffusion transformers involves redundant computation due to the sparse and redundant nature of visual information, and the attention map of tokens within a spatial window shows significant similarity.\nTo address this redundancy, we propose the Proxy-Tokenized Diffusion Transformer (PT-DiT), which employs sparse representative token attention (where the number of representative tokens is much smaller than the total number of tokens) to model global visual information efficiently.\nSpecifically, within each transformer block, we compute an averaging token from each spatial-temporal window to serve as a proxy token for that region.\nThe global semantics are captured through the self-attention of these proxy tokens and then injected into all latent tokens via cross-attention.\nSimultaneously, we introduce window and shift window attention to address the limitations in detail modeling caused by the sparse attention mechanism.\nBuilding on the well-designed PT-DiT, we further develop the Qihoo-T2X family, which includes a variety of models for T2I, T2V, and T2MV tasks.\nExperimental results show that PT-DiT achieves competitive performance while reducing the computational complexity in both image and video generation tasks (e.g., a 49% reduction compared to DiT and a 34% reduction compared to PixArt-). The visual exhibition of Qihoo-T2X is available at https://360cvgroup.github.io/Qihoo-T2X/.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "###figure_1### Recent advancements in core diffusion models, including Sora (OpenAI, 2024 ###reference_b28###), Kling (Kuaishou, 2024 ###reference_b18###), Stable Diffusion 3 (Stability AI, 2024 ###reference_b40###), PixArt- (Chen et al., 2023 ###reference_b6###; 2024a ###reference_b7###; 2024b ###reference_b8###), Vidu (Shengshu AI, 2024 ###reference_b37###), Lumina-T2X (Gao et al., 2024 ###reference_b12###), Flux (BlackForestlabs AI, 2024 ###reference_b4###), and CogVideoX (Yang et al., 2024 ###reference_b47###), have led to significant achievements in the creation of photo-realistic image and video. Transformer-based models such as Sora and Vidu have demonstrated the ability to generate high-quality samples at arbitrary resolutions. These models also adhere strongly to scaling laws, achieving superior performance as parameter sizes increase. Additionally, Lumina-T2X has shown uniformity in performing various generation tasks, further validating the potential of the transformer-based architectures in diffusion models.\nHowever, the quadratic complexity of global self-attention concerning sequence length increases the computational cost of the Diffusion Transformer, leading to practical challenges such as longer generation times and higher training costs.\nThis issue also hinders the application of DiT to high-quality video generation. For example, while 3D attention-based approaches(Xu et al., 2024 ###reference_b44###; Yang et al., 2024 ###reference_b47###; Lab & etc., 2024 ###reference_b19###; Gao et al., 2024 ###reference_b12###) have demonstrated superiority over 2D spatial attention combined with 1D temporal attention counterparts(Zheng et al., 2024 ###reference_b51###; Ma et al., 2024b ###reference_b27###; Bar-Tal et al., 2024 ###reference_b3###; Blattmann et al., 2023 ###reference_b5###; Lu et al., 2023 ###reference_b25###), the extensive computational demands limit their scalability for higher-resolution and longer video generation.\nCurrent studies (Han et al., 2023 ###reference_b14###; Koner et al., 2024 ###reference_b17###; Yu et al., 2024 ###reference_b48###) in visual understanding and recognition have highlighted that global attention mechanisms often exhibit redundancy due to the sparse and repetitive nature of visual information. Specifically, by visualizing the attention map, we observe that the attention of tokens within the same window is similar for spatially distant tokens, while differing for spatially neighboring tokens, as illustrated in Fig. 
3 ###reference_###.\nThis observation indicates that the dense long-range attention, which triggers significant computational overhead, is redundant.\nThus, reducing this redundancy is believed to enhance the efficiency of Diffusion Transformers in generating higher-resolution images and longer videos.\nIn this paper, we propose the Proxy-Tokenized Diffusion Transformer (PT-DiT) and further present the Qihoo-T2X series, which includes both Text-to-Image, Text-to-Video, and Text-to-MultiView generation models.\nTo address the redundancy of visual information, PT-DiT employs proxy-tokenized attention instead of a global attention mechanism to reduce the computational complexity of visual token interaction.\nSpecifically, we first recover the spatial and temporal relationships of the token sequence through a reshaping operation.\nGiven the similarity of visual information within localized spatial regions and temporal frames, we calculate an averaging token from each spatial-temporal window as a representative token, forming a set of proxy tokens.\nThe interaction and broadcasting of visual global information are then achieved through self-attention among proxy tokens and cross-attention between proxy tokens and all latent tokens.\nAdditionally, to enhance the texture modeling capabilities, we introduce window attention and incorporate shift-window attention, similar to Swin Transformer (Liu et al., 2021 ###reference_b24###), to avoid lattice artifacts as shown in Fig. 10 ###reference_###.\nWith the well-designed proxy-tokenized attention, PT-DiT can be adapted to both image and video generation tasks without structural adjustments. For image generation, as shown in Fig. 2 ###reference_###, compared to PixArt- (Chen et al., 2023 ###reference_b6###), our method achieves an approximate 33% reduction in computational complexity GFLOPs under the same parameter scale. For video generation, in contrast to 2D spatial and 1D temporal attention, which has limited spatial-temporal modeling, and 3D full-attention, which suffers from high computational complexity, PT-DiT can efficiently and comprehensively extracts 3D information, benefiting from proxy token interaction mechanisms.\n###figure_2### Experimental results demonstrate that our method achieves competitive performance with significant efficiency. As shown in Fig. 1 ###reference_###, Qihoo-T2I can generate high-quality and high-fidelity images while closely adhering to the provided text instructions. Meanwhile, for the image generation task, PT-DiT\u2019s computational complexity is 51% of DiT and 66% of PixArt- for the same parameter size. For the video generation task, despite having 3 million more parameters than EasyAnimateV4, the PT-DiT/H\u2019s computational complexity is only 82% of EasyAnimateV4 (Xu et al., 2024 ###reference_b44###) and 77% of CogVideoX (Yang et al., 2024 ###reference_b47###) for the same parameter size. Overall, using the standard 3D VAE settings ( spatial downsampling rate and temporal downsampling rate), experimental tests indicate that we can train the PT-DiT/XL (1.1B) model for images at a resolution of or for video at a resolution of on the 64GB Ascend 910B."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Related work",
|
| 15 |
+
"text": ""
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Method",
|
| 21 |
+
"text": ""
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "Redundancy Analysis",
|
| 27 |
+
"text": "###figure_3### Due to the sparsity and redundancy of visual information, global attention mechanisms in existing DiTs exhibit significant redundancy and computational complexity, particularly when processing high-resolution images and longer videos.\nWe analyze this computational redundancy by visualizing the self-attention maps.\nSpecifically, we examine the attention map of self-attention in PixArt- at a resolution of , as shown on the left in Fig. 3 ###reference_###.\nThe attention map for latent codes within a spatial window is then assembled, as depicted on the right side of Fig. 3 ###reference_### (where the vertical axis represents different tokens in a window, and the horizontal axis represents the correlation with all latent tokens).\nIt is evident that the attention maps for different tokens within the same window are nearly uniform for spatially distant tokens (i.e., at the same horizontal position, the vertical values are almost identical).\nMoreover, window tokens exhibit varying attention to spatially neighboring tokens.\nThis suggests that computing attention for all latent tokens is redundant, while attention for spatially neighboring tokens is critical.\nConsequently, we propose a sparse attention strategy that samples limited proxy tokens from each window to perform self-attention, thereby reducing redundancy and decreasing complexity. Additionally, the association between spatially neighboring tokens is established through window attention.\nFurther details are elaborated in Sec. 3.2 ###reference_###."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "Architecture of PT-DiT",
|
| 33 |
+
"text": "###figure_4### As shown in Fig. 4 ###reference_###, our proposed Proxy-Tokenized Diffusion Transformer (PT-DiT) introduces the proxy-tokenized mechanism to reduce the number of tokens involved in computing global self-attention, thereby efficiently establishing global visual associations.\nSpecifically, the latent code is passed through path embedding to obtain the latent code sequence . Subsequently, we add 3D positional encoding to and feed it into the well-designed Proxy-Tokenized Blocks (PT-Block).\nCompared to the vanilla diffusion transformer block, the PT-Block introduces a Global Information Interaction Module (GIIM) and a Texture Complement Module (TCM). The GIIM facilitates efficient interaction among all latent codes using sparse proxy-tokenized mechanisms, while the TCM further refines local detail through window attention and shift-window attention. Below, we describe the GIIM and TCM in detail."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3.2.1",
|
| 37 |
+
"parent_section_id": "3.2",
|
| 38 |
+
"section_name": "3.2.1 Global Information Interaction Module",
|
| 39 |
+
"text": "Given a series of latent tokens, we first sample a set of proxy tokens based on their spatial and temporal priors. Each proxy token represents a localization within the image or video and interacts with proxy tokens in other regions to establish global relationships. Then, the information contained in proxy tokens is propagated to latent tokens, enabling efficient global visual information interaction.\nSpecifically, we reshape the latent code sequence to , where , and\n denotes the frame, height, and width of video or image () in the latent space after patch embedding, thereby recovering its temporal and spatial connections. The set of proxy tokens is calculated from each window of size using the averaging operation. The parameters , , and indicate the compression ratios for frame, height, and width, respectively.\nEach proxy token represents tokens, modeling global information with the other proxy token through self-attention. Subsequently, cross-attention is performed to propagate the global visual information into all latent tokens , where the serves as the Query and the proxy tokens serve as the Key and Value. The above process is mathematically expressed as follows:\nwhere refers to the averaging operation applied to tokens within the same window to extract proxy tokens, and and represent the cross-attention and self-attention operations, respectively. Besides, we introduce a linear layer with zero initialization to enhance training stability.\nThis approach allows the PT-Block to achieve efficient global information modeling and avoids the computational overhead caused by redundant computations in self-attention. We will analyze the computational complexity advantages of GIIM further in Sec. 3.3 ###reference_###."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.2.2",
|
| 43 |
+
"parent_section_id": "3.2",
|
| 44 |
+
"section_name": "3.2.2 Texture Complement Module",
|
| 45 |
+
"text": "Due to the characteristics of the sparse proxy tokens interactions, the model\u2019s capacity to capture detailed textures is limited, making it challenging to meet the high-quality demands of generation tasks.\nTo solve this problem, we introduce localized window attention in the Texture Complement Module (TCM). Specifically, the latent tokens are reshaped to and self-attention is computed along the second dimension. Additionally, shift-window attention is integrated into TCM to avoid the \u201cgrid\u201d phenomenon caused by localized window attention. The formula for this process is as follows:\nwhere and denote shift-window attention and window attention respectively. Both window attention and shift-window attention introduce a visual prior to DiT, which aids in the construction of texture details and advances the training of visual generators.\nMoreover, the increase in computation is minimal due to the limited number of tokens in each window. We will analyze this in detail in Sec. 3.3 ###reference_###.\nThen, is reshaped to and fed into Textual Cross-Attention and MLP, similar to DiT."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.2.3",
|
| 49 |
+
"parent_section_id": "3.2",
|
| 50 |
+
"section_name": "3.2.3 Compression Ratios",
|
| 51 |
+
"text": "For the image generation task, we find that maintaining the same number of windows across different resolutions is crucial to ensure consistent semantic hierarchies, which aids in training process from low-to-high resolutions.\nAt the same time, the number of windows should be sufficient to prevent semantic richness within a window from causing a single token to inadequately represent the local area. Therefore, we set compression ratios (. , ) to (1, 2, 2), (1, 4, 4), (1, 8, 8), and (1, 16, 16) at 256, 512, 1024, and 2048 resolution respectively.\nIt is worth noting that when the input is an image, and will be set to . For the video generation task, we set = 4 across different resolution to maintain the temporal compression consistent. Owing to token compression in the frame, height and width dimensions, PT-DiT can effectively train a generator for longer videos."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "3.3",
|
| 55 |
+
"parent_section_id": "3",
|
| 56 |
+
"section_name": "Complexity Analysis",
|
| 57 |
+
"text": "With a small number of representative token attention, PT-DiT reduces the computational redundancy of the original full token self-attention. The advantages of our method in terms of computational complexity are further analyzed theoretically in the following.\nThe computational complexity of self-attention is , computed as follows:\nwhere denotes the length of latent tokens and represents feature dimension.\nSimilarly, the computational complexity of GIIM and TCM is computed as follow:\nObviously, due to the proxy-tokenized strategy, our method provides significant advantages, especially with larger compression ratios (, , ) and longer sequence lengths (). When (, , ) are (1, 2, 2), (1, 4, 4), (1, 8, 8), and (1, 16, 16) and the image resolution are 256 (), 512 (), 1024 (), and 2048 (), our method accounts for only 34.3%, 9.7%, 4.7%, and 2.3% of the total self-attention. In addition, PT-DiT offers even greater benefits for video generation tasks with longer sequence lengths. Experimental analysis is available in Sec. 4.4 ###reference_###."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4",
|
| 61 |
+
"parent_section_id": null,
|
| 62 |
+
"section_name": "Experiment",
|
| 63 |
+
"text": ""
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.1",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "Experimental Setup",
|
| 69 |
+
"text": "Training Setting.\nDue to limitations in computational resources, we only trained Qihoo-T2I and Qihoo-T2V based on PT-DiT/XL 1.1B.\nFollowing previous methods (Xu et al., 2024 ###reference_b44###; Yang et al., 2024 ###reference_b47###; Chen et al., 2023 ###reference_b6###), we utilize the T5 large language model as the text encoder and train Qihoo-T2I using a low-to-high resolution strategy divided into three stages. Detailed hyper-parameter settings and the model configurations for various scales of PT-DiT are provided in Appendix. A.1 ###reference_###.\nAblation Study.\nWe conduct ablation experiments using a class-conditional version of PT-DiT/S-Class (32M) on the ImageNet (Deng et al., 2009 ###reference_b9###) benchmark at 256 resolution. The AdamW optimizer is utilized with a constant learning rate of 1e-4. We train the models for 400,000 iterations with a batch size of 256, while maintaining an exponential moving average (EMA) of the model weights. During the inference, we set denoising step is 50 and use classifier-free guidance (cfg=6.0)."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.2",
|
| 73 |
+
"parent_section_id": "4",
|
| 74 |
+
"section_name": "Qualitative analysis",
|
| 75 |
+
"text": "###figure_5### Text-to-Image.\nWe provide a qualitative comparison of Qihoo-T2I with existing state-of-the-art Text-to-Image models (e.g., PixArt- and Flux), as shown in Fig. 5 ###reference_###. Qihoo-T2I exhibits competitive performance, generating photo-realistic images that align well with the provided text prompts. Additional samples generated by Qihoo-T2I can be found in the Project Homepage ###reference_###.\nText-to-Video.\nWe also compare Qihoo-T2V with the recently released open-source Text-to-Video models (i.e., EasyAnimateV4 and CogVideoX) at a resolution of 512, achieving comparable results, as depicted in Fig. 6 ###reference_###. More video samples are available in the Project Homepage ###reference_###.\n###figure_6### Text-to-MV.\nWe further explore the effectiveness of PT-DiT on Text-to-MultiView (T2MV) tasks.\nThe trained Qihoo-T2MV is capable of generating images from various viewpoints based on the provided text instruction, showcasing strong spatial consistency, as illustrated in Fig. 7 ###reference_###.\nFor detailed experimental and training setups, please refer to Appendix. A.2 ###reference_###.\n###figure_7###"
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "4.3",
|
| 79 |
+
"parent_section_id": "4",
|
| 80 |
+
"section_name": "Quantitative analysis",
|
| 81 |
+
"text": "MS-COCO.\nWe conduct experiments to quantitatively evaluate Qihoo-T2I using zero-shot FID-30K on the MS-COCO (Lin et al., 2014 ###reference_b22###) validation dataset, as shown in Table 8(a) ###reference_sf1###. Due to the distribution gap between our collected data and MS-COCO, there is a resulting decrease in FID (Heusel et al., 2017 ###reference_b16###) metrics. Nevertheless, Qihoo-T2I achieves a competitive score of 15.70.\nMSR-VTT and UCF-101.\nWe evaluate Qihoo-T2V on two standard video generation benchmarks, MSR-VTT (Xu et al., 2016 ###reference_b45###) and UCF-101 (Soomro et al., 2012 ###reference_b39###), at a resolution of 256. As shown in Table 8(b) ###reference_sf2###, Qihoo-T2V achieves state-of-the-art results among DiT-based approaches and demonstrates competitive performance compared to U-Net-based approaches. Notably, since CogVideoX, EasyAnimateV4, and Qihoo-T2V all utilize T5 as the text encoder, this creates a gap in CLIPSIM compared to methods that employ the CLIP as the text encoder, such as AnimateDiff, DynamiCrafter, PixelDance, and FancyVideo.\n###table_1### ###figure_8### ###figure_9###"
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "4.4",
|
| 85 |
+
"parent_section_id": "4",
|
| 86 |
+
"section_name": "Algorithmic Efficiency Comparison",
|
| 87 |
+
"text": "As discussed in Sec. 3.3 ###reference_###, our method effectively reduces complexity. In this section, we further analyze the computational advantages of PT-DiT in T2I and T2V tasks.\nIn the image generation task, similar to Fig. 2 ###reference_###, we conduct comparisons at different parameter scales. With equivalent parameter counts, we compared Lumina-Next (1.7B) to our PT-DiT/H (1.8B), DiT/B (0.13B) and DAM/B (0.13B) to our PT-DiT/B (0.14B), as illustrated in Fig. 8 ###reference_###. As shown on the left side of Fig. 8 ###reference_###, the GFLOPs of PT-DiT/H are significantly lower than Lumina-Next across multiple scales. Specifically, at resolutions of 512 and 2048, PT-DiT/H achieves complexity reduction of respectively 82.0% and 82.5%. Similarly, the right side of Fig. 8 ###reference_### indicates that PT-DiT/B requires 48.6% less computation than DiT/B at a resolution of 1024. Compared to DAM/B, which has an attention computation complexity of , our method exhibits a comparable level of computation complexity across all resolutions.\nIn the video generation task, we assess our model based from two aspects: computational complexity and GPU memory consumption, as illustrated in Fig. 9 ###reference_###. We conduct experiments using two scales of PT-DiT (i.e., PT-DiT/H (1.8B) for a consistent scale comparison and our utilized PT-DiT/XL (1.1B) for training Qihoo-T2V) and select the latest open-source T2V model (i.e., CogVideoX-2B (actual test at 1.7B) and EasyAnimateV4 (1.5B)) as the comparison methods. The left side of Fig. 9 ###reference_### displays the GFLOPs calculated at different resolutions, with the latent code set to a time dimension of 48.\nIt is obvious that, despite having the largest number of parameters, PT-DiT/H exhibits the lowest computational complexity.\nMeanwhile, the computational complexity of PT-DiT/XL employed by Qihoo-T2V is only 50% that of CogVideoX and EasyAnimateV4.\nOn the right side of Fig. 9 ###reference_###, we further compare the GPU memory usage during training with EasyAnimateV4 at a resolution of 512, across different frame counts.\nSince the T2V version of EasyAnimateV4 employs HunyuanDiT with full 3D attention, its memory consumption increases dramatically with the number of video frames. In contrast, PT-DiT, which also utilizes 3D spatial-temporal modeling, experiences only a slight increase in memory consumption due to its well-designed proxy-tokenized attention mechanism.\nThe above experiments demonstrate the potential of PT-DiT for generating longer and higher-resolution videos."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "4.5",
|
| 91 |
+
"parent_section_id": "4",
|
| 92 |
+
"section_name": "Ablation Study",
|
| 93 |
+
"text": "Major Component.\nWe conduct quantitative experiments to assess the effectiveness of the GIIM and TCM proposed in this paper. The absence of either GIIM or TCM results in a substantial performance loss (i.e., 19.30 23.71 or 19.30 69.07). Specifically, without TCM, the model struggles\n###figure_10### to capture fine details, making it challenging to meet generation tasks that demand high-quality detail, leading to a significant decline in performance. Additionally, we investigated the role of shift-window attention through both qualitative evaluation at a resolution of 512 and quantitative analyses at a resolution of 256, as illustrated in Fig. 10 ###reference_### and Table 10(a) ###reference_.sf1### respectively. As anticipated, there is a noticeable decrease (i.e., 19.30 23.59) in FID without shift-window attention, accompanied by pronounced \u201cgrid\u201d phenomena.\nProxy Token Extraction.\nAs illustrated in Table 10(b) ###reference_.sf2###, we explore three methods for obtaining the proxy token: the top-left token, a randomly selected token, and averaging the in-window tokens. A performance gap exists between the Top-Left (20.84) or Random (21.00) selections and the averaging manner (19.30). We believe this gap arises because the random and top-left tokens fail to adequately represent the overall characteristics of the region, compromising the effectiveness of proxy-tokenized attention and leading to performance loss. We use averaging as the default setting.\nGlobal Information Injection.\nDue to the misalignment between the number of proxy tokens and latent tokens, we investigate three schemes for injecting global information into latent tokens: Cross-Attention, Interpolation, and Linear projection, as shown in Table 10(c) ###reference_.sf3###.\nAmong these, interpolation involves applying spatially bilinear interpolation to the proxy tokens, while linear projection aligns proxy tokens with latent tokens through a linear layer.\nSince each latent code can leverage global information from the entire set of proxy tokens, Cross-attention achieves a performance advantage with an FID of 19.30 compared to Linear projection at 20.24 and Interpolation at 21.82.\nCompressed Ratio.\nAs reported in Table 10(d) ###reference_.sf4###, we examine the impact of compression ratio on performance at a resolution of 256. It is evident that when the compression ratio is high, the representative token fails to adequately capture the features of the region for effective global modeling, leading to a noticeable decline in performance (i.e., from 19.30 to 21.24 at (1, 4, 4))."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "5",
|
| 97 |
+
"parent_section_id": null,
|
| 98 |
+
"section_name": "Conclusion",
|
| 99 |
+
"text": "Given the sparsity and redundancy of visual information, this paper proposes PT-DiT, which leverages the proxy-tokenized attention mechanism to mitigate the computational redundancy of self-attention in diffusion transformers. A series of representative tokens are calculated based on temporal and spatial priors, with global interactions between them. Additionally, window attention and shifted window attention are introduced to refine the modeling of local details. Our proposed representative token mechanism is particularly effective for video tasks with redundant information, enabling 3D spatio-temporal modeling while avoiding an explosion in computational complexity. Experiments demonstrates that PT-DiT achieves competitive performance while delivering significant efficiency.\nWe further develope the Qihoo-T2X series based on PT-DiT, including models like T2I, T2V, and T2MV. We hope PT-DiT and Qihoo-T2X can provide new insights and references for the field of diffusion transformers."
|
| 100 |
+
}
|
| 101 |
+
],
|
| 102 |
+
"appendix": [
|
| 103 |
+
{
|
| 104 |
+
"section_id": "Appendix 1",
|
| 105 |
+
"parent_section_id": null,
|
| 106 |
+
"section_name": "Appendix A Appendix",
|
| 107 |
+
"text": "We collect a total of 50M data points for the training set, including 32M images with an aesthetic score of 5.5 or higher from Laion (Schuhmann et al., 2022 ###reference_b36###) and 18M high-resolution, high-quality datasets that we constructed. During the high-resolution training phase, we exclusively use 18M high-quality data.\nWe train Qihoo-T2V by progressing through three stages starting from stage 1 of Qihoo-T2I, with detailed hyper-parameters shown in Table 3 ###reference_###. The WebVid 10M (Bain et al., 2021 ###reference_b1###) dataset is employed as the 256-resolution video training data. Additionally, we collect 3M high-resolution, high-quality video samples from the Internet to train the high-resolution video generator. The training objective for Qihoo-T2X is v-prediction, with an extracted text token length of 120. During the inference phase, the denoising steps are set to 50, and the scale of classifier-free guidance is set to 6.0. The specific parameter configurations for various scales of PT-DiT are presented in Table LABEL:model_config.\nBasic setting. Multi-view images of 3D objects can be interpreted as videos of static objects. We utilize a subset of approximately 40k samples from G-Objaverse (Qiu et al., 2024 ###reference_b32###), following the VideoMV (Zuo et al., 2024 ###reference_b53###), which is rendered as video data to train our Qihoo-T2MV model. Each object is rendered with a uniformly distributed azimuth from 0\u00b0\u2009 to 360\u00b0\u2009 and an elevation ranging from 5\u00b0\u2009 to 30\u00b0\u2009, resulting in a video.\nTraining setting. Following previous works (Zuo et al., 2024 ###reference_b53###; Shi et al., 2023 ###reference_b38###), we only accept text instruction as input to generate the Multi-View images of 3D object without additional reference images and camera parameters. The QIhoo-T2MV is trained from stage 2 of the Qihoo-T2I, with a bacthsize of 128 and 20k iterations. The other hyperparameters and experimental settings are the same as QIhoo-T2I."
|
| 108 |
+
}
|
| 109 |
+
],
|
| 110 |
+
"tables": {
|
| 111 |
+
"1": {
|
| 112 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>The quantitative evaluation of the Text-to-Image (a) and Text-to-Video (b) tasks.</figcaption><div class=\"ltx_flex_figure\">\n<div class=\"ltx_flex_cell ltx_flex_size_1\">\n<figure class=\"ltx_figure ltx_figure_panel ltx_align_center\" id=\"S4.F8.sf1\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.F8.sf1.2\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.F8.sf1.1.1\">\n<th class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"S4.F8.sf1.1.1.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">Method</th>\n<th class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.F8.sf1.1.1.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">FID-30k\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.F8.sf1.2.3.1\">\n<th class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.F8.sf1.2.3.1.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">DALL-E 2 <cite class=\"ltx_cite ltx_citemacro_citep\">(Ramesh et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.04005v2#bib.bib33\" title=\"\">2022</a>)</cite>\n</th>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F8.sf1.2.3.1.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">10.39</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F8.sf1.2.4.2\">\n<th class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_th ltx_th_row\" id=\"S4.F8.sf1.2.4.2.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">SD <cite class=\"ltx_cite ltx_citemacro_citep\">(Rombach et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.04005v2#bib.bib34\" title=\"\">2022</a>)</cite>\n</th>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf1.2.4.2.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">8.73</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F8.sf1.2.5.3\">\n<th class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_th ltx_th_row\" id=\"S4.F8.sf1.2.5.3.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">Imagen <cite class=\"ltx_cite ltx_citemacro_citep\">(Saharia et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.04005v2#bib.bib35\" title=\"\">2022</a>)</cite>\n</th>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf1.2.5.3.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">7.27</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F8.sf1.2.6.4\">\n<th class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_th ltx_th_row\" id=\"S4.F8.sf1.2.6.4.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">RAPHAEL <cite class=\"ltx_cite ltx_citemacro_citep\">(Xue et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.04005v2#bib.bib46\" title=\"\">2024</a>)</cite>\n</th>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf1.2.6.4.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">6.61</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F8.sf1.2.7.5\">\n<th class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_th ltx_th_row\" id=\"S4.F8.sf1.2.7.5.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">Kolors <cite class=\"ltx_cite ltx_citemacro_citep\">(Team, <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.04005v2#bib.bib42\" 
title=\"\">2024</a>)</cite>\n</th>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf1.2.7.5.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">23.15</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F8.sf1.2.2\">\n<th class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_th ltx_th_row\" id=\"S4.F8.sf1.2.2.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">PixArt- <cite class=\"ltx_cite ltx_citemacro_citep\">(Chen et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.04005v2#bib.bib6\" title=\"\">2023</a>)</cite>\n</th>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf1.2.2.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">10.65</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F8.sf1.2.8.6\">\n<th class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_th ltx_th_row\" id=\"S4.F8.sf1.2.8.6.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">Flux.1-dev<cite class=\"ltx_cite ltx_citemacro_citep\">(BlackForestlabs AI, <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.04005v2#bib.bib4\" title=\"\">2024</a>)</cite>\n</th>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf1.2.8.6.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">22.76</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F8.sf1.2.9.7\">\n<th class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_th ltx_th_row ltx_border_bb ltx_border_t\" id=\"S4.F8.sf1.2.9.7.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">Qihoo-T2I</th>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_bb ltx_border_t\" id=\"S4.F8.sf1.2.9.7.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">15.70</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_figure\">(a) </span>Quantitative evaluation on the MS-COCO FID-30K scores (zero-shot).</figcaption>\n</figure>\n</div>\n<div class=\"ltx_flex_break\"></div>\n<div class=\"ltx_flex_cell ltx_flex_size_1\">\n<figure class=\"ltx_figure ltx_figure_panel ltx_align_center\" id=\"S4.F8.sf2\">\n<table class=\"ltx_tabular ltx_centering ltx_align_middle\" id=\"S4.F8.sf2.5\">\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.F8.sf2.5.6.1\">\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_border_tt\" id=\"S4.F8.sf2.5.6.1.1\" rowspan=\"2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text\" id=\"S4.F8.sf2.5.6.1.1.1\">Method</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_tt\" id=\"S4.F8.sf2.5.6.1.2\" rowspan=\"2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text\" id=\"S4.F8.sf2.5.6.1.2.1\">Arc</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_r ltx_border_tt\" id=\"S4.F8.sf2.5.6.1.3\" rowspan=\"2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text\" id=\"S4.F8.sf2.5.6.1.3.1\">Data</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_r ltx_border_tt\" colspan=\"3\" id=\"S4.F8.sf2.5.6.1.4\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">UCF-101</td>\n<td class=\"ltx_td ltx_nopad_l ltx_align_center ltx_border_tt\" colspan=\"2\" id=\"S4.F8.sf2.5.6.1.5\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">MSR-VTT</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F8.sf2.5.5\">\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F8.sf2.1.1.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">FVD()</td>\n<td class=\"ltx_td ltx_nopad_l 
ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F8.sf2.2.2.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">IS()</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.F8.sf2.3.3.3\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">FID()</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F8.sf2.4.4.4\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">FVD()</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F8.sf2.5.5.5\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">CLIPSIM()</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F8.sf2.5.7.2\">\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_border_t\" id=\"S4.F8.sf2.5.7.2.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">AnimateDiff <cite class=\"ltx_cite ltx_citemacro_citep\">(Guo et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.04005v2#bib.bib13\" title=\"\">2023</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F8.sf2.5.7.2.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">U-Net</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.F8.sf2.5.7.2.3\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">10M</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F8.sf2.5.7.2.4\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">584.85</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F8.sf2.5.7.2.5\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">37.01</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.F8.sf2.5.7.2.6\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">61.24</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F8.sf2.5.7.2.7\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">628.57</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F8.sf2.5.7.2.8\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">0.2881</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F8.sf2.5.8.3\">\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left\" id=\"S4.F8.sf2.5.8.3.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">DynamiCrafter <cite class=\"ltx_cite ltx_citemacro_citep\">(Xing et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.04005v2#bib.bib43\" title=\"\">2023</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.8.3.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">U-Net</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_r\" id=\"S4.F8.sf2.5.8.3.3\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">10M</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.8.3.4\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.F8.sf2.5.8.3.4.1\">404.50</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.8.3.5\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">41.97</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_r\" id=\"S4.F8.sf2.5.8.3.6\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.F8.sf2.5.8.3.6.1\">32.35</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" 
id=\"S4.F8.sf2.5.8.3.7\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.F8.sf2.5.8.3.7.1\">219.31</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.8.3.8\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">0.2659</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F8.sf2.5.9.4\">\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left\" id=\"S4.F8.sf2.5.9.4.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">PixelDance <cite class=\"ltx_cite ltx_citemacro_citep\">(Zeng et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.04005v2#bib.bib49\" title=\"\">2024</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.9.4.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">U-Net</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_r\" id=\"S4.F8.sf2.5.9.4.3\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">10M</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.9.4.4\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.F8.sf2.5.9.4.4.1\">242.82</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.9.4.5\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.F8.sf2.5.9.4.5.1\">42.10</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_r\" id=\"S4.F8.sf2.5.9.4.6\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">49.36</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.9.4.7\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">381.00</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.9.4.8\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.F8.sf2.5.9.4.8.1\">0.3125</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F8.sf2.5.10.5\">\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left\" id=\"S4.F8.sf2.5.10.5.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">FancyVideo <cite class=\"ltx_cite ltx_citemacro_citep\">(Feng et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.04005v2#bib.bib11\" title=\"\">2024</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.10.5.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">U-Net</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_r\" id=\"S4.F8.sf2.5.10.5.3\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">10M</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.10.5.4\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">412.64</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.10.5.5\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.F8.sf2.5.10.5.5.1\">43.66</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_r\" id=\"S4.F8.sf2.5.10.5.6\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.F8.sf2.5.10.5.6.1\">47.01</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.10.5.7\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" 
id=\"S4.F8.sf2.5.10.5.7.1\">333.52</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.10.5.8\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.F8.sf2.5.10.5.8.1\">0.3076</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F8.sf2.5.11.6\">\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_border_t\" id=\"S4.F8.sf2.5.11.6.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">CogVideoX-2B<cite class=\"ltx_cite ltx_citemacro_citep\">(Yang et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.04005v2#bib.bib47\" title=\"\">2024</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F8.sf2.5.11.6.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">DiT</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.F8.sf2.5.11.6.3\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">35M</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F8.sf2.5.11.6.4\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.F8.sf2.5.11.6.4.1\">680.11</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F8.sf2.5.11.6.5\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">33.44</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.F8.sf2.5.11.6.6\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.F8.sf2.5.11.6.6.1\">62.57</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F8.sf2.5.11.6.7\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.F8.sf2.5.11.6.7.1\">418.14</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F8.sf2.5.11.6.8\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.F8.sf2.5.11.6.8.1\">0.2318</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F8.sf2.5.12.7\">\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left\" id=\"S4.F8.sf2.5.12.7.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">EasyAnimateV4 <cite class=\"ltx_cite ltx_citemacro_citep\">(Xu et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.04005v2#bib.bib44\" title=\"\">2024</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.12.7.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">DiT</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_r\" id=\"S4.F8.sf2.5.12.7.3\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">12M</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.12.7.4\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">694.80</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.12.7.5\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.F8.sf2.5.12.7.5.1\">44.09</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_r\" id=\"S4.F8.sf2.5.12.7.6\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">92.33</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.12.7.7\" 
style=\"padding-left:0.3pt;padding-right:0.3pt;\">568.99</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F8.sf2.5.12.7.8\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">0.2285</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F8.sf2.5.13.8\">\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_border_bb\" id=\"S4.F8.sf2.5.13.8.1\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">Qihoo-T2V</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_bb\" id=\"S4.F8.sf2.5.13.8.2\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">DiT</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_bb ltx_border_r\" id=\"S4.F8.sf2.5.13.8.3\" style=\"padding-left:0.3pt;padding-right:0.3pt;\">10M</td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_bb\" id=\"S4.F8.sf2.5.13.8.4\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.F8.sf2.5.13.8.4.1\">384.03</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_bb\" id=\"S4.F8.sf2.5.13.8.5\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.F8.sf2.5.13.8.5.1\">35.19</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_bb ltx_border_r\" id=\"S4.F8.sf2.5.13.8.6\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.F8.sf2.5.13.8.6.1\">51.95</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_bb\" id=\"S4.F8.sf2.5.13.8.7\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.F8.sf2.5.13.8.7.1\">375.23</span></td>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_bb\" id=\"S4.F8.sf2.5.13.8.8\" style=\"padding-left:0.3pt;padding-right:0.3pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.F8.sf2.5.13.8.8.1\">0.2349</span></td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_figure\">(b) </span>Quantitative evaluation on the UCF-101 <cite class=\"ltx_cite ltx_citemacro_citep\">(Soomro et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.04005v2#bib.bib39\" title=\"\">2012</a>)</cite> and MSR-VTT <cite class=\"ltx_cite ltx_citemacro_citep\">(Xu et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.04005v2#bib.bib45\" title=\"\">2016</a>)</cite>. The best and second performing metrics are highlighted in <span class=\"ltx_text ltx_font_bold\" id=\"S4.F8.sf2.8.1\">bold</span> and <span class=\"ltx_text ltx_framed ltx_framed_underline\" id=\"S4.F8.sf2.9.2\">underline</span> respectively.</figcaption>\n</figure>\n</div>\n<div class=\"ltx_flex_break\"></div>\n</div>\n</figure>",
|
| 113 |
+
"capture": "Table 1: The quantitative evaluation of the Text-to-Image (a) and Text-to-Video (b) tasks."
|
| 114 |
+
},
|
| 115 |
+
"2": {
|
| 116 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T2\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>Ablation study on PT-DiT/S-Class. Models are trained for 400k iterations.</figcaption><div class=\"ltx_flex_figure\">\n<div class=\"ltx_flex_cell ltx_flex_size_3\">\n<figure class=\"ltx_figure ltx_figure_panel ltx_align_center\" id=\"S4.F10.sf1\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.F10.sf1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.F10.sf1.1.1\">\n<th class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"S4.F10.sf1.1.1.2\" style=\"padding-left:1.7pt;padding-right:1.7pt;\">Structure</th>\n<th class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.F10.sf1.1.1.1\" style=\"padding-left:1.7pt;padding-right:1.7pt;\">FID-50k\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.F10.sf1.1.2.1\">\n<th class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.F10.sf1.1.2.1.1\" style=\"padding-left:1.7pt;padding-right:1.7pt;\">w/o GIIM</th>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_t\" id=\"S4.F10.sf1.1.2.1.2\" style=\"padding-left:1.7pt;padding-right:1.7pt;\">23.71</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F10.sf1.1.3.2\">\n<th class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_th ltx_th_row\" id=\"S4.F10.sf1.1.3.2.1\" style=\"padding-left:1.7pt;padding-right:1.7pt;\">w/o SWA</th>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center\" id=\"S4.F10.sf1.1.3.2.2\" style=\"padding-left:1.7pt;padding-right:1.7pt;\">23.59</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F10.sf1.1.4.3\">\n<th class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"S4.F10.sf1.1.4.3.1\" style=\"padding-left:1.7pt;padding-right:1.7pt;\">w/o TCM</th>\n<td class=\"ltx_td ltx_nopad_l ltx_nopad_r ltx_align_center ltx_border_bb\" id=\"S4.F10.sf1.1.4.3.2\" style=\"padding-left:1.7pt;padding-right:1.7pt;\">69.07</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_figure\">(a) </span>Major component.</figcaption>\n</figure>\n</div>\n<div class=\"ltx_flex_cell ltx_flex_size_3\">\n<figure class=\"ltx_figure ltx_figure_panel ltx_align_center\" id=\"S4.F10.sf2\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.F10.sf2.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.F10.sf2.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"S4.F10.sf2.1.1.2\" style=\"padding-left:6.0pt;padding-right:6.0pt;\">Method</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.F10.sf2.1.1.1\" style=\"padding-left:6.0pt;padding-right:6.0pt;\">FID-50k\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.F10.sf2.1.2.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.F10.sf2.1.2.1.1\" style=\"padding-left:6.0pt;padding-right:6.0pt;\">Average</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.F10.sf2.1.2.1.2\" style=\"padding-left:6.0pt;padding-right:6.0pt;\">19.30</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F10.sf2.1.3.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.F10.sf2.1.3.2.1\" 
style=\"padding-left:6.0pt;padding-right:6.0pt;\">Top-Left</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.F10.sf2.1.3.2.2\" style=\"padding-left:6.0pt;padding-right:6.0pt;\">20.84</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F10.sf2.1.4.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"S4.F10.sf2.1.4.3.1\" style=\"padding-left:6.0pt;padding-right:6.0pt;\">Random</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.F10.sf2.1.4.3.2\" style=\"padding-left:6.0pt;padding-right:6.0pt;\">21.00</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_figure\">(b) </span>Proxy token extraction.</figcaption>\n</figure>\n</div>\n<div class=\"ltx_flex_cell ltx_flex_size_3\">\n<figure class=\"ltx_figure ltx_figure_panel ltx_align_center\" id=\"S4.F10.sf3\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.F10.sf3.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.F10.sf3.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"S4.F10.sf3.1.1.2\" style=\"padding-left:5.4pt;padding-right:5.4pt;\">Method</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.F10.sf3.1.1.1\" style=\"padding-left:5.4pt;padding-right:5.4pt;\">FID-50k\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.F10.sf3.1.2.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.F10.sf3.1.2.1.1\" style=\"padding-left:5.4pt;padding-right:5.4pt;\">Cross-Attention</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.F10.sf3.1.2.1.2\" style=\"padding-left:5.4pt;padding-right:5.4pt;\">19.30</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F10.sf3.1.3.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.F10.sf3.1.3.2.1\" style=\"padding-left:5.4pt;padding-right:5.4pt;\">Interpolate</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.F10.sf3.1.3.2.2\" style=\"padding-left:5.4pt;padding-right:5.4pt;\">21.82</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F10.sf3.1.4.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"S4.F10.sf3.1.4.3.1\" style=\"padding-left:5.4pt;padding-right:5.4pt;\">Linear</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.F10.sf3.1.4.3.2\" style=\"padding-left:5.4pt;padding-right:5.4pt;\">20.24</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_figure\">(c) </span>Global information injection.</figcaption>\n</figure>\n</div>\n<div class=\"ltx_flex_break\"></div>\n<div class=\"ltx_flex_cell ltx_flex_size_1\">\n<figure class=\"ltx_figure ltx_figure_panel ltx_align_center\" id=\"S4.F10.sf4\">\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"S4.F10.sf4.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.F10.sf4.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"S4.F10.sf4.1.1.2\" style=\"padding-left:3.4pt;padding-right:3.4pt;\">Ratio</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"S4.F10.sf4.1.1.1\" style=\"padding-left:3.4pt;padding-right:3.4pt;\">FID-50k\n</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.F10.sf4.1.2.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.F10.sf4.1.2.1.1\" style=\"padding-left:3.4pt;padding-right:3.4pt;\">1, 2, 2</th>\n<td 
class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.F10.sf4.1.2.1.2\" style=\"padding-left:3.4pt;padding-right:3.4pt;\">19.30</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F10.sf4.1.3.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.F10.sf4.1.3.2.1\" style=\"padding-left:3.4pt;padding-right:3.4pt;\">1, 4, 4</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.F10.sf4.1.3.2.2\" style=\"padding-left:3.4pt;padding-right:3.4pt;\">21.24</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.F10.sf4.1.4.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"S4.F10.sf4.1.4.3.1\" style=\"padding-left:3.4pt;padding-right:3.4pt;\">1, 8, 8</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.F10.sf4.1.4.3.2\" style=\"padding-left:3.4pt;padding-right:3.4pt;\">20.43</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_figure\">(d) </span>Compressed ratio.</figcaption>\n</figure>\n</div>\n<div class=\"ltx_flex_break\"></div>\n</div>\n</figure>",
|
| 117 |
+
"capture": "Table 2: Ablation study on PT-DiT/S-Class. Models are trained for 400k iterations."
|
| 118 |
+
},
|
| 119 |
+
"3": {
|
| 120 |
+
"table_html": "<figure class=\"ltx_table\" id=\"A1.T3\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>The training setups of Qihoo-T2I and Qihoo-T2V</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"A1.T3.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"A1.T3.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_tt\" colspan=\"5\" id=\"A1.T3.1.1.1.1\" style=\"padding-left:2.3pt;padding-right:2.3pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"A1.T3.1.1.1.1.1\">Text-to-Image</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"5\" id=\"A1.T3.1.1.1.2\" style=\"padding-left:2.3pt;padding-right:2.3pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"A1.T3.1.1.1.2.1\">Text-to-Video</span></th>\n</tr>\n<tr class=\"ltx_tr\" id=\"A1.T3.1.2.2\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"A1.T3.1.2.2.1\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">Resolution</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"A1.T3.1.2.2.2\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">Data</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"A1.T3.1.2.2.3\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">Learning Rate</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"A1.T3.1.2.2.4\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">Batch Size</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"A1.T3.1.2.2.5\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">Iteration</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"A1.T3.1.2.2.6\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">Resolution # Frame</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"A1.T3.1.2.2.7\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">Data</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"A1.T3.1.2.2.8\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">Learning Rate</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"A1.T3.1.2.2.9\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">Batch Size</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"A1.T3.1.2.2.10\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">Iteration</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"A1.T3.1.3.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T3.1.3.1.1\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">256</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T3.1.3.1.2\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">50M</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T3.1.3.1.3\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">2e-5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T3.1.3.1.4\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">10240</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"A1.T3.1.3.1.5\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">100k</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T3.1.3.1.6\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">-</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T3.1.3.1.7\" 
style=\"padding-left:2.3pt;padding-right:2.3pt;\">-</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T3.1.3.1.8\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">-</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T3.1.3.1.9\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">-</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T3.1.3.1.10\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">-</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A1.T3.1.4.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.1.4.2.1\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">512</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.1.4.2.2\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">18M HQ</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.1.4.2.3\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">2e-5</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.1.4.2.4\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">768</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"A1.T3.1.4.2.5\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">50k</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.1.4.2.6\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">256 # 96</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.1.4.2.7\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">10M</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.1.4.2.8\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">2e-5</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.1.4.2.9\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">512</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T3.1.4.2.10\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">100k</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A1.T3.1.5.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T3.1.5.3.1\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">1024</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T3.1.5.3.2\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">18M HQ</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T3.1.5.3.3\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">2e-5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T3.1.5.3.4\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">512</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r\" id=\"A1.T3.1.5.3.5\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">50k</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T3.1.5.3.6\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">512 # 96</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T3.1.5.3.7\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">3M HQ</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T3.1.5.3.8\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">2e-5</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T3.1.5.3.9\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">256</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T3.1.5.3.10\" style=\"padding-left:2.3pt;padding-right:2.3pt;\">50k</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 121 |
+
"capture": "Table 3: The training setups of Qihoo-T2I and Qihoo-T2V"
|
| 122 |
+
},
|
| 123 |
+
"4": {
|
| 124 |
+
"table_html": "<figure class=\"ltx_table\" id=\"A1.T4\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 4: </span>The model configurations for various scales of PT-DiT.</figcaption>\n<table class=\"ltx_tabular ltx_centering ltx_guessed_headers ltx_align_middle\" id=\"A1.T4.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"A1.T4.1.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"A1.T4.1.1.1.1\">Model</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"A1.T4.1.1.1.2\">Layers</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"A1.T4.1.1.1.3\">Hidden Dim</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"A1.T4.1.1.1.4\">Head Number</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" id=\"A1.T4.1.1.1.5\">Param. (M)</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"A1.T4.1.2.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"A1.T4.1.2.1.1\">PT-DiT/S-Class</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T4.1.2.1.2\">10</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T4.1.2.1.3\">288</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T4.1.2.1.4\">6</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"A1.T4.1.2.1.5\">32</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A1.T4.1.3.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"A1.T4.1.3.2.1\">PT-DiT/B</th>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T4.1.3.2.2\">12</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T4.1.3.2.3\">640</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T4.1.3.2.4\">10</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T4.1.3.2.5\">144</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A1.T4.1.4.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"A1.T4.1.4.3.1\">PT-DiT/L</th>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T4.1.4.3.2\">28</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T4.1.4.3.3\">864</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T4.1.4.3.4\">12</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T4.1.4.3.5\">605</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A1.T4.1.5.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"A1.T4.1.5.4.1\">PT-DiT/XL</th>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T4.1.5.4.2\">28</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T4.1.5.4.3\">1152</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T4.1.5.4.4\">16</td>\n<td class=\"ltx_td ltx_align_center\" id=\"A1.T4.1.5.4.5\">1142</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"A1.T4.1.6.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"A1.T4.1.6.5.1\">PT-DiT/H</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T4.1.6.5.2\">30</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T4.1.6.5.3\">1440</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T4.1.6.5.4\">20</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"A1.T4.1.6.5.5\">1795</td>\n</tr>\n</tbody>\n</table>\n</figure>",
|
| 125 |
+
"capture": "Table 4: The model configurations for various scales of PT-DiT."
|
| 126 |
+
}
|
| 127 |
+
},
|
| 128 |
+
"image_paths": {
|
| 129 |
+
"1": {
|
| 130 |
+
"figure_path": "2409.04005v2_figure_1.png",
|
| 131 |
+
"caption": "Figure 1: The samples from Qihoo-T2I showcase high fidelity and aesthetic qualities, demonstrating a strong consistency with given textual descriptions.",
|
| 132 |
+
"url": "http://arxiv.org/html/2409.04005v2/x1.png"
|
| 133 |
+
},
|
| 134 |
+
"2": {
|
| 135 |
+
"figure_path": "2409.04005v2_figure_2.png",
|
| 136 |
+
"caption": "Figure 2: Comparison of complexity between PixArt-\u03b1\ud835\udefc\\alphaitalic_\u03b1 and PT-DiT/L at various resolutions.",
|
| 137 |
+
"url": "http://arxiv.org/html/2409.04005v2/x2.png"
|
| 138 |
+
},
|
| 139 |
+
"3": {
|
| 140 |
+
"figure_path": "2409.04005v2_figure_3.png",
|
| 141 |
+
"caption": "Figure 3: The attention map of self-attention in PixArt-\u03b1\ud835\udefc\\alphaitalic_\u03b1 at 512 resolution. We assemble the attention map for 16 tokens within a 4\u00d74444\\times 44 \u00d7 4 spatial window. The vertical axis represents different tokens within the window, and the horizontal axis represents their correlation with all latent tokens. It is evident that the attention of different tokens in the same window is almost identical for spatially distant tokens, whereas there is noticeable variation for spatially neighboring tokens.",
|
| 142 |
+
"url": "http://arxiv.org/html/2409.04005v2/x3.png"
|
| 143 |
+
},
|
| 144 |
+
"4": {
|
| 145 |
+
"figure_path": "2409.04005v2_figure_4.png",
|
| 146 |
+
"caption": "Figure 4: The overall architecture of PT-DiT.\nThe image or video undergoes processing through a 3D VAE, followed by noise addition, patch embedding, and positional encoding to generate latent tokens.\nWe replace global attention with proxy-tokenized attention to establish contextual associations and employ visual cross-attention to propagate this information to all tokens, thereby reducing computational redundancy. Moreover, texture detail modeling is enhanced through window attention and shifted window attention.",
|
| 147 |
+
"url": "http://arxiv.org/html/2409.04005v2/x4.png"
|
| 148 |
+
},
|
| 149 |
+
"5": {
|
| 150 |
+
"figure_path": "2409.04005v2_figure_5.png",
|
| 151 |
+
"caption": "Figure 5: Qualitative comparison of Text-to-Image generation models.",
|
| 152 |
+
"url": "http://arxiv.org/html/2409.04005v2/x5.png"
|
| 153 |
+
},
|
| 154 |
+
"6": {
|
| 155 |
+
"figure_path": "2409.04005v2_figure_6.png",
|
| 156 |
+
"caption": "Figure 6: Qualitative comparison of Text-to-Video generation models.",
|
| 157 |
+
"url": "http://arxiv.org/html/2409.04005v2/x6.png"
|
| 158 |
+
},
|
| 159 |
+
"7": {
|
| 160 |
+
"figure_path": "2409.04005v2_figure_7.png",
|
| 161 |
+
"caption": "Figure 7: Samples by Qihoo-T2MV. It is important to note that Qihoo-T2MV does not accept any image inputs or camera parameters and relies solely on text prompts.",
|
| 162 |
+
"url": "http://arxiv.org/html/2409.04005v2/x7.png"
|
| 163 |
+
},
|
| 164 |
+
"8": {
|
| 165 |
+
"figure_path": "2409.04005v2_figure_8.png",
|
| 166 |
+
"caption": "Figure 8: Comparison of image generation models in terms of GFLOPs.",
|
| 167 |
+
"url": "http://arxiv.org/html/2409.04005v2/x8.png"
|
| 168 |
+
},
|
| 169 |
+
"9": {
|
| 170 |
+
"figure_path": "2409.04005v2_figure_9.png",
|
| 171 |
+
"caption": "Figure 9: Comparison of video generation models in terms of GFLOPs and GPU memory usage.",
|
| 172 |
+
"url": "http://arxiv.org/html/2409.04005v2/x9.png"
|
| 173 |
+
},
|
| 174 |
+
"10": {
|
| 175 |
+
"figure_path": "2409.04005v2_figure_10.png",
|
| 176 |
+
"caption": "Figure 10: Ablation on shift-window attention.",
|
| 177 |
+
"url": "http://arxiv.org/html/2409.04005v2/x10.png"
|
| 178 |
+
}
|
| 179 |
+
},
|
| 180 |
+
"validation": true,
|
| 181 |
+
"references": [
|
| 182 |
+
{
|
| 183 |
+
"1": {
|
| 184 |
+
"title": "Frozen in time: A joint video and image encoder for end-to-end retrieval.",
|
| 185 |
+
"author": "Max Bain, Arsha Nagrani, G\u00fcl Varol, and Andrew Zisserman.",
|
| 186 |
+
"venue": "In Proceedings of the IEEE/CVF international conference on computer vision, pp. 1728\u20131738, 2021.",
|
| 187 |
+
"url": null
|
| 188 |
+
}
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
"2": {
|
| 192 |
+
"title": "All are worth words: A vit backbone for diffusion models.",
|
| 193 |
+
"author": "Fan Bao, Shen Nie, Kaiwen Xue, Yue Cao, Chongxuan Li, Hang Su, and Jun Zhu.",
|
| 194 |
+
"venue": "In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 22669\u201322679, 2023.",
|
| 195 |
+
"url": null
|
| 196 |
+
}
|
| 197 |
+
},
|
| 198 |
+
{
|
| 199 |
+
"3": {
|
| 200 |
+
"title": "Lumiere: A space-time diffusion model for video generation.",
|
| 201 |
+
"author": "Omer Bar-Tal, Hila Chefer, Omer Tov, Charles Herrmann, Roni Paiss, Shiran Zada, Ariel Ephrat, Junhwa Hur, Yuanzhen Li, Tomer Michaeli, et al.",
|
| 202 |
+
"venue": "arXiv preprint arXiv:2401.12945, 2024.",
|
| 203 |
+
"url": null
|
| 204 |
+
}
|
| 205 |
+
},
|
| 206 |
+
{
|
| 207 |
+
"4": {
|
| 208 |
+
"title": "Flux.",
|
| 209 |
+
"author": "BlackForestlabs AI.",
|
| 210 |
+
"venue": "https://blackforestlabs.ai/#get-flux, 2024.",
|
| 211 |
+
"url": null
|
| 212 |
+
}
|
| 213 |
+
},
|
| 214 |
+
{
|
| 215 |
+
"5": {
|
| 216 |
+
"title": "Stable video diffusion: Scaling latent video diffusion models to large datasets.",
|
| 217 |
+
"author": "Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, et al.",
|
| 218 |
+
"venue": "arXiv preprint arXiv:2311.15127, 2023.",
|
| 219 |
+
"url": null
|
| 220 |
+
}
|
| 221 |
+
},
|
| 222 |
+
{
|
| 223 |
+
"6": {
|
| 224 |
+
"title": "Pixart-: Fast training of diffusion transformer for photorealistic text-to-image synthesis.",
|
| 225 |
+
"author": "Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, et al.",
|
| 226 |
+
"venue": "arXiv preprint arXiv:2310.00426, 2023.",
|
| 227 |
+
"url": null
|
| 228 |
+
}
|
| 229 |
+
},
|
| 230 |
+
{
|
| 231 |
+
"7": {
|
| 232 |
+
"title": "Pixart-: Weak-to-strong training of diffusion transformer for 4k text-to-image generation.",
|
| 233 |
+
"author": "Junsong Chen, Chongjian Ge, Enze Xie, Yue Wu, Lewei Yao, Xiaozhe Ren, Zhongdao Wang, Ping Luo, Huchuan Lu, and Zhenguo Li.",
|
| 234 |
+
"venue": "arXiv preprint arXiv:2403.04692, 2024a.",
|
| 235 |
+
"url": null
|
| 236 |
+
}
|
| 237 |
+
},
|
| 238 |
+
{
|
| 239 |
+
"8": {
|
| 240 |
+
"title": "Pixart-: Fast and controllable image generation with latent consistency models.",
|
| 241 |
+
"author": "Junsong Chen, Yue Wu, Simian Luo, Enze Xie, Sayak Paul, Ping Luo, Hang Zhao, and Zhenguo Li.",
|
| 242 |
+
"venue": "arXiv preprint arXiv:2401.05252, 2024b.",
|
| 243 |
+
"url": null
|
| 244 |
+
}
|
| 245 |
+
},
|
| 246 |
+
{
|
| 247 |
+
"9": {
|
| 248 |
+
"title": "Imagenet: A large-scale hierarchical image database.",
|
| 249 |
+
"author": "Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei.",
|
| 250 |
+
"venue": "In 2009 IEEE conference on computer vision and pattern recognition, pp. 248\u2013255. Ieee, 2009.",
|
| 251 |
+
"url": null
|
| 252 |
+
}
|
| 253 |
+
},
|
| 254 |
+
{
|
| 255 |
+
"10": {
|
| 256 |
+
"title": "The llama 3 herd of models.",
|
| 257 |
+
"author": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al.",
|
| 258 |
+
"venue": "arXiv preprint arXiv:2407.21783, 2024.",
|
| 259 |
+
"url": null
|
| 260 |
+
}
|
| 261 |
+
},
|
| 262 |
+
{
|
| 263 |
+
"11": {
|
| 264 |
+
"title": "Fancyvideo: Towards dynamic and consistent video generation via cross-frame textual guidance.",
|
| 265 |
+
"author": "Jiasong Feng, Ao Ma, Jing Wang, Bo Cheng, Xiaodan Liang, Dawei Leng, and Yuhui Yin.",
|
| 266 |
+
"venue": "arXiv preprint arXiv:2408.08189, 2024.",
|
| 267 |
+
"url": null
|
| 268 |
+
}
|
| 269 |
+
},
|
| 270 |
+
{
|
| 271 |
+
"12": {
|
| 272 |
+
"title": "Lumina-t2x: Transforming text into any modality, resolution, and duration via flow-based large diffusion transformers.",
|
| 273 |
+
"author": "Peng Gao, Le Zhuo, Ziyi Lin, Chris Liu, Junsong Chen, Ruoyi Du, Enze Xie, Xu Luo, Longtian Qiu, Yuhang Zhang, et al.",
|
| 274 |
+
"venue": "arXiv preprint arXiv:2405.05945, 2024.",
|
| 275 |
+
"url": null
|
| 276 |
+
}
|
| 277 |
+
},
|
| 278 |
+
{
|
| 279 |
+
"13": {
|
| 280 |
+
"title": "Animatediff: Animate your personalized text-to-image diffusion models without specific tuning.",
|
| 281 |
+
"author": "Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai.",
|
| 282 |
+
"venue": "arXiv preprint arXiv:2307.04725, 2023.",
|
| 283 |
+
"url": null
|
| 284 |
+
}
|
| 285 |
+
},
|
| 286 |
+
{
|
| 287 |
+
"14": {
|
| 288 |
+
"title": "Agent attention: On the integration of softmax and linear attention.",
|
| 289 |
+
"author": "Dongchen Han, Tianzhu Ye, Yizeng Han, Zhuofan Xia, Shiji Song, and Gao Huang.",
|
| 290 |
+
"venue": "arXiv preprint arXiv:2312.08874, 2023.",
|
| 291 |
+
"url": null
|
| 292 |
+
}
|
| 293 |
+
},
|
| 294 |
+
{
|
| 295 |
+
"15": {
|
| 296 |
+
"title": "A survey on vision transformer.",
|
| 297 |
+
"author": "Kai Han, Yunhe Wang, Hanting Chen, Xinghao Chen, Jianyuan Guo, Zhenhua Liu, Yehui Tang, An Xiao, Chunjing Xu, Yixing Xu, et al.",
|
| 298 |
+
"venue": "IEEE transactions on pattern analysis and machine intelligence, 45(1):87\u2013110, 2022.",
|
| 299 |
+
"url": null
|
| 300 |
+
}
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"16": {
|
| 304 |
+
"title": "Gans trained by a two time-scale update rule converge to a local nash equilibrium.",
|
| 305 |
+
"author": "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter.",
|
| 306 |
+
"venue": "Advances in neural information processing systems, 30, 2017.",
|
| 307 |
+
"url": null
|
| 308 |
+
}
|
| 309 |
+
},
|
| 310 |
+
{
|
| 311 |
+
"17": {
|
| 312 |
+
"title": "Lookupvit: Compressing visual information to a limited number of tokens.",
|
| 313 |
+
"author": "Rajat Koner, Gagan Jain, Prateek Jain, Volker Tresp, and Sujoy Paul.",
|
| 314 |
+
"venue": "arXiv preprint arXiv:2407.12753, 2024.",
|
| 315 |
+
"url": null
|
| 316 |
+
}
|
| 317 |
+
},
|
| 318 |
+
{
|
| 319 |
+
"18": {
|
| 320 |
+
"title": "Kling.",
|
| 321 |
+
"author": "Kuaishou.",
|
| 322 |
+
"venue": "https://klingai.kuaishou.com/, 2024.",
|
| 323 |
+
"url": null
|
| 324 |
+
}
|
| 325 |
+
},
|
| 326 |
+
{
|
| 327 |
+
"19": {
|
| 328 |
+
"title": "Open-sora-plan, April 2024.",
|
| 329 |
+
"author": "PKU-Yuan Lab and Tuzhan AI etc.",
|
| 330 |
+
"venue": "URL https://doi.org/10.5281/zenodo.10948109.",
|
| 331 |
+
"url": null
|
| 332 |
+
}
|
| 333 |
+
},
|
| 334 |
+
{
|
| 335 |
+
"20": {
|
| 336 |
+
"title": "Hunyuan-dit: A powerful multi-resolution diffusion transformer with fine-grained chinese understanding, 2024a.",
|
| 337 |
+
"author": "Zhimin Li, Jianwei Zhang, Qin Lin, Jiangfeng Xiong, Yanxin Long, Xinchi Deng, Yingfang Zhang, Xingchao Liu, Minbin Huang, Zedong Xiao, Dayou Chen, Jiajun He, Jiahao Li, Wenyue Li, Chen Zhang, Rongwei Quan, Jianxiang Lu, Jiabin Huang, Xiaoyan Yuan, Xiaoxiao Zheng, Yixuan Li, Jihong Zhang, Chao Zhang, Meng Chen, Jie Liu, Zheng Fang, Weiyan Wang, Jinbao Xue, Yangyu Tao, Jianchen Zhu, Kai Liu, Sihuan Lin, Yifu Sun, Yun Li, Dongdong Wang, Mingtao Chen, Zhichao Hu, Xiao Xiao, Yan Chen, Yuhong Liu, Wei Liu, Di Wang, Yong Yang, Jie Jiang, and Qinglin Lu.",
|
| 338 |
+
"venue": null,
|
| 339 |
+
"url": null
|
| 340 |
+
}
|
| 341 |
+
},
|
| 342 |
+
{
|
| 343 |
+
"21": {
|
| 344 |
+
"title": "Hunyuan-dit: A powerful multi-resolution diffusion transformer with fine-grained chinese understanding.",
|
| 345 |
+
"author": "Zhimin Li, Jianwei Zhang, Qin Lin, Jiangfeng Xiong, Yanxin Long, Xinchi Deng, Yingfang Zhang, Xingchao Liu, Minbin Huang, Zedong Xiao, et al.",
|
| 346 |
+
"venue": "arXiv preprint arXiv:2405.08748, 2024b.",
|
| 347 |
+
"url": null
|
| 348 |
+
}
|
| 349 |
+
},
|
| 350 |
+
{
|
| 351 |
+
"22": {
|
| 352 |
+
"title": "Microsoft coco: Common objects in context.",
|
| 353 |
+
"author": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Doll\u00e1r, and C Lawrence Zitnick.",
|
| 354 |
+
"venue": "In Computer Vision\u2013ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pp. 740\u2013755. Springer, 2014.",
|
| 355 |
+
"url": null
|
| 356 |
+
}
|
| 357 |
+
},
|
| 358 |
+
{
|
| 359 |
+
"23": {
|
| 360 |
+
"title": "Flow matching for generative modeling.",
|
| 361 |
+
"author": "Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le.",
|
| 362 |
+
"venue": "arXiv preprint arXiv:2210.02747, 2022.",
|
| 363 |
+
"url": null
|
| 364 |
+
}
|
| 365 |
+
},
|
| 366 |
+
{
|
| 367 |
+
"24": {
|
| 368 |
+
"title": "Swin transformer: Hierarchical vision transformer using shifted windows.",
|
| 369 |
+
"author": "Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo.",
|
| 370 |
+
"venue": "In Proceedings of the IEEE/CVF international conference on computer vision, pp. 10012\u201310022, 2021.",
|
| 371 |
+
"url": null
|
| 372 |
+
}
|
| 373 |
+
},
|
| 374 |
+
{
|
| 375 |
+
"25": {
|
| 376 |
+
"title": "Vdt: General-purpose video diffusion transformers via mask modeling.",
|
| 377 |
+
"author": "Haoyu Lu, Guoxing Yang, Nanyi Fei, Yuqi Huo, Zhiwu Lu, Ping Luo, and Mingyu Ding.",
|
| 378 |
+
"venue": "arXiv preprint arXiv:2305.13311, 2023.",
|
| 379 |
+
"url": null
|
| 380 |
+
}
|
| 381 |
+
},
|
| 382 |
+
{
|
| 383 |
+
"26": {
|
| 384 |
+
"title": "Sit: Exploring flow and diffusion-based generative models with scalable interpolant transformers.",
|
| 385 |
+
"author": "Nanye Ma, Mark Goldstein, Michael S Albergo, Nicholas M Boffi, Eric Vanden-Eijnden, and Saining Xie.",
|
| 386 |
+
"venue": "arXiv preprint arXiv:2401.08740, 2024a.",
|
| 387 |
+
"url": null
|
| 388 |
+
}
|
| 389 |
+
},
|
| 390 |
+
{
|
| 391 |
+
"27": {
|
| 392 |
+
"title": "Latte: Latent diffusion transformer for video generation.",
|
| 393 |
+
"author": "Xin Ma, Yaohui Wang, Gengyun Jia, Xinyuan Chen, Ziwei Liu, Yuan-Fang Li, Cunjian Chen, and Yu Qiao.",
|
| 394 |
+
"venue": "arXiv preprint arXiv:2401.03048, 2024b.",
|
| 395 |
+
"url": null
|
| 396 |
+
}
|
| 397 |
+
},
|
| 398 |
+
{
|
| 399 |
+
"28": {
|
| 400 |
+
"title": "Sora.",
|
| 401 |
+
"author": "OpenAI.",
|
| 402 |
+
"venue": "https://openai.com/, 2024.",
|
| 403 |
+
"url": null
|
| 404 |
+
}
|
| 405 |
+
},
|
| 406 |
+
{
|
| 407 |
+
"29": {
|
| 408 |
+
"title": "Scalable diffusion models with transformers.",
|
| 409 |
+
"author": "William Peebles and Saining Xie.",
|
| 410 |
+
"venue": "In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4195\u20134205, 2023.",
|
| 411 |
+
"url": null
|
| 412 |
+
}
|
| 413 |
+
},
|
| 414 |
+
{
|
| 415 |
+
"30": {
|
| 416 |
+
"title": "Efficiently scaling transformer inference.",
|
| 417 |
+
"author": "Reiner Pope, Sholto Douglas, Aakanksha Chowdhery, Jacob Devlin, James Bradbury, Jonathan Heek, Kefan Xiao, Shivani Agrawal, and Jeff Dean.",
|
| 418 |
+
"venue": "Proceedings of Machine Learning and Systems, 5:606\u2013624, 2023.",
|
| 419 |
+
"url": null
|
| 420 |
+
}
|
| 421 |
+
},
|
| 422 |
+
{
|
| 423 |
+
"31": {
|
| 424 |
+
"title": "Efficient diffusion transformer with step-wise dynamic attention mediators.",
|
| 425 |
+
"author": "Yifan Pu, Zhuofan Xia, Jiayi Guo, Dongchen Han, Qixiu Li, Duo Li, Yuhui Yuan, Ji Li, Yizeng Han, Shiji Song, et al.",
|
| 426 |
+
"venue": "arXiv preprint arXiv:2408.05710, 2024.",
|
| 427 |
+
"url": null
|
| 428 |
+
}
|
| 429 |
+
},
|
| 430 |
+
{
|
| 431 |
+
"32": {
|
| 432 |
+
"title": "Richdreamer: A generalizable normal-depth diffusion model for detail richness in text-to-3d.",
|
| 433 |
+
"author": "Lingteng Qiu, Guanying Chen, Xiaodong Gu, Qi Zuo, Mutian Xu, Yushuang Wu, Weihao Yuan, Zilong Dong, Liefeng Bo, and Xiaoguang Han.",
|
| 434 |
+
"venue": "In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 9914\u20139925, 2024.",
|
| 435 |
+
"url": null
|
| 436 |
+
}
|
| 437 |
+
},
|
| 438 |
+
{
|
| 439 |
+
"33": {
|
| 440 |
+
"title": "Hierarchical text-conditional image generation with clip latents.",
|
| 441 |
+
"author": "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen.",
|
| 442 |
+
"venue": "arXiv preprint arXiv:2204.06125, 1(2):3, 2022.",
|
| 443 |
+
"url": null
|
| 444 |
+
}
|
| 445 |
+
},
|
| 446 |
+
{
|
| 447 |
+
"34": {
|
| 448 |
+
"title": "High-resolution image synthesis with latent diffusion models.",
|
| 449 |
+
"author": "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Bj\u00f6rn Ommer.",
|
| 450 |
+
"venue": "In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 10684\u201310695, 2022.",
|
| 451 |
+
"url": null
|
| 452 |
+
}
|
| 453 |
+
},
|
| 454 |
+
{
|
| 455 |
+
"35": {
|
| 456 |
+
"title": "Photorealistic text-to-image diffusion models with deep language understanding.",
|
| 457 |
+
"author": "Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al.",
|
| 458 |
+
"venue": "Advances in neural information processing systems, 35:36479\u201336494, 2022.",
|
| 459 |
+
"url": null
|
| 460 |
+
}
|
| 461 |
+
},
|
| 462 |
+
{
|
| 463 |
+
"36": {
|
| 464 |
+
"title": "Laion-5b: An open large-scale dataset for training next generation image-text models.",
|
| 465 |
+
"author": "Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al.",
|
| 466 |
+
"venue": "Advances in Neural Information Processing Systems, 35:25278\u201325294, 2022.",
|
| 467 |
+
"url": null
|
| 468 |
+
}
|
| 469 |
+
},
|
| 470 |
+
{
|
| 471 |
+
"37": {
|
| 472 |
+
"title": "Vidu.",
|
| 473 |
+
"author": "Shengshu AI.",
|
| 474 |
+
"venue": "https://www.vidu.studio/, 2024.",
|
| 475 |
+
"url": null
|
| 476 |
+
}
|
| 477 |
+
},
|
| 478 |
+
{
|
| 479 |
+
"38": {
|
| 480 |
+
"title": "Mvdream: Multi-view diffusion for 3d generation.",
|
| 481 |
+
"author": "Yichun Shi, Peng Wang, Jianglong Ye, Mai Long, Kejie Li, and Xiao Yang.",
|
| 482 |
+
"venue": "arXiv preprint arXiv:2308.16512, 2023.",
|
| 483 |
+
"url": null
|
| 484 |
+
}
|
| 485 |
+
},
|
| 486 |
+
{
|
| 487 |
+
"39": {
|
| 488 |
+
"title": "A dataset of 101 human action classes from videos in the wild.",
|
| 489 |
+
"author": "Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah.",
|
| 490 |
+
"venue": "Center for Research in Computer Vision, 2(11):1\u20137, 2012.",
|
| 491 |
+
"url": null
|
| 492 |
+
}
|
| 493 |
+
},
|
| 494 |
+
{
|
| 495 |
+
"40": {
|
| 496 |
+
"title": "Stablediffusion3.",
|
| 497 |
+
"author": "Stability AI.",
|
| 498 |
+
"venue": "https://stability.ai/news/stable-diffusion-3, 2024.",
|
| 499 |
+
"url": null
|
| 500 |
+
}
|
| 501 |
+
},
|
| 502 |
+
{
|
| 503 |
+
"41": {
|
| 504 |
+
"title": "Roformer: Enhanced transformer with rotary position embedding.",
|
| 505 |
+
"author": "Jianlin Su, Murtadha Ahmed, Yu Lu, Shengfeng Pan, Wen Bo, and Yunfeng Liu.",
|
| 506 |
+
"venue": "Neurocomputing, 568:127063, 2024.",
|
| 507 |
+
"url": null
|
| 508 |
+
}
|
| 509 |
+
},
|
| 510 |
+
{
|
| 511 |
+
"42": {
|
| 512 |
+
"title": "Kolors: Effective training of diffusion model for photorealistic text-to-image synthesis.",
|
| 513 |
+
"author": "Kolors Team.",
|
| 514 |
+
"venue": "arXiv preprint, 2024.",
|
| 515 |
+
"url": null
|
| 516 |
+
}
|
| 517 |
+
},
|
| 518 |
+
{
|
| 519 |
+
"43": {
|
| 520 |
+
"title": "Dynamicrafter: Animating open-domain images with video diffusion priors.",
|
| 521 |
+
"author": "Jinbo Xing, Menghan Xia, Yong Zhang, Haoxin Chen, Xintao Wang, Tien-Tsin Wong, and Ying Shan.",
|
| 522 |
+
"venue": "arXiv preprint arXiv:2310.12190, 2023.",
|
| 523 |
+
"url": null
|
| 524 |
+
}
|
| 525 |
+
},
|
| 526 |
+
{
|
| 527 |
+
"44": {
|
| 528 |
+
"title": "Easyanimate: A high-performance long video generation method based on transformer architecture.",
|
| 529 |
+
"author": "Jiaqi Xu, Xinyi Zou, Kunzhe Huang, Yunkuo Chen, Bo Liu, MengLi Cheng, Xing Shi, and Jun Huang.",
|
| 530 |
+
"venue": "arXiv preprint arXiv:2405.18991, 2024.",
|
| 531 |
+
"url": null
|
| 532 |
+
}
|
| 533 |
+
},
|
| 534 |
+
{
|
| 535 |
+
"45": {
|
| 536 |
+
"title": "Msr-vtt: A large video description dataset for bridging video and language.",
|
| 537 |
+
"author": "Jun Xu, Tao Mei, Ting Yao, and Yong Rui.",
|
| 538 |
+
"venue": "In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 5288\u20135296, 2016.",
|
| 539 |
+
"url": null
|
| 540 |
+
}
|
| 541 |
+
},
|
| 542 |
+
{
|
| 543 |
+
"46": {
|
| 544 |
+
"title": "Raphael: Text-to-image generation via large mixture of diffusion paths.",
|
| 545 |
+
"author": "Zeyue Xue, Guanglu Song, Qiushan Guo, Boxiao Liu, Zhuofan Zong, Yu Liu, and Ping Luo.",
|
| 546 |
+
"venue": "Advances in Neural Information Processing Systems, 36, 2024.",
|
| 547 |
+
"url": null
|
| 548 |
+
}
|
| 549 |
+
},
|
| 550 |
+
{
|
| 551 |
+
"47": {
|
| 552 |
+
"title": "Cogvideox: Text-to-video diffusion models with an expert transformer.",
|
| 553 |
+
"author": "Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al.",
|
| 554 |
+
"venue": "arXiv preprint arXiv:2408.06072, 2024.",
|
| 555 |
+
"url": null
|
| 556 |
+
}
|
| 557 |
+
},
|
| 558 |
+
{
|
| 559 |
+
"48": {
|
| 560 |
+
"title": "An image is worth 32 tokens for reconstruction and generation.",
|
| 561 |
+
"author": "Qihang Yu, Mark Weber, Xueqing Deng, Xiaohui Shen, Daniel Cremers, and Liang-Chieh Chen.",
|
| 562 |
+
"venue": "arXiv preprint arXiv:2406.07550, 2024.",
|
| 563 |
+
"url": null
|
| 564 |
+
}
|
| 565 |
+
},
|
| 566 |
+
{
|
| 567 |
+
"49": {
|
| 568 |
+
"title": "Make pixels dance: High-dynamic video generation.",
|
| 569 |
+
"author": "Yan Zeng, Guoqiang Wei, Jiani Zheng, Jiaxin Zou, Yang Wei, Yuchen Zhang, and Hang Li.",
|
| 570 |
+
"venue": "In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8850\u20138860, 2024.",
|
| 571 |
+
"url": null
|
| 572 |
+
}
|
| 573 |
+
},
|
| 574 |
+
{
|
| 575 |
+
"50": {
|
| 576 |
+
"title": "Root mean square layer normalization.",
|
| 577 |
+
"author": "Biao Zhang and Rico Sennrich.",
|
| 578 |
+
"venue": "Advances in Neural Information Processing Systems, 32, 2019.",
|
| 579 |
+
"url": null
|
| 580 |
+
}
|
| 581 |
+
},
|
| 582 |
+
{
|
| 583 |
+
"51": {
|
| 584 |
+
"title": "Open-sora: Democratizing efficient video production for all, March 2024.",
|
| 585 |
+
"author": "Zangwei Zheng, Xiangyu Peng, Tianji Yang, Chenhui Shen, Shenggui Li, Hongxin Liu, Yukun Zhou, Tianyi Li, and Yang You.",
|
| 586 |
+
"venue": "URL https://github.com/hpcaitech/Open-Sora.",
|
| 587 |
+
"url": null
|
| 588 |
+
}
|
| 589 |
+
},
|
| 590 |
+
{
|
| 591 |
+
"52": {
|
| 592 |
+
"title": "Lumina-next: Making lumina-t2x stronger and faster with next-dit.",
|
| 593 |
+
"author": "Le Zhuo, Ruoyi Du, Han Xiao, Yangguang Li, Dongyang Liu, Rongjie Huang, Wenze Liu, Lirui Zhao, Fu-Yun Wang, Zhanyu Ma, et al.",
|
| 594 |
+
"venue": "arXiv preprint arXiv:2406.18583, 2024.",
|
| 595 |
+
"url": null
|
| 596 |
+
}
|
| 597 |
+
},
|
| 598 |
+
{
|
| 599 |
+
"53": {
|
| 600 |
+
"title": "Videomv: Consistent multi-view generation based on large video generative model.",
|
| 601 |
+
"author": "Qi Zuo, Xiaodong Gu, Lingteng Qiu, Yuan Dong, Zhengyi Zhao, Weihao Yuan, Rui Peng, Siyu Zhu, Zilong Dong, Liefeng Bo, et al.",
|
| 602 |
+
"venue": "arXiv preprint arXiv:2403.12010, 2024.",
|
| 603 |
+
"url": null
|
| 604 |
+
}
|
| 605 |
+
}
|
| 606 |
+
],
|
| 607 |
+
"url": "http://arxiv.org/html/2409.04005v2"
|
| 608 |
+
}
|
20241004/2409.07272v3.json
ADDED
|
@@ -0,0 +1,247 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "RePlay: a Recommendation Framework for Experimentation and Production Use",
|
| 3 |
+
"abstract": "Using a single tool to build and compare recommender systems significantly reduces the time to market for new models. In addition, the comparison results when using such tools look more consistent. This is why many different tools and libraries for researchers in the field of recommendations have recently appeared. Unfortunately, most of these frameworks are aimed primarily at researchers and require modification for use in production due to the inability to work on large datasets or an inappropriate architecture. In this demo, we present our open-source toolkit RePlay - a framework containing an end-to-end pipeline for building recommender systems, which is ready for production use. RePlay also allows you to use a suitable stack for the pipeline on each stage: Pandas, Polars, or Spark. This allows the library to scale computations and deploy to a cluster. Thus, RePlay allows data scientists to easily move from research mode to production mode using the same interfaces.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "1. Introduction",
|
| 9 |
+
"text": "The field of recommender systems has been actively developing in recent decades, with new algorithms and approaches constantly emerging and improving. Researchers also increasingly pay attention to the issue of reproducibility and verification of certain methods and their correct evaluation (Shevchenko et al., 2024 ###reference_b19###; Klenitskiy et al., 2024 ###reference_b12###).\nDue to the increase and diversity of various recommender algorithms, researchers and engineers from the industry have difficulties with reproducing and comparing recommender algorithms since, in scientific articles, the code provided by the authors may not work or be absent altogether. Other stages of the recommender pipeline, such as calculating metrics, splitting, and preprocessing, may also differ. That is why several frameworks for recommendations have appeared in the last few years, such as (Michiels et al., 2022 ###reference_b16###; Anelli et al., 2021 ###reference_b3###; Zhao et al., 2022 ###reference_b25###; Graham et al., 2019 ###reference_b7###). They make the process of comparing recommender algorithms more universal and allow to avoid simple errors when comparing, such as different implementations of metrics in the compared algorithms (Tamm et al., 2021 ###reference_b21###).\n###figure_1### Such libraries usually contain a fairly large set of algorithms and metrics for comparison. However, most of these frameworks are intended specifically for researchers since they can work quite slowly, or the code will require revision for implementation in production. We present RePlay - a framework that contains all stages of the recommendation pipeline but is more focused on use in production. Our library is an experimentation and production toolkit for top-N recommendation. RePlay has rich test coverage and detailed documentation.\nRePlay supports three types of dataframes: Spark, Polars, and Pandas, as well as different types of hardware architecture: CPU, GPU, and cluster, so you can choose a convenient configuration on each stage of the pipeline depending on the model and your hardware. In addition, many basic models are written in Spark or are wrappers of Spark implementations, which makes it easy to scale computations and deploy to a cluster. Of the frameworks described above, only Recommenders (Graham et al., 2019 ###reference_b7###) support both Spark and Pandas.\nFigure 1 ###reference_### shows the experimentation and production pipelines with RePlay.\nThe main features of RePlay are the following:\nProduction ready code, which can be embedded in your recommendation platform\nPossible to implement both experimentation and production pipelines\nSupport for various types of dataframes: Spark, Polars, Pandas\nSupport for various types of hardware architectures: CPU, GPU, Cluster\nIn this demo, we will give you an overview of the various RePlay modules and how to use them for experimentation and production. RePlay source code is available on GitHub111https://github.com/sb-ai-lab/RePlay."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "2. RePlay",
|
| 15 |
+
"text": "The main components of the library are briefly described below."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "3. Setup",
|
| 21 |
+
"text": "RePlay library is available for installation from the site pypi.org ###reference_### under the name replay-rec. The core package without PySpark and PyTorch dependencies will be installed by default. For convenience, the PySpark and PyTorch functionality can be installed separately as additional extras. For more details, see the Installation ###reference_ain/README.md#installation### section in the Repository or the Library Documentation ###reference_tml###.\nSome RePlay functionality is available in the experimental package. The core package requires a limited number of dependencies to be easily implemented into production. The models from the experimental package could differ from the core package models in APIs. The experimental package does not have strict test coverage requirements as the core one to allow fast experiments. The experimental package, indicated with the rc0 suffix, can also be installed from PyPI.\nTo raise issues or ask questions about the use of RePlay, check out the source code and Contribution Guidelines ###reference_ain/CONTRIBUTING.md### on GitHub."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "4",
|
| 25 |
+
"parent_section_id": null,
|
| 26 |
+
"section_name": "4. Demo",
|
| 27 |
+
"text": "In the demo, we demonstrate the main stages of the RePlay pipeline using the MovieLens 1M dataset (Harper and Konstan, 2015 ###reference_b8###). First, we apply a leave-one-out split with the LastNSplitter. Next, we load the data into the Dataset class, standard for all RePlay models, and transform it with the SequenceTokenizer to store the data as sequences. Further, we train the SASRec model and measure the MAP, NDCG, and Recall on the validation data. After that, we get the recommendations in different data types: PySpark, Pandas, Polars dataframes, and PyTorch Tensors. Finally, we calculate various metrics for the different K on the test data with the OfflineMetrics class."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "5",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "5. Conclusion and future work",
|
| 33 |
+
"text": "RePlay provides all the essential pipeline steps for recommender system researchers and developers. It allows you to conduct experiments with Pandas or Polars and facilitates a smooth transition to Spark for large-scale computations in production.\nRePlay is actively maintained and developed. In the near future, we plan to expand the number of neural network algorithms, as well as basic algorithms implemented on Polars."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "6",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "6. Acknowledgements",
|
| 39 |
+
"text": "We wish to express our sincere gratitude to the people, who have made significant contributions to the development of RePlay: Boris Shminke, Yan-Martin Tamm, Alexey Grishanov, Nikolay Butakov, Egor Bodrov, Eduard Malov, Maxim Savchenko, and Alexander Tuzhilin. Additionally, we extend our heartfelt thanks to Alexandra Laricheva for her valuable assistance with the auxiliary materials for our demo."
|
| 40 |
+
}
|
| 41 |
+
],
|
| 42 |
+
"appendix": [],
|
| 43 |
+
"tables": {},
|
| 44 |
+
"image_paths": {
|
| 45 |
+
"1": {
|
| 46 |
+
"figure_path": "2409.07272v3_figure_1.png",
|
| 47 |
+
"caption": "Figure 1. RePlay pipeline. Using the RePlay framework, it is possible to implement both experimentation and production pipelines.",
|
| 48 |
+
"url": "http://arxiv.org/html/2409.07272v3/x1.png"
|
| 49 |
+
}
|
| 50 |
+
},
|
| 51 |
+
"validation": true,
|
| 52 |
+
"references": [
|
| 53 |
+
{
|
| 54 |
+
"1": {
|
| 55 |
+
"title": "Optuna: A Next-generation Hyperparameter Optimization Framework. In Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining (Anchorage, AK, USA) (KDD \u201919). Association for Computing Machinery, New York, NY, USA, 2623\u20132631.",
|
| 56 |
+
"author": "Takuya Akiba, Shotaro Sano, Toshihiko Yanase, Takeru Ohta, and Masanori Koyama. 2019.",
|
| 57 |
+
"venue": "https://doi.org/10.1145/3292500.3330701",
|
| 58 |
+
"url": null
|
| 59 |
+
}
|
| 60 |
+
},
|
| 61 |
+
{
|
| 62 |
+
"2": {
|
| 63 |
+
"title": "Elliot: A Comprehensive and Rigorous Framework for Reproducible Recommender Systems Evaluation. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval (Virtual Event, Canada) (SIGIR \u201921). Association for Computing Machinery, New York, NY, USA, 2405\u20132414.",
|
| 64 |
+
"author": "Vito Walter Anelli, Alejandro Bellogin, Antonio Ferrara, Daniele Malitesta, Felice Antonio Merra, Claudio Pomo, Francesco Maria Donini, and Tommaso Di Noia. 2021.",
|
| 65 |
+
"venue": "https://doi.org/10.1145/3404835.3463245",
|
| 66 |
+
"url": null
|
| 67 |
+
}
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"3": {
|
| 71 |
+
"title": "Finite-time analysis of the multiarmed bandit problem.",
|
| 72 |
+
"author": "Peter Auer, Nicolo Cesa-Bianchi, and Paul Fischer. 2002.",
|
| 73 |
+
"venue": "Machine learning 47 (2002), 235\u2013256.",
|
| 74 |
+
"url": null
|
| 75 |
+
}
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"4": {
|
| 79 |
+
"title": "An empirical evaluation of thompson sampling.",
|
| 80 |
+
"author": "Olivier Chapelle and Lihong Li. 2011.",
|
| 81 |
+
"venue": "Advances in neural information processing systems 24 (2011).",
|
| 82 |
+
"url": null
|
| 83 |
+
}
|
| 84 |
+
},
|
| 85 |
+
{
|
| 86 |
+
"5": {
|
| 87 |
+
"title": "The KL-UCB algorithm for bounded stochastic bandits and beyond. In Proceedings of the 24th annual conference on learning theory. JMLR Workshop and Conference Proceedings, 359\u2013376.",
|
| 88 |
+
"author": "Aur\u00e9lien Garivier and Olivier Capp\u00e9. 2011.",
|
| 89 |
+
"venue": "",
|
| 90 |
+
"url": null
|
| 91 |
+
}
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"6": {
|
| 95 |
+
"title": "Microsoft recommenders: tools to accelerate developing recommender systems. In Proceedings of the 13th ACM Conference on Recommender Systems (Copenhagen, Denmark) (RecSys \u201919). Association for Computing Machinery, New York, NY, USA, 542\u2013543.",
|
| 96 |
+
"author": "Scott Graham, Jun-Ki Min, and Tao Wu. 2019.",
|
| 97 |
+
"venue": "https://doi.org/10.1145/3298689.3346967",
|
| 98 |
+
"url": null
|
| 99 |
+
}
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"7": {
|
| 103 |
+
"title": "The movielens datasets: History and context.",
|
| 104 |
+
"author": "F Maxwell Harper and Joseph A Konstan. 2015.",
|
| 105 |
+
"venue": "Acm transactions on interactive intelligent systems (tiis) 5, 4 (2015), 1\u201319.",
|
| 106 |
+
"url": null
|
| 107 |
+
}
|
| 108 |
+
},
|
| 109 |
+
{
|
| 110 |
+
"8": {
|
| 111 |
+
"title": "Neural collaborative filtering. In Proceedings of the 26th international conference on world wide web. 173\u2013182.",
|
| 112 |
+
"author": "Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017.",
|
| 113 |
+
"venue": "",
|
| 114 |
+
"url": null
|
| 115 |
+
}
|
| 116 |
+
},
|
| 117 |
+
{
|
| 118 |
+
"9": {
|
| 119 |
+
"title": "Self-attentive sequential recommendation. In 2018 IEEE international conference on data mining (ICDM). IEEE, 197\u2013206.",
|
| 120 |
+
"author": "Wang-Cheng Kang and Julian McAuley. 2018.",
|
| 121 |
+
"venue": "",
|
| 122 |
+
"url": null
|
| 123 |
+
}
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"10": {
|
| 127 |
+
"title": "Turning Dross Into Gold Loss: is BERT4Rec really better than SASRec?. In Proceedings of the 17th ACM Conference on Recommender Systems (Singapore, Singapore) (RecSys \u201923). Association for Computing Machinery, New York, NY, USA, 1120\u20131125.",
|
| 128 |
+
"author": "Anton Klenitskiy and Alexey Vasilev. 2023.",
|
| 129 |
+
"venue": "https://doi.org/10.1145/3604915.3610644",
|
| 130 |
+
"url": null
|
| 131 |
+
}
|
| 132 |
+
},
|
| 133 |
+
{
|
| 134 |
+
"11": {
|
| 135 |
+
"title": "Does It Look Sequential? An Analysis of Datasets for Evaluation of Sequential Recommendations. In Proceedings of the 18th ACM Conference on Recommender Systems (Bari, Italy) (RecSys \u201924). Association for Computing Machinery, Bari, Italy.",
|
| 136 |
+
"author": "Anton Klenitskiy, Anna Volodkevich, Anton Pembek, and Alexey Vasilev. 2024.",
|
| 137 |
+
"venue": "https://doi.org/10.1145/3640457.3688195",
|
| 138 |
+
"url": null
|
| 139 |
+
}
|
| 140 |
+
},
|
| 141 |
+
{
|
| 142 |
+
"12": {
|
| 143 |
+
"title": "Metadata Embeddings for User and Item Cold-start Recommendations. In Proceedings of the 2nd Workshop on New Trends on Content-Based Recommender Systems co-located with 9th ACM Conference on Recommender Systems (RecSys 2015), Vienna, Austria, September 16-20, 2015. (CEUR Workshop Proceedings, Vol. 1448), Toine Bogers and Marijn Koolen (Eds.). CEUR-WS.org, 14\u201321.",
|
| 144 |
+
"author": "Maciej Kula. 2015.",
|
| 145 |
+
"venue": "http://ceur-ws.org/Vol-1448/paper4.pdf",
|
| 146 |
+
"url": null
|
| 147 |
+
}
|
| 148 |
+
},
|
| 149 |
+
{
|
| 150 |
+
"13": {
|
| 151 |
+
"title": "Variational autoencoders for collaborative filtering. In Proceedings of the 2018 world wide web conference. 689\u2013698.",
|
| 152 |
+
"author": "Dawen Liang, Rahul G Krishnan, Matthew D Hoffman, and Tony Jebara. 2018.",
|
| 153 |
+
"venue": "",
|
| 154 |
+
"url": null
|
| 155 |
+
}
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"14": {
|
| 159 |
+
"title": "Deep reinforcement learning based recommendation with explicit user-item interactions modeling.",
|
| 160 |
+
"author": "Feng Liu, Ruiming Tang, Xutao Li, Weinan Zhang, Yunming Ye, Haokun Chen, Huifeng Guo, and Yuzhou Zhang. 2018.",
|
| 161 |
+
"venue": "arXiv preprint arXiv:1810.12027 (2018).",
|
| 162 |
+
"url": null
|
| 163 |
+
}
|
| 164 |
+
},
|
| 165 |
+
{
|
| 166 |
+
"15": {
|
| 167 |
+
"title": "RecPack: An(other) Experimentation Toolkit for Top-N Recommendation using Implicit Feedback Data. In Proceedings of the 16th ACM Conference on Recommender Systems (Seattle, WA, USA) (RecSys \u201922). Association for Computing Machinery, New York, NY, USA, 648\u2013651.",
|
| 168 |
+
"author": "Lien Michiels, Robin Verachtert, and Bart Goethals. 2022.",
|
| 169 |
+
"venue": "https://doi.org/10.1145/3523227.3551472",
|
| 170 |
+
"url": null
|
| 171 |
+
}
|
| 172 |
+
},
|
| 173 |
+
{
|
| 174 |
+
"16": {
|
| 175 |
+
"title": "Slim: Sparse linear methods for top-n recommender systems. In 2011 IEEE 11th international conference on data mining. IEEE, 497\u2013506.",
|
| 176 |
+
"author": "Xia Ning and George Karypis. 2011.",
|
| 177 |
+
"venue": "",
|
| 178 |
+
"url": null
|
| 179 |
+
}
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"17": {
|
| 183 |
+
"title": "gSASRec: Reducing Overconfidence in Sequential Recommendation Trained with Negative Sampling. In Proceedings of the 17th ACM Conference on Recommender Systems. 116\u2013128.",
|
| 184 |
+
"author": "Aleksandr Vladimirovich Petrov and Craig Macdonald. 2023.",
|
| 185 |
+
"venue": "",
|
| 186 |
+
"url": null
|
| 187 |
+
}
|
| 188 |
+
},
|
| 189 |
+
{
|
| 190 |
+
"18": {
|
| 191 |
+
"title": "From Variability to Stability: Advancing RecSys Benchmarking Practices. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (Barcelona, Spain) (KDD \u201924). Association for Computing Machinery, New York, NY, USA, 5701\u20135712.",
|
| 192 |
+
"author": "Valeriy Shevchenko, Nikita Belousov, Alexey Vasilev, Vladimir Zholobov, Artyom Sosedka, Natalia Semenova, Anna Volodkevich, Andrey Savchenko, and Alexey Zaytsev. 2024.",
|
| 193 |
+
"venue": "https://doi.org/10.1145/3637528.3671655",
|
| 194 |
+
"url": null
|
| 195 |
+
}
|
| 196 |
+
},
|
| 197 |
+
{
|
| 198 |
+
"19": {
|
| 199 |
+
"title": "BERT4Rec: Sequential recommendation with bidirectional encoder representations from transformer. In Proceedings of the 28th ACM international conference on information and knowledge management. 1441\u20131450.",
|
| 200 |
+
"author": "Fei Sun, Jun Liu, Jian Wu, Changhua Pei, Xiao Lin, Wenwu Ou, and Peng Jiang. 2019.",
|
| 201 |
+
"venue": "",
|
| 202 |
+
"url": null
|
| 203 |
+
}
|
| 204 |
+
},
|
| 205 |
+
{
|
| 206 |
+
"20": {
|
| 207 |
+
"title": "Quality Metrics in Recommender Systems: Do We Calculate Metrics Consistently?. In Proceedings of the 15th ACM Conference on Recommender Systems (Amsterdam, Netherlands) (RecSys \u201921). Association for Computing Machinery, New York, NY, USA, 708\u2013713.",
|
| 208 |
+
"author": "Yan-Martin Tamm, Rinchin Damdinov, and Alexey Vasilev. 2021.",
|
| 209 |
+
"venue": "https://doi.org/10.1145/3460231.3478848",
|
| 210 |
+
"url": null
|
| 211 |
+
}
|
| 212 |
+
},
|
| 213 |
+
{
|
| 214 |
+
"21": {
|
| 215 |
+
"title": "Scaling Session-Based Transformer Recommendations using Optimized Negative Sampling and Loss Functions. In Proceedings of the 17th ACM Conference on Recommender Systems. 1023\u20131026.",
|
| 216 |
+
"author": "Timo Wilm, Philipp Normann, Sophie Baumeister, and Paul-Vincent Kobow. 2023.",
|
| 217 |
+
"venue": "",
|
| 218 |
+
"url": null
|
| 219 |
+
}
|
| 220 |
+
},
|
| 221 |
+
{
|
| 222 |
+
"22": {
|
| 223 |
+
"title": "A general offline reinforcement learning framework for interactive recommendation. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 35. 4512\u20134520.",
|
| 224 |
+
"author": "Teng Xiao and Donglin Wang. 2021.",
|
| 225 |
+
"venue": "",
|
| 226 |
+
"url": null
|
| 227 |
+
}
|
| 228 |
+
},
|
| 229 |
+
{
|
| 230 |
+
"23": {
|
| 231 |
+
"title": "User retention-oriented recommendation with decision transformer. In Proceedings of the ACM Web Conference 2023. 1141\u20131149.",
|
| 232 |
+
"author": "Kesen Zhao, Lixin Zou, Xiangyu Zhao, Maolin Wang, and Dawei Yin. 2023.",
|
| 233 |
+
"venue": "",
|
| 234 |
+
"url": null
|
| 235 |
+
}
|
| 236 |
+
},
|
| 237 |
+
{
|
| 238 |
+
"24": {
|
| 239 |
+
"title": "RecBole 2.0: Towards a More Up-to-Date Recommendation Library. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management (Atlanta, GA, USA) (CIKM \u201922). Association for Computing Machinery, New York, NY, USA, 4722\u20134726.",
|
| 240 |
+
"author": "Wayne Xin Zhao, Yupeng Hou, Xingyu Pan, Chen Yang, Zeyu Zhang, Zihan Lin, Jingsen Zhang, Shuqing Bian, Jiakai Tang, Wenqi Sun, Yushuo Chen, Lanling Xu, Gaowei Zhang, Zhen Tian, Changxin Tian, Shanlei Mu, Xinyan Fan, Xu Chen, and Ji-Rong Wen. 2022.",
|
| 241 |
+
"venue": "https://doi.org/10.1145/3511808.3557680",
|
| 242 |
+
"url": null
|
| 243 |
+
}
|
| 244 |
+
}
|
| 245 |
+
],
|
| 246 |
+
"url": "http://arxiv.org/html/2409.07272v3"
|
| 247 |
+
}
|
20241004/2409.14247v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
20241004/2409.16728v2.json
ADDED
|
@@ -0,0 +1,96 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "SDCL: Students Discrepancy-Informed Correction Learning for Semi-supervised Medical Image Segmentation",
|
| 3 |
+
"abstract": "Semi-supervised medical image segmentation (SSMIS) has been demonstrated the potential to mitigate the issue of\nlimited medical labeled data. However, confirmation and cognitive biases may affect the prevalent teacher-student based SSMIS methods due to erroneous pseudo-labels.\nTo tackle this challenge, we improve the mean teacher approach and propose the Students Discrepancy-Informed Correction Learning\n(SDCL) framework that includes two students and one non-trainable teacher, which utilizes the segmentation difference between the two students to guide the self-correcting learning.\nThe essence of SDCL is to identify the areas of\nsegmentation discrepancy as the potential bias areas, and then encourage the model to review the correct cognition and rectify\ntheir own biases in these areas.\nTo facilitate the bias correction learning with continuous review and rectification, two correction loss functions are employed to minimize the correct segmentation voxel distance and maximize the erroneous segmentation voxel entropy.\nWe conducted experiments on three public medical image datasets:\ntwo 3D datasets (CT and MRI) and one 2D dataset (MRI). The results show that our SDCL surpasses\nthe current State-of-the-Art (SOTA) methods by 2.57%, 3.04%, and 2.34% in the Dice score on\nthe Pancreas, LA, and ACDC datasets, respectively.\nIn addition, the accuracy of our method is very close to the fully supervised method on the ACDC dataset,\nand even exceeds the fully supervised method on the Pancreas and LA dataset.\n(Code available at https://github.com/pascalcpp/SDCL).",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Deep learning techniques have revolutionized medical image processing, mainly due to the superiority of neural network\nalgorithms over many conventional image processing techniques.\nRelative to the general computer vision field, medical image segmentation\nencounters the dual challenges of the scarcity of annotation data and the\ngreater complexity of datasets. These data constraints suppress the accuracy\nof medical image segmentation, and semi-supervised learning (SSL) methods are increasingly\nbeing recognized for their potential to tackle these challenges by leveraging both labeled and unlabeled data.\nCurrent popular SSMIS methods focus on self-training, uncertainty estimation, consistency regularization, and distribution alignment.\nSelf-training methods, such as self-training and co-training [30 ###reference_b30###, 2 ###reference_b2###], use current high-confidence\npseudo-labels and ground truths for iteratively training. Uncertainty estimation uses measures like information entropy to assess unlabeled data and guide pseudo-label filtering or weighting. Mean Teacher (MT) [21 ###reference_b21###]\nframework is a prevalent approach that enforces consistency between student and teacher models. UA-MT [28 ###reference_b28###] refines consistency learning of MT with uncertainty,\nwhile CoraNet [20 ###reference_b20###] applies different weights to teacher-generated pseudo-labels based on uncertainty.\nBCP [3 ###reference_b3###] aims to reduce the empirical distribution gap by learning common semantics from both labeled and unlabeled data in the MT framework.\nDespite the ongoing progress in SSMIS, confirmation and cognitive biases remains a critical limitation,\nespecially in the widely used semi-supervised learning based on teacher-student framework.\nSince the framework often adds input perturbations and applies consistency regularization between teacher and student,\na single model structure inevitably produces noisy or erroneous pseudo-labels [3 ###reference_b3###, 22 ###reference_b22###], resulting in model confirmation and cognitive biases [1 ###reference_b1###, 22 ###reference_b22###].\nThese biases limit the performance of the teacher-student framework and it is very difficult for the model to correct these biases on its own.\nRecently, methods like multi-student [7 ###reference_b7###, 15 ###reference_b15###] and multi-teacher [12 ###reference_b12###, 18 ###reference_b18###] have emerged to provide diverse pseudo-labels\nto mitigate confirmation and cognitive biases. Multi-student approaches involve cross-consistency learning,\nbut they may encounter training instability without an Exponential Moving Average (EMA) teacher,\nand diversity and stability can be di\ufb00icult to balance. On the other hand,\nthe multi-teacher methods employ a single student to update multiple teachers with various strategies\nto promote diverse learning. However, these methods are constrained by the single model structure,\nlimiting their ability to adequately address biases.\nThe researchers have also employed correction learning methods, such as [29 ###reference_b29###] incorporating dual-task network for bias correction and [23 ###reference_b23###] utilizing complementary network for mapping to ground truth. 
MCF [22 ###reference_b22###] proposes inter-subnet interaction as a means of bias corrections.\nIn this view, the teacher-student methods still lack a general approach for\nstablely rectify own biases using diverse information, and the incorporation of bias\ncorrection can help to improve the performance of SSMIS. Therefore, we propose\nstudents discrepancy-informed correction learning (SDCL) based on the Mean Teacher (MT) framework,\nfeaturing one self-ensembling teacher with two trainable students. We ensure stability with an\nEMA teacher and promote diversity by using students with different structures. SDCL\nconsiders the discrepancy areas between the segmentations of the two students as the\npotential bias areas, and then conducts correction learning in these areas. The contributions of this study include:\n(1) Different from the traditional teacher-student framework, we use two structurally different students and an EMA teacher to ensure the diversity and stability of the teacher-student framework.\n(2) We design a method to optimize bias correction learning that reviews correct cognition and rectifies error biases in the differences between the predictions of two students.\n(3) Our approach outperforms SOTA SSMIS methods on three datasets, and additionally, it performs comparably or surpasses the fully supervised method.\n###figure_1###"
},
{
"section_id": "2",
"parent_section_id": null,
"section_name": "Methodology",
"text": ""
},
{
"section_id": "2.1",
"parent_section_id": "2",
"section_name": "Problem Definition",
"text": "Given a medical image dataset , it contains labeled images and \nunlabeled images , i.e.\n,\nwhere\n and .\nEach 3D volume medical image \nin have label .\nThe output prediction of the model is\n.\nSDCL includes two students and a self-ensembling teacher."
},
{
"section_id": "2.2",
"parent_section_id": "2",
"section_name": "SDCL Framework",
"text": "Our framework consists of two phases: initial pre-training with labeled data using Copy-Paste augmentation [3 ###reference_b3###, 9 ###reference_b9###], followed by a semi-supervised learning (SSL) phase incorporating both labeled and unlabeled data. SSL begins by initializing students and the teacher with the pre-trained model.\nThe process of SSL includes three parts:\ni) obtaining the basic SSL segmentation losses based on the BCP strategy,\nii) obtaining DiffMask\n from student segmentation discrepancies to guide the framework in reviewing correct cognition voxels, and iii) generating ErrMask\n from the difference between student segmentations and mix labels, then creating DiffErrMask\n by multiplying and\n, which guides the repair of self-bias error voxels.\nTo achieve diversity between the two students,\nfor 3D tasks, we employ VNet as student A\nand ResVNet [22 ###reference_b22###] as student B. Meanwhile,\nfor 2D tasks, UNet is designated as student A,\nand ResUNet is assigned as student B.\nTo accurately evaluate the impact of discrepancy correction learning, efforts are made to minimize interference from other factors.\nWe use VNet/UNet (student A) for Exponential\nMoving Average (EMA) updates to the teacher,\naligning with other methodologies.\nIt\u2019s important to emphasize that the\nperformance gap between the two students is minimal.\nThe framework is illustrated in Fig. 1 ###reference_###."
},
{
"section_id": "2.3",
"parent_section_id": "2",
"section_name": "Bidirectional Copy-Paste",
"text": "We combine our framework with the current SSMIS SOTA method BCP [3 ###reference_b3###].\nIn the SSL phase, we need to generate a zero-centered\nmask ,\nwhere 0 represents foreground and 1 represents background, the size of\n0 region is , . Next,\nwe use the mask to obtain mix images as the input of SSL as follows:\nTo obtain the pseudo labels,\n and are forwarded to the teacher to obtain pseudo-labels and . Because\npseudo-labels contain a lot of noise [3 ###reference_b3###, 10 ###reference_b10###], which is very\nharmful to model training, the optimized\npseudo labels and are obtained by selecting the\nlargest connected component of raw pseudo labels.\nThen the mix labels are defined as follows:\nwhere and , and denotes element-wise multiplication.\nNext, fed and into two students to obtain and for each student.\nFinally, we get BCP losses computed respectively by Eq. (3 ###reference_###) and Eq. (4 ###reference_###):\nconsists of Dice and Cross-entropy loss in equal proportions. Since ground truths are generally more accurate than pseudo labels,\n is used to control the weight between them."
},
{
"section_id": "2.4",
"parent_section_id": "2",
"section_name": "Discrepancy Correction Learning",
"text": ""
},
{
"section_id": "2.4.1",
"parent_section_id": "2.4",
"section_name": "2.4.1 Minimize Discrepancy Correct Distance.",
"text": "From an intuitively straightforward perspective, during training,\nwe can increase the weighting of learning in the correct regions to\nreview correct voxels and avoid model biases. Here, we minimize Mean Squared Error (MSE) to enhance the model\u2019s learning towards the correct voxels in discrepant regions.\nWe first apply argmax to and , resulting in and . Then, is then derived from the following formula:\n\nwhere A and B respectively represent student A and B, denotes XOR operation.\nIntuitively, we can obtain losses using a method similar to BCP\n, and the MSE losses are computed through Eq. (5 ###reference_###), Eq. (6 ###reference_###), and Eq. (7 ###reference_###):"
},
{
"section_id": "2.4.2",
"parent_section_id": "2.4",
"section_name": "2.4.2 Maximize Discrepancy Erroneous Entropy.",
"text": "In the teacher-student framework, inherent confirmation and cognitive biases emerge. We incorporate a loss function for penalization to enable the model to self-correct these biases. Maximizing the entropy of erroneous voxels may be an effective strategy, pulling misclassified voxels in uncertain regions back to an initial state and redirecting them toward the correct direction.\nThe entropy of each voxel is defined as , where denotes the voxel\u2019s position, and . The objective is to maximize for each erroneously classified voxel. We employ Kullback-Leibler (KL) divergence [4 ###reference_b4###], a simple equivalent variant, to guide misclassified voxels in shifting their output distribution towards a uniform distribution. This minimizes the loss equation , where represents the uniform distribution that all components are equal to .\nBefore deriving losses, we obtain using the following expression:\n\nAfterward, .\nThe KL losses we obtain are as shown in the following equations Eq. (8 ###reference_###), Eq. (9 ###reference_###), and Eq. (10 ###reference_###):\nIn the final step, we form the total loss\nby linearly combining ,\n\nand with specific weights,\nas shown in Eq. (11 ###reference_###),\nMethod\nPancreas-CT\nLeft Atrium\n\n\nLb\nUnlb\nDice\nJac\n95HD\nASD\nLb\nUnlb\nDice\nJac\n95HD\nASD\n\nV-Net\n12\n0\n70.59\n56.77\n14.19\n2.25\n8\n0\n79.87\n67.60\n26.65\n7.94\n\nResV-Net\n12\n0\n68.94\n54.50\n13.86\n3.36\n8\n0\n80.07\n69.29\n19.50\n6.02\n\nV-Net\n62\n0\n82.60\n70.81\n5.61\n1.33\n80\n0\n91.47\n84.36\n5.48\n1.51\n\nResV-Net\n62\n0\n82.46\n70.50\n5.45\n1.44\n80\n0\n91.09\n83.90\n4.77\n1.75\n\nUA-MT [28 ###reference_b28###](MICCAI\u201919)\n\n\n77.26\n63.82\n11.90\n3.06\n\n\n87.79\n78.39\n8.68\n2.12\n\nDTC [14 ###reference_b14###](AAAI\u201921)\n\n\n78.27\n64.75\n8.36\n2.25\n\n\n87.51\n78.17\n8.23\n2.36\n\nCoraNet [20 ###reference_b20###](TMI\u201921)\n\n\n79.67\n66.69\n7.59\n1.89\n\n\n-\n-\n-\n-\n\nSS-Net [25 ###reference_b25###](MICCAI\u201922)\n\n\n-\n-\n-\n-\n\n\n88.55\n79.62\n7.49\n1.90\n\nMC-Net+ [24 ###reference_b24###](MIA\u201922)\n\n\n80.59\n68.08\n6.47\n1.74\n\n\n88.96\n80.25\n7.93\n1.86\n\nCAML [8 ###reference_b8###](MICCAI\u201923)\n12\n50\n-\n-\n-\n-\n8\n72\n89.62\n81.28\n8.76\n2.02\n\nDMD [26 ###reference_b26###](MICCAI\u201923)\n\n\n-\n-\n-\n-\n\n\n89.70\n81.42\n6.88\n1.78\n\nMCCauSSL [17 ###reference_b17###](ICCV\u201923)\n\n\n80.92\n68.26\n8.11\n1.53\n\n\n-\n-\n-\n-\n\nUPCoL [13 ###reference_b13###](MICCAI\u201923)\n\n\n81.78\n69.66\n3.78\n0.63\n\n\n-\n-\n-\n-\n\nBCP [3 ###reference_b3###](CVPR\u201923)\n\n\n82.91\n70.97\n6.43\n2.25\n\n\n89.62\n81.31\n6.81\n1.76\n\nSDCL(Ours)\n\n\n85.04\n74.22\n5.22\n1.48\n\n\n92.35\n85.83\n4.22\n1.44\nMethod\nACDC\n\n\nLb\nUnlb\nDice\nJac\n95HD\nASD\n\nU-Net\n7\n0\n79.41\n68.11\n9.35\n2.70\n\nResU-Net\n7\n0\n80.04\n68.73\n7.83\n1.94\n\nU-Net\n70\n0\n91.65\n84.93\n1.89\n0.56\n\nResU-Net\n70\n0\n90.44\n82.95\n1.77\n0.47\n\nUA-MT [28 ###reference_b28###](MICCAI\u201919)\n\n\n81.65\n70.64\n6.88\n2.02\n\nSASSNet [11 ###reference_b11###](MICCAI\u201920)\n\n\n84.50\n74.34\n5.42\n1.86\n\nDTC [14 ###reference_b14###](AAAI\u201921)\n\n\n84.29\n73.92\n12.81\n4.01\n\nSS-Net [25 ###reference_b25###](MICCAI\u201922)\n\n\n86.78\n77.67\n6.07\n1.40\n\nMC-Net+ [24 ###reference_b24###](MIA\u201922)\n7\n63\n87.10\n78.06\n6.68\n2.00\n\nDC-Net [6 ###reference_b6###](MICCAI\u201923)\n\n\n89.42\n81.37\n1.28\n0.38\n\nBCPCauSSL [17 ###reference_b17###](ICCV\u201923)\n\n\n89.66\n81.79\n3.67\n0.93\n\nBCP [3 
###reference_b3###](CVPR\u201923)\n\n\n88.84\n80.62\n3.98\n1.17\n\nSDCL(Ours)\n\n\n90.92\n83.83\n1.29\n0.34"
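The entropy-maximization term above can be sketched as a KL penalty toward the uniform distribution, applied only on the DiffErrMask voxels. The helper below is an assumption-laden illustration (since KL(p || uniform) = log C - H(p), minimizing it maximizes the prediction entropy); names and shapes are not taken from the paper:

import math
import torch

def diff_error_kl(logits, mix_label, diff_mask, eps=1e-8):
    num_classes = logits.size(1)
    p = torch.softmax(logits, dim=1)
    pred = p.argmax(dim=1)
    # ErrMask: voxels this student classifies differently from the mix label;
    # DiffErrMask keeps only those that also lie in the students' discrepancy region.
    diff_err_mask = (pred != mix_label).float() * diff_mask
    # Per-voxel KL(p || uniform) = sum_c p_c * (log p_c + log C); minimizing it
    # pushes the erroneous voxels' output distribution back toward uniform.
    kl = (p * (torch.log(p + eps) + math.log(num_classes))).sum(dim=1)
    return (kl * diff_err_mask).sum() / (diff_err_mask.sum() + 1e-6)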
},
{
"section_id": "3",
"parent_section_id": null,
"section_name": "Experiments",
"text": "Datasets.\nOur method is evaluated on three datasets: the Pancreas-NIH [19 ###reference_b19###] with 82 CT volumes (12 labeled (20%), 50 unlabeled), the LA (Left Atrium) [27 ###reference_b27###] dataset with 100\n3D GE-MRIs (8 labeled (10%), 72 unlabeled for training, 20 for testing), and the ACDC [5 ###reference_b5###]\ndataset with 100 cardiac MRI scans (70 for training, 10 for validation, 20 for testing, with 7 labeled(10%)\nand 63 unlabeled). For fair comparison, we use the same experimental setup as prior works like CoraNet\nand SS-Net [20 ###reference_b20###, 25 ###reference_b25###, 16 ###reference_b16###], normalizing images\nand applying standard data augmentation. We follow the dataset\nsplits used in previous studies. The ACDC result represents the average performance of four-class segmentation on the test set.\n###figure_2### Implementation Details.\nWe used PyTorch 2.1.0 and an NVIDIA RTX 4090 GPU to experiment, averaging segmentation results from students A and B for evaluation. Pre-training had iterations of 3k (Pancreas), 3k (LA), and 11k (ACDC), while SSL had iterations of 8k (Pancreas), 10k (LA), and 45k (ACDC). We employed the Adam optimizer with a learning rate of 0.001, a\nbatch size of 8, 8, 48 (half for labeled and half for unlabeled data), and set hyperparameters ,\n. Specific values for and were 0.3 and 0.1 (Pancreas), 0.5 and 0.05 (LA and ACDC).\nInput patch sizes were (Pancreas), (LA), and \n(ACDC). Testing used strides of (Pancreas) and (LA) [20 ###reference_b20###, 25 ###reference_b25###, 16 ###reference_b16###], with\nno post-processing during evaluation. Performance metrics included Dice Score (Dice), Jaccard Score (Jac),\n95% Hausdorff Distance (95HD), and Average Surface Distance (ASD).\nResults on the LA and Pancreas Datasets.\nTable 1 ###reference_### provides a detailed comparison of our\napproach with the current SOTA SSMIS methods and presents the performance upper and lower bounds of our two backbones using only labeled data in a fully supervised manner. The table demonstrates significant improvements in our method across four metrics\ncompared to the baseline (BCP) and other SOTA methods. As depicted in Fig. 2 ###reference_###, our approach closely aligns with the Ground Truth, especially in regions prone to errors at boundaries and connections. This underscores the efficacy of our discrepancy correction learning, significantly contributing to enhancing the model\u2019s edge and shape segmentation ability.\nResults on the ACDC Dataset.\nTable. 2 ###reference_### presents a comparative analysis of our results against current methods and the upper and lower bounds of fully supervised methods. Similarly, our method exhibits a substantial improvement over the baseline, particularly evident in a significant enhancement of the ASD metric. Our approach demonstrated a 39% reduction on the metric of ASD relative to the fully supervised upper bound of U-Net. The presence of more 2D slices in the dataset likely contributes to the enhancement of geometric segmentation integrity through discrepancy correction learning.\nAblation Study.\nTable. 3 ###reference_###, we conducted an ablation study on various components of our framework, comparing their impact on Dice scores against the baseline.\nOmitting , \nand individually\nresulted in improvements of 0.43% and 0.38%,\nwhile the simultaneous use of both losses showed\nan enhancement of over 0.53%. 
Introducing increased the improvements to 1.17% and 1.26%, and utilizing all components achieved a significant improvement of 2.16%. These findings highlight the positive effect of incorporating correct cognition review and self-bias error correction in SDCL on model performance.\nNotably, we observed the most favorable outcomes when simultaneously employing both losses and , affirming the efficacy of our proposed correction learning based on students\u2019 discrepancy.\nFor further insights, the Supplementary Materials include the variation of biased error voxels during the training process, detailed experiment results, and hyperparameter ablation studies in this paper.\nScans used\nComponents\nMetrics\n\nLb\nUnlb\n\n\n\n\nDice\nJac\n95HD\nASD\n\n\n\n\n\n\n\n83.23\n71.57\n8.53\n2.49\n\n\n\n\n\n\n\n83.59\n72.18\n7.20\n2.30\n\n\n\n\n\n\n\n84.20\n73.01\n6.25\n2.03\n\n12\n50\n\n\n\n\n83.55\n72.05\n7.36\n2.09\n\n\n\n\n\n\n\n84.28\n73.12\n6.31\n1.97\n\n\n\n\n\n\n\n83.67\n72.20\n9.12\n2.80\n\n\n\n\n\n\n\n85.04\n74.23\n5.22\n1.48"
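For reference, a small sketch of how the Dice and Jaccard overlap metrics reported above are typically computed on binary volumes; 95HD and ASD additionally require surface-distance computations and are omitted here. This is a generic illustration, not the paper's evaluation code:

import numpy as np

def dice_jaccard(pred, gt, eps=1e-8):
    # pred, gt: binary numpy arrays of the same shape (one foreground class).
    pred, gt = pred.astype(bool), gt.astype(bool)
    inter = np.logical_and(pred, gt).sum()
    union = np.logical_or(pred, gt).sum()
    dice = 2.0 * inter / (pred.sum() + gt.sum() + eps)
    jaccard = inter / (union + eps)
    return dice, jaccard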
},
{
"section_id": "4",
"parent_section_id": null,
"section_name": "Conclusion",
"text": "We propose a novel SSMIS framework, extending the Mean-Teacher with an additional student for correction learning based on student discrepancies. The method aims to review correct voxels and repair self-bias error voxels in discrepancies and is compatible with other teacher-student models. It outperforms existing methods in 2D and 3D tasks.\nFuture research will consider using students\u2019 information to refine teacher and further improve the performance of semi-supervised learning in medical image segmentation.\n{credits}"
}
],
"appendix": [],
"tables": {
"1": {
"table_html": "<figure class=\"ltx_table\" id=\"S2.T1\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Performance comparison on the Left Atrium and Pancreas datasets.</figcaption>\n<p class=\"ltx_p\" id=\"S2.T1.1\"><span class=\"ltx_text ltx_inline-block\" id=\"S2.T1.1.1\" style=\"width:433.6pt;\">\n<span class=\"ltx_inline-block ltx_transformed_outer\" id=\"S2.T1.1.1.1\" style=\"width:390.3pt;height:198.4pt;vertical-align:-0.6pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(-106.8pt,54.1pt) scale(0.64633,0.64633) ;\">\n<span class=\"ltx_p\" id=\"S2.T1.1.1.1.1\"><span class=\"ltx_text\" id=\"S2.T1.1.1.1.1.1\">\n<span class=\"ltx_inline-block ltx_transformed_outer\" id=\"S2.T1.1.1.1.1.1.1\" style=\"width:603.8pt;height:307pt;vertical-align:-1.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(0.0pt,0.0pt) scale(1,1) ;\">\n<span class=\"ltx_p\" id=\"S2.T1.1.1.1.1.1.1.1\"><span class=\"ltx_text\" id=\"S2.T1.1.1.1.1.1.1.1.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S2.T1.1.1.1.1.1.1.1.1.1\">\n<span class=\"ltx_tbody\">\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.1.1\">\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.1.1.1\">Method</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t ltx_colspan ltx_colspan_6\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.1.1.2\">Pancreas-CT</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t ltx_colspan ltx_colspan_6\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.1.1.3\">Left Atrium</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.2.2\">\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.2.2.1\"></span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.2.2.2\">Lb</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.2.2.3\">Unlb</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.2.2.4\">Dice</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.2.2.5\">Jac</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.2.2.6\">95HD</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.2.2.7\">ASD</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.2.2.8\">Lb</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.2.2.9\">Unlb</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.2.2.10\">Dice</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.2.2.11\">Jac</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.2.2.12\">95HD</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.2.2.13\">ASD</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.3.3\">\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.3.3.1\">V-Net</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.3.3.2\">12</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.3.3.3\">0</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.3.3.4\">70.59</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" 
id=\"S2.T1.1.1.1.1.1.1.1.1.1.3.3.5\">56.77</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.3.3.6\">14.19</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.3.3.7\">2.25</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.3.3.8\">8</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.3.3.9\">0</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.3.3.10\">79.87</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.3.3.11\">67.60</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.3.3.12\">26.65</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.3.3.13\">7.94</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.4.4\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.4.4.1\">ResV-Net</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.4.4.2\">12</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.4.4.3\">0</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.4.4.4\">68.94</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.4.4.5\">54.50</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.4.4.6\">13.86</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.4.4.7\">3.36</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.4.4.8\">8</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.4.4.9\">0</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.4.4.10\">80.07</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.4.4.11\">69.29</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.4.4.12\">19.50</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.4.4.13\">6.02</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.5.5\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.5.5.1\">V-Net</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.5.5.2\">62</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.5.5.3\">0</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.5.5.4\">82.60</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.5.5.5\">70.81</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.5.5.6\">5.61</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.5.5.7\">1.33</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.5.5.8\">80</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.5.5.9\">0</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.5.5.10\">91.47</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.5.5.11\">84.36</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.5.5.12\">5.48</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.5.5.13\">1.51</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.6.6\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" 
id=\"S2.T1.1.1.1.1.1.1.1.1.1.6.6.1\">ResV-Net</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.6.6.2\">62</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.6.6.3\">0</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.6.6.4\">82.46</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.6.6.5\">70.50</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.6.6.6\">5.45</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.6.6.7\">1.44</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.6.6.8\">80</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.6.6.9\">0</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.6.6.10\">91.09</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.6.6.11\">83.90</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.6.6.12\">4.77</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.6.6.13\">1.75</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.7.7\">\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.7.7.1\">UA-MT\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib28\" title=\"\">28 ###reference_b28###</a>]</cite>(MICCAI\u201919)</span>\n<span class=\"ltx_td ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.7.7.2\"></span>\n<span class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.7.7.3\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.7.7.4\">77.26</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.7.7.5\">63.82</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.7.7.6\">11.90</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.7.7.7\">3.06</span>\n<span class=\"ltx_td ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.7.7.8\"></span>\n<span class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.7.7.9\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.7.7.10\">87.79</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.7.7.11\">78.39</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.7.7.12\">8.68</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.7.7.13\">2.12</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.8.8\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.8.8.1\">DTC\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib14\" title=\"\">14 ###reference_b14###</a>]</cite>(AAAI\u201921)</span>\n<span class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.8.8.2\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.8.8.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.8.8.4\">78.27</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.8.8.5\">64.75</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.8.8.6\">8.36</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" 
id=\"S2.T1.1.1.1.1.1.1.1.1.1.8.8.7\">2.25</span>\n<span class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.8.8.8\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.8.8.9\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.8.8.10\">87.51</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.8.8.11\">78.17</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.8.8.12\">8.23</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.8.8.13\">2.36</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.9.9\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.9.9.1\">CoraNet\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib20\" title=\"\">20 ###reference_b20###</a>]</cite>(TMI\u201921)</span>\n<span class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.9.9.2\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.9.9.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.9.9.4\">79.67</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.9.9.5\">66.69</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.9.9.6\">7.59</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.9.9.7\">1.89</span>\n<span class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.9.9.8\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.9.9.9\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.9.9.10\">-</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.9.9.11\">-</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.9.9.12\">-</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.9.9.13\">-</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.10.10\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.10.10.1\">SS-Net\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib25\" title=\"\">25 ###reference_b25###</a>]</cite>(MICCAI\u201922)</span>\n<span class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.10.10.2\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.10.10.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.10.10.4\">-</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.10.10.5\">-</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.10.10.6\">-</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.10.10.7\">-</span>\n<span class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.10.10.8\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.10.10.9\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.10.10.10\">88.55</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.10.10.11\">79.62</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.10.10.12\">7.49</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.10.10.13\">1.90</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.11.11\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.11.11.1\">MC-Net+\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a 
class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib24\" title=\"\">24 ###reference_b24###</a>]</cite>(MIA\u201922)</span>\n<span class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.11.11.2\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.11.11.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.11.11.4\">80.59</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.11.11.5\">68.08</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.11.11.6\">6.47</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.11.11.7\">1.74</span>\n<span class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.11.11.8\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.11.11.9\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.11.11.10\">88.96</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.11.11.11\">80.25</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.11.11.12\">7.93</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.11.11.13\">1.86</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.12.12\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.12.12.1\">CAML\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib8\" title=\"\">8 ###reference_b8###</a>]</cite>(MICCAI\u201923)</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.12.12.2\">12</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.12.12.3\">50</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.12.12.4\">-</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.12.12.5\">-</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.12.12.6\">-</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.12.12.7\">-</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.12.12.8\">8</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.12.12.9\">72</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.12.12.10\">89.62</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.12.12.11\">81.28</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.12.12.12\">8.76</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.12.12.13\">2.02</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.13.13\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.13.13.1\">DMD\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib26\" title=\"\">26 ###reference_b26###</a>]</cite>(MICCAI\u201923)</span>\n<span class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.13.13.2\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.13.13.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.13.13.4\">-</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.13.13.5\">-</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.13.13.6\">-</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.13.13.7\">-</span>\n<span 
class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.13.13.8\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.13.13.9\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.13.13.10\">89.70</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.13.13.11\">81.42</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.13.13.12\">6.88</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.13.13.13\">1.78</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.14.14\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.14.14.1\">MCCauSSL\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib17\" title=\"\">17 ###reference_b17###</a>]</cite>(ICCV\u201923)</span>\n<span class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.14.14.2\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.14.14.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.14.14.4\">80.92</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.14.14.5\">68.26</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.14.14.6\">8.11</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.14.14.7\">1.53</span>\n<span class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.14.14.8\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.14.14.9\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.14.14.10\">-</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.14.14.11\">-</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.14.14.12\">-</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.14.14.13\">-</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.15.15\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.15.15.1\">UPCoL\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib13\" title=\"\">13 ###reference_b13###</a>]</cite>(MICCAI\u201923)</span>\n<span class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.15.15.2\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.15.15.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.15.15.4\">81.78</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.15.15.5\">69.66</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.15.15.6\">3.78</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.15.15.7\">0.63</span>\n<span class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.15.15.8\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.15.15.9\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.15.15.10\">-</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.15.15.11\">-</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.15.15.12\">-</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.15.15.13\">-</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.16.16\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.16.16.1\">BCP\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" 
href=\"https://arxiv.org/html/2409.16728v2#bib.bib3\" title=\"\">3 ###reference_b3###</a>]</cite>(CVPR\u201923)</span>\n<span class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.16.16.2\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.16.16.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.16.16.4\">82.91</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.16.16.5\">70.97</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.16.16.6\">6.43</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.16.16.7\">2.25</span>\n<span class=\"ltx_td\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.16.16.8\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.16.16.9\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.16.16.10\">89.62</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.16.16.11\">81.31</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.16.16.12\">6.81</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.16.16.13\">1.76</span></span>\n<span class=\"ltx_tr\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17\">\n<span class=\"ltx_td ltx_align_left ltx_border_b ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.1\">SDCL(Ours)</span>\n<span class=\"ltx_td ltx_border_b\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.2\"></span>\n<span class=\"ltx_td ltx_border_b ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.3\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.4.1\">85.04</span></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.5.1\">74.22</span></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.6.1\">5.22</span></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.7.1\">1.48</span></span>\n<span class=\"ltx_td ltx_border_b\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.8\"></span>\n<span class=\"ltx_td ltx_border_b ltx_border_r\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.9\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.10\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.10.1\">92.35</span></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.11\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.11.1\">85.83</span></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.12\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.12.1\">4.22</span></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.13\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T1.1.1.1.1.1.1.1.1.1.17.17.13.1\">1.44</span></span></span>\n</span>\n</span></span></span>\n</span></span></span></span>\n</span></span></span></p>\n</figure>",
"capture": "Table 1: Performance comparison on the Left Atrium and Pancreas datasets."
},
"2": {
"table_html": "<figure class=\"ltx_table\" id=\"S2.T2\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>Performance on ACDC dataset</figcaption>\n<p class=\"ltx_p\" id=\"S2.T2.1\"><span class=\"ltx_text ltx_inline-block\" id=\"S2.T2.1.1\" style=\"width:433.6pt;\">\n<span class=\"ltx_inline-block ltx_transformed_outer\" id=\"S2.T2.1.1.1\" style=\"width:390.3pt;height:255.5pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(-11.0pt,7.2pt) scale(0.94646,0.94646) ;\">\n<span class=\"ltx_p\" id=\"S2.T2.1.1.1.1\"><span class=\"ltx_text\" id=\"S2.T2.1.1.1.1.1\">\n<span class=\"ltx_inline-block ltx_transformed_outer\" id=\"S2.T2.1.1.1.1.1.1\" style=\"width:412.3pt;height:270pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(0.0pt,0.0pt) scale(1,1) ;\">\n<span class=\"ltx_p\" id=\"S2.T2.1.1.1.1.1.1.1\"><span class=\"ltx_text\" id=\"S2.T2.1.1.1.1.1.1.1.1\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S2.T2.1.1.1.1.1.1.1.1.1\">\n<span class=\"ltx_tbody\">\n<span class=\"ltx_tr\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.1.1\">\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.1.1.1\">Method</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t ltx_colspan ltx_colspan_6\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.1.1.2\">ACDC</span></span>\n<span class=\"ltx_tr\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.2.2\">\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.2.2.1\"></span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.2.2.2\">Lb</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.2.2.3\">Unlb</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.2.2.4\">Dice</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.2.2.5\">Jac</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.2.2.6\">95HD</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.2.2.7\">ASD</span></span>\n<span class=\"ltx_tr\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.3.3\">\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.3.3.1\">U-Net</span>\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.3.3.2\">7</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.3.3.3\">0</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.3.3.4\">79.41</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.3.3.5\">68.11</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.3.3.6\">9.35</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.3.3.7\">2.70</span></span>\n<span class=\"ltx_tr\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.4.4\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.4.4.1\">ResU-Net</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.4.4.2\">7</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.4.4.3\">0</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.4.4.4\">80.04</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.4.4.5\">68.73</span>\n<span class=\"ltx_td ltx_align_left\" 
id=\"S2.T2.1.1.1.1.1.1.1.1.1.4.4.6\">7.83</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.4.4.7\">1.94</span></span>\n<span class=\"ltx_tr\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.5.5\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.5.5.1\">U-Net</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.5.5.2\">70</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.5.5.3\">0</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.5.5.4\">91.65</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.5.5.5\">84.93</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.5.5.6\">1.89</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.5.5.7\">0.56</span></span>\n<span class=\"ltx_tr\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.6.6\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.6.6.1\">ResU-Net</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.6.6.2\">70</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.6.6.3\">0</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.6.6.4\">90.44</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.6.6.5\">82.95</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.6.6.6\">1.77</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.6.6.7\">0.47</span></span>\n<span class=\"ltx_tr\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.7.7\">\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.7.7.1\">UA-MT\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib28\" title=\"\">28 ###reference_b28###</a>]</cite>(MICCAI\u201919)</span>\n<span class=\"ltx_td ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.7.7.2\"></span>\n<span class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.7.7.3\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.7.7.4\">81.65</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.7.7.5\">70.64</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.7.7.6\">6.88</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.7.7.7\">2.02</span></span>\n<span class=\"ltx_tr\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.8.8\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.8.8.1\">SASSNet\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib11\" title=\"\">11 ###reference_b11###</a>]</cite>(MICCAI\u201920)</span>\n<span class=\"ltx_td\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.8.8.2\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.8.8.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.8.8.4\">84.50</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.8.8.5\">74.34</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.8.8.6\">5.42</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.8.8.7\">1.86</span></span>\n<span class=\"ltx_tr\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.9.9\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.9.9.1\">DTC\u00a0<cite 
class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib14\" title=\"\">14 ###reference_b14###</a>]</cite>(AAAI\u201921)</span>\n<span class=\"ltx_td\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.9.9.2\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.9.9.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.9.9.4\">84.29</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.9.9.5\">73.92</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.9.9.6\">12.81</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.9.9.7\">4.01</span></span>\n<span class=\"ltx_tr\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.10.10\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.10.10.1\">SS-Net\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib25\" title=\"\">25 ###reference_b25###</a>]</cite>(MICCAI\u201922)</span>\n<span class=\"ltx_td\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.10.10.2\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.10.10.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.10.10.4\">86.78</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.10.10.5\">77.67</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.10.10.6\">6.07</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.10.10.7\">1.40</span></span>\n<span class=\"ltx_tr\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.11.11\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.11.11.1\">MC-Net+\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib24\" title=\"\">24 ###reference_b24###</a>]</cite>(MIA\u201922)</span>\n<span class=\"ltx_td ltx_align_center\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.11.11.2\">7</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.11.11.3\">63</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.11.11.4\">87.10</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.11.11.5\">78.06</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.11.11.6\">6.68</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.11.11.7\">2.00</span></span>\n<span class=\"ltx_tr\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.12.12\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.12.12.1\">DC-Net\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib6\" title=\"\">6 ###reference_b6###</a>]</cite>(MICCAI\u201923)</span>\n<span class=\"ltx_td\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.12.12.2\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.12.12.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.12.12.4\">89.42</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.12.12.5\">81.37</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.12.12.6\">1.28</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.12.12.7\">0.38</span></span>\n<span class=\"ltx_tr\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.13.13\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.13.13.1\">BCPCauSSL\u00a0<cite class=\"ltx_cite 
ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib17\" title=\"\">17 ###reference_b17###</a>]</cite>(ICCV\u201923)</span>\n<span class=\"ltx_td\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.13.13.2\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.13.13.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.13.13.4\">89.66</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.13.13.5\">81.79</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.13.13.6\">3.67</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.13.13.7\">0.93</span></span>\n<span class=\"ltx_tr\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.14.14\">\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.14.14.1\">BCP\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.16728v2#bib.bib3\" title=\"\">3 ###reference_b3###</a>]</cite>(CVPR\u201923)</span>\n<span class=\"ltx_td\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.14.14.2\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.14.14.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.14.14.4\">88.84</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.14.14.5\">80.62</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.14.14.6\">3.98</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.14.14.7\">1.17</span></span>\n<span class=\"ltx_tr\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.15.15\">\n<span class=\"ltx_td ltx_align_left ltx_border_b ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.15.15.1\">SDCL(Ours)</span>\n<span class=\"ltx_td ltx_border_b\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.15.15.2\"></span>\n<span class=\"ltx_td ltx_border_b ltx_border_r\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.15.15.3\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.15.15.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.15.15.4.1\">90.92</span></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.15.15.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.15.15.5.1\">83.83</span></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.15.15.6\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.15.15.6.1\">1.29</span></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.15.15.7\"><span class=\"ltx_text ltx_font_bold\" id=\"S2.T2.1.1.1.1.1.1.1.1.1.15.15.7.1\">0.34</span></span></span>\n</span>\n</span></span></span>\n</span></span></span></span>\n</span></span></span></p>\n</figure>",
"capture": "Table 2: Performance on ACDC dataset"
},
"3": {
"table_html": "<figure class=\"ltx_table\" id=\"S3.T3\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>Ablation study results on the pancreas dataset.</figcaption>\n<p class=\"ltx_p\" id=\"S3.T3.22\"><span class=\"ltx_text ltx_inline-block\" id=\"S3.T3.22.22\" style=\"width:433.6pt;\">\n<span class=\"ltx_inline-block ltx_transformed_outer\" id=\"S3.T3.22.22.22.22.22\" style=\"width:310.5pt;height:162pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(0.0pt,0.0pt) scale(1,1) ;\">\n<span class=\"ltx_p\" id=\"S3.T3.22.22.22.22.22.22\"><span class=\"ltx_text\" id=\"S3.T3.22.22.22.22.22.22.22\">\n<span class=\"ltx_tabular ltx_align_middle\" id=\"S3.T3.22.22.22.22.22.22.22.22\">\n<span class=\"ltx_tbody\">\n<span class=\"ltx_tr\" id=\"S3.T3.22.22.22.22.22.22.22.22.23.1\">\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t ltx_colspan ltx_colspan_2\" id=\"S3.T3.22.22.22.22.22.22.22.22.23.1.1\">Scans used</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t ltx_colspan ltx_colspan_4\" id=\"S3.T3.22.22.22.22.22.22.22.22.23.1.2\">Components</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t ltx_colspan ltx_colspan_4\" id=\"S3.T3.22.22.22.22.22.22.22.22.23.1.3\">Metrics</span></span>\n<span class=\"ltx_tr\" id=\"S3.T3.4.4.4.4.4.4.4.4.4\">\n<span class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S3.T3.4.4.4.4.4.4.4.4.4.5\">Lb</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S3.T3.4.4.4.4.4.4.4.4.4.6\">Unlb</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.1.1.1.1.1.1.1.1.1.1\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.2.2.2.2.2.2.2.2.2.2\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.3.3.3.3.3.3.3.3.3.3\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.4.4.4.4.4.4.4.4.4.4\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.4.4.4.4.4.4.4.4.4.7\">Dice</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.4.4.4.4.4.4.4.4.4.8\">Jac</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.4.4.4.4.4.4.4.4.4.9\">95HD</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.4.4.4.4.4.4.4.4.4.10\">ASD</span></span>\n<span class=\"ltx_tr\" id=\"S3.T3.5.5.5.5.5.5.5.5.5\">\n<span class=\"ltx_td ltx_border_t\" id=\"S3.T3.5.5.5.5.5.5.5.5.5.2\"></span>\n<span class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T3.5.5.5.5.5.5.5.5.5.3\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.5.5.5.5.5.5.5.5.5.1\"></span>\n<span class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T3.5.5.5.5.5.5.5.5.5.4\"></span>\n<span class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T3.5.5.5.5.5.5.5.5.5.5\"></span>\n<span class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T3.5.5.5.5.5.5.5.5.5.6\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.5.5.5.5.5.5.5.5.5.7\">83.23</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.5.5.5.5.5.5.5.5.5.8\">71.57</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.5.5.5.5.5.5.5.5.5.9\">8.53</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.5.5.5.5.5.5.5.5.5.10\">2.49</span></span>\n<span class=\"ltx_tr\" id=\"S3.T3.7.7.7.7.7.7.7.7.7\">\n<span class=\"ltx_td\" id=\"S3.T3.7.7.7.7.7.7.7.7.7.3\"></span>\n<span class=\"ltx_td ltx_border_r\" 
id=\"S3.T3.7.7.7.7.7.7.7.7.7.4\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.6.6.6.6.6.6.6.6.6.1\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.7.7.7.7.7.7.7.7.7.2\"></span>\n<span class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T3.7.7.7.7.7.7.7.7.7.5\"></span>\n<span class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T3.7.7.7.7.7.7.7.7.7.6\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.7.7.7.7.7.7.7.7.7.7\">83.59</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.7.7.7.7.7.7.7.7.7.8\">72.18</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.7.7.7.7.7.7.7.7.7.9\">7.20</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.7.7.7.7.7.7.7.7.7.10\">2.30</span></span>\n<span class=\"ltx_tr\" id=\"S3.T3.10.10.10.10.10.10.10.10.10\">\n<span class=\"ltx_td\" id=\"S3.T3.10.10.10.10.10.10.10.10.10.4\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S3.T3.10.10.10.10.10.10.10.10.10.5\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.8.8.8.8.8.8.8.8.8.1\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.9.9.9.9.9.9.9.9.9.2\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S3.T3.10.10.10.10.10.10.10.10.10.6\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.10.10.10.10.10.10.10.10.10.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S3.T3.10.10.10.10.10.10.10.10.10.7\">84.20</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S3.T3.10.10.10.10.10.10.10.10.10.8\">73.01</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S3.T3.10.10.10.10.10.10.10.10.10.9\">6.25</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S3.T3.10.10.10.10.10.10.10.10.10.10\">2.03</span></span>\n<span class=\"ltx_tr\" id=\"S3.T3.12.12.12.12.12.12.12.12.12\">\n<span class=\"ltx_td ltx_align_center\" id=\"S3.T3.12.12.12.12.12.12.12.12.12.3\">12</span>\n<span class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S3.T3.12.12.12.12.12.12.12.12.12.4\">50</span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.11.11.11.11.11.11.11.11.11.1\"></span>\n<span class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T3.12.12.12.12.12.12.12.12.12.5\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.12.12.12.12.12.12.12.12.12.2\"></span>\n<span class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T3.12.12.12.12.12.12.12.12.12.6\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.12.12.12.12.12.12.12.12.12.7\">83.55</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.12.12.12.12.12.12.12.12.12.8\">72.05</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.12.12.12.12.12.12.12.12.12.9\">7.36</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.12.12.12.12.12.12.12.12.12.10\">2.09</span></span>\n<span class=\"ltx_tr\" id=\"S3.T3.15.15.15.15.15.15.15.15.15\">\n<span class=\"ltx_td\" id=\"S3.T3.15.15.15.15.15.15.15.15.15.4\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S3.T3.15.15.15.15.15.15.15.15.15.5\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.13.13.13.13.13.13.13.13.13.1\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S3.T3.15.15.15.15.15.15.15.15.15.6\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" id=\"S3.T3.14.14.14.14.14.14.14.14.14.2\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r\" 
id=\"S3.T3.15.15.15.15.15.15.15.15.15.3\"></span>\n<span class=\"ltx_td ltx_align_left\" id=\"S3.T3.15.15.15.15.15.15.15.15.15.7\">84.28</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S3.T3.15.15.15.15.15.15.15.15.15.8\">73.12</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S3.T3.15.15.15.15.15.15.15.15.15.9\">6.31</span>\n<span class=\"ltx_td ltx_align_left\" id=\"S3.T3.15.15.15.15.15.15.15.15.15.10\">1.97</span></span>\n<span class=\"ltx_tr\" id=\"S3.T3.18.18.18.18.18.18.18.18.18\">\n<span class=\"ltx_td\" id=\"S3.T3.18.18.18.18.18.18.18.18.18.4\"></span>\n<span class=\"ltx_td ltx_border_r\" id=\"S3.T3.18.18.18.18.18.18.18.18.18.5\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.16.16.16.16.16.16.16.16.16.1\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.17.17.17.17.17.17.17.17.17.2\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_r ltx_border_t\" id=\"S3.T3.18.18.18.18.18.18.18.18.18.3\"></span>\n<span class=\"ltx_td ltx_border_r ltx_border_t\" id=\"S3.T3.18.18.18.18.18.18.18.18.18.6\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.18.18.18.18.18.18.18.18.18.7\">83.67</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.18.18.18.18.18.18.18.18.18.8\">72.20</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.18.18.18.18.18.18.18.18.18.9\">9.12</span>\n<span class=\"ltx_td ltx_align_left ltx_border_t\" id=\"S3.T3.18.18.18.18.18.18.18.18.18.10\">2.80</span></span>\n<span class=\"ltx_tr\" id=\"S3.T3.22.22.22.22.22.22.22.22.22\">\n<span class=\"ltx_td ltx_border_b\" id=\"S3.T3.22.22.22.22.22.22.22.22.22.5\"></span>\n<span class=\"ltx_td ltx_border_b ltx_border_r\" id=\"S3.T3.22.22.22.22.22.22.22.22.22.6\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b ltx_border_r\" id=\"S3.T3.19.19.19.19.19.19.19.19.19.1\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b ltx_border_r\" id=\"S3.T3.20.20.20.20.20.20.20.20.20.2\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b ltx_border_r\" id=\"S3.T3.21.21.21.21.21.21.21.21.21.3\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b ltx_border_r\" id=\"S3.T3.22.22.22.22.22.22.22.22.22.4\"></span>\n<span class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S3.T3.22.22.22.22.22.22.22.22.22.7\">85.04</span>\n<span class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S3.T3.22.22.22.22.22.22.22.22.22.8\">74.23</span>\n<span class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S3.T3.22.22.22.22.22.22.22.22.22.9\">5.22</span>\n<span class=\"ltx_td ltx_align_left ltx_border_b\" id=\"S3.T3.22.22.22.22.22.22.22.22.22.10\">1.48</span></span>\n</span>\n</span></span></span>\n</span></span></span></p>\n</figure>",
|
| 78 |
+
"capture": "Table 3: Ablation study results on the pancreas dataset."
|
| 79 |
+
}
|
| 80 |
+
},
|
| 81 |
+
"image_paths": {
|
| 82 |
+
"1": {
|
| 83 |
+
"figure_path": "2409.16728v2_figure_1.png",
|
| 84 |
+
"caption": "Figure 1: \nOverview of the proposed students discrepancy-informed correction learning (SDCL) framework for\nsemi-supervised medical image segmentation.\nThe SDCL framework adopts a BCP strategy, merging two labeled and two unlabeled images to create mix images. The teacher uses\nunlabeled images to generate pseudo-labels. Ground truths and pseudo-labels are then mixed to\nproduce mix labels. Students A and B process the mixed images separately to calculate\nsegmentation loss. Finally, mask computation determines discrepancy and error masks, guiding the correction learning process.",
|
| 85 |
+
"url": "http://arxiv.org/html/2409.16728v2/extracted/5901408/ff.jpg"
|
| 86 |
+
},
|
| 87 |
+
"2": {
|
| 88 |
+
"figure_path": "2409.16728v2_figure_2.png",
|
| 89 |
+
"caption": "Figure 2: 2D and 3D Visualization of segmentation results on Left Atrium dataset.",
|
| 90 |
+
"url": "http://arxiv.org/html/2409.16728v2/extracted/5901408/visual.jpg"
|
| 91 |
+
}
|
| 92 |
+
},
|
| 93 |
+
"validation": true,
|
| 94 |
+
"references": [],
|
| 95 |
+
"url": "http://arxiv.org/html/2409.16728v2"
|
| 96 |
+
}
|
20241004/2409.18256v2.json
ADDED
|
@@ -0,0 +1,11 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Amodal Instance Segmentation with Diffusion Shape Prior Estimation",
|
| 3 |
+
"abstract": "Amodal Instance Segmentation (AIS) presents an intriguing challenge, including the segmentation prediction of both visible and occluded parts of objects within images. Previous methods have often relied on shape prior information gleaned from training data to enhance amodal segmentation. However, these approaches are susceptible to overfitting and disregard object category details. Recent advancements highlight the potential of conditioned diffusion models, pretrained on extensive datasets, to generate images from latent space. Drawing inspiration from this, we propose AISDiff with a Diffusion Shape Prior Estimation (DiffSP) module. AISDiff begins with the prediction of the visible segmentation mask and object category, alongside occlusion-aware processing through the prediction of occluding masks. Subsequently, these elements are inputted into our DiffSP module to infer the shape prior of the object. DiffSP utilizes conditioned diffusion models pretrained on extensive datasets to extract rich visual features for shape prior estimation. Additionally, we introduce the Shape Prior Amodal Predictor, which utilizes attention-based feature maps from the shape prior to refine amodal segmentation. Experiments across various AIS benchmarks demonstrate the effectiveness of our AISDiff.",
|
| 4 |
+
"sections": [],
|
| 5 |
+
"appendix": [],
|
| 6 |
+
"tables": {},
|
| 7 |
+
"image_paths": {},
|
| 8 |
+
"validation": true,
|
| 9 |
+
"references": [],
|
| 10 |
+
"url": "http://arxiv.org/html/2409.18256v2"
|
| 11 |
+
}
|
20241004/2409.18881v2.json
ADDED
|
@@ -0,0 +1,128 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Explainable Artifacts for Synthetic Western Blot Source Attribution",
|
| 3 |
+
"abstract": "Recent advancements in artificial intelligence have enabled generative models to produce synthetic scientific images that are indistinguishable from pristine ones, posing a challenge even for expert scientists habituated to working with such content. When exploited by organizations known as paper mills, which systematically generate fraudulent articles, these technologies can significantly contribute to the spread of misinformation about ungrounded science, potentially undermining trust in scientific research. While previous studies have explored black-box solutions, such as Convolutional Neural Networks, for identifying synthetic content, only some have addressed the challenge of generalizing across different models and providing insight into the artifacts in synthetic images that inform the detection process. This study aims to identify explainable artifacts generated by state-of-the-art generative models (e.g., Generative Adversarial Networks and Diffusion Models) and leverage them for open-set identification and source attribution (i.e., pointing to the model that created the image).",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Numerous problematic scientific articles have recently been reported, presenting distinctive features that suggest they were systematically produced. Dr. J. Christopher, an editor of FEBS PRESS, was the first to report multiple manuscripts with doctored figures received by their editorial board [1 ###reference_b1###]. These figures shared similar backgrounds and unique characteristics despite being attributed to different authors.\nFollowing this report, thousands of other articles have been flagged as systematically produced and subsequently retracted by other journals [2 ###reference_b2###], with most published in the medical and biological fields [3 ###reference_b3###].\nThe case has been attributed to potentially illegal organizations, known as paper mills, which provide scientific writing and publishing services for papers seemingly lacking scientific merit [4 ###reference_b4###]. Recent investigations have found that this industry generates millions of dollars worldwide and has been \u201cbribing editors and planting their agents on editorial boards to ensure publication\u201d [5 ###reference_b5###].\nTo worsen the issue, recent advancements in generative Artificial Intelligence (AI) models, along with their increasing accessibility, could aid paper mills in expanding their production.\nPrevious work has demonstrated that AI synthetic scientific images can be indistinguishable from genuine ones [6 ###reference_b6###] and could threaten scientific integrity. Such a threat may have already materialized, as recent publications have been retracted for using AI-generated images [7 ###reference_b7###].\nWestern blots are the most concerned type of scientific image in this context because they are easily generated using AI [6 ###reference_b6###] and are frequently used in publications by paper mills [1 ###reference_b1###, 4 ###reference_b4###]. These images derive from biomedical experimental procedures used in laboratories to detect and measure protein levels.\nMore than 400,000 research works rely on these images, accounting only for those listed in PubMed [8 ###reference_b8###], a large repository of biomedical articles.\nA possible approach to identify paper mills involves analyzing the similarity of systematically produced images with image provenance analysis [9 ###reference_b9###]. However, generative AI could easily produce never-before-seen images, which makes their identification more challenging.\nAnother method is to identify the source model that generated the AI figures, tracking mills by the models they are using. When designing such a solution, we should consider the complex nature of paper mills and the severe implications of falsely accusing authors. A solution to this problem should not only determine the source model of an image but also provide a clear explanation for its decision.\nTherefore, in this work, we explore forensic solutions to identify and attribute the source of synthetic Western blots. We rely on explainable low-level artifacts from AI generation methods.\nOur contributions are threefold:\nAn analysis of low-level artifacts present on synthetic Western blot images;\nNew methods to expose AI artifacts, namely (i) by analyzing image patches using the Fourier spectrum and\n(ii) by the analysis of texture-based features;\nAn analysis of the residual-noise impact on exposing synthetic artifacts.\nThe code and dataset from our research are available at https://github.com/phillipecardenuto/ai-wblots-detector ###reference_lots-detector###"
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "II AI Generation Artifacts",
|
| 15 |
+
"text": "This section investigates possible sources of synthetic generation artifacts and lists some promising features that can be exploited to spot AI-generated images and perform generation model source attribution."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "2.1",
|
| 19 |
+
"parent_section_id": "2",
|
| 20 |
+
"section_name": "II-A Common AI Generation Artifacts",
|
| 21 |
+
"text": "###figure_1### ###figure_2### Most generative AI models work as autoencoders [10 ###reference_b10###]. They first encode an input signal into a latent space and then decode it into an intelligible media such as text, images, or audio.\nIn the case of images, the process\ntransforms a one-dimensional vector into a two-dimensional array (i.e., the output image). During this process, most models perform an upsampling operation to increase the size of the image, which typically adds specific artifacts [11 ###reference_b11###].\nSuch sampling artifacts occur unnaturally and at a periodic rate that can be exposed using, for instance, Fourier spectrum analysis [12 ###reference_b12###, 13 ###reference_b13###]. Due to their nature, we refer to them as periodic artifacts.\nCheckerboard artifacts are well-known examples in this category. As depicts Fig. 1 ###reference_###, these artifacts appear as repetitive checkerboard-like patterns in AI-generated images. Odena et al. [11 ###reference_b11###] demonstrated that deconvolutional kernels, commonly used by state-of-the-art GAN-based models, are the primary cause of these artifacts. During the deconvolution operation (upsampling), if the kernel size is not divisible by the deconvolution stride, overlapping regions are created in the output for two neighboring pixels from the input image. This overlap occurs at a periodic rate, producing the checkerboard effect. The checkerboard pattern creates distinct high-frequency band peaks in a Fourier spectrum, generating a unique data fingerprint. For instance, popular GAN-based generators like CycleGAN [14 ###reference_b14###] and Pix2Pix [15 ###reference_b15###] use indivisible parameters (kernel size of 3 and stride of 2) for deconvolution.\nThis results in a noticeable checkerboard effect on their generated samples, as shows Fig. 1 ###reference_###.\nOdena et al. [11 ###reference_b11###] suggested using a resize-convolution operation to avoid these artifacts. This operation first resizes the signal using nearest-neighbor interpolation and then applies a standard convolutional kernel, avoiding the overlap. This fix has been used on the upsampling implementation of recent generators to improve their output [16 ###reference_b16###].\nHowever, while this approach may address the checkerboard pattern, it still produces a linear combination of the pixels\u2019 neighborhood in the output image [11 ###reference_b11###]. Therefore, this operation alone cannot eliminate the periodic artifacts, which could still be detected in the Fourier spectrum [17 ###reference_b17###]."
|
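The divisibility argument in the section above can be checked with a few lines of NumPy. The sketch below is an illustration of the idea, not code from the cited works: it counts how many transposed-convolution kernel taps land on each 1-D output position, so the periodic, uneven overlap behind the checkerboard pattern shows up for kernel size 3 with stride 2, while a kernel size divisible by the stride overlaps evenly.

import numpy as np

def overlap_map(n_in, kernel_size, stride):
    # Number of kernel taps contributing to each output position of a
    # 1-D transposed convolution (upsampling) layer.
    out_len = (n_in - 1) * stride + kernel_size
    counts = np.zeros(out_len, dtype=int)
    for i in range(n_in):
        counts[i * stride:i * stride + kernel_size] += 1
    return counts

print(overlap_map(8, kernel_size=3, stride=2))  # alternating 1,2,1,2,... -> checkerboard
print(overlap_map(8, kernel_size=4, stride=2))  # constant interior overlap -> no checkerboard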
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "2.2",
|
| 25 |
+
"parent_section_id": "2",
|
| 26 |
+
"section_name": "II-B Exposing AI generation artifacts",
|
| 27 |
+
"text": "Over the years, the forensic community has proposed multiple solutions to expose periodic artifacts on natural images.\nIn the following, we report strategies that are most employed in the state of the art, dividing them into two categories: hand-crafted features and deep learning features.\nOne of the first works to detect periodic artifacts on natural photos was proposed by Popescu and Farid[18 ###reference_b18###].\nThey focused on detecting traces of resampling in an image, aiming to understand the specific artifacts that resizing operations leave behind.\nThe key idea was that resampling creates a correlation among the pixels\u2019 neighborhoods, similar to the uneven overlap of checkerboard artifacts. To highlight this correlation,\nthey used an Expectation Maximization (EM) algorithm to identify an optimal linear combination that describes how each pixel relates to its neighbors. Using EM, they built a probabilistic map (P-Map) that provides the likelihood of a pixel correlated to its neighbors. This map can identify levels of resampling and possibly expose the periodic artifacts left by the AI generation process.\nFollowing [18 ###reference_b18###], Kirchner [19 ###reference_b19###] noticed that the P-Map periodicity originated from the EM optimization\u2019s residuum function. He showed that the residuum could be interpreted as a linear filtered version of the original signal that exposes the periodic artifacts.\nHe thus proposed to replace the EM solution with a specific filter to extract the residuum.\nRecent studies have shown that residual noises extracted from AI-generated images can reveal periodic artifacts when analyzing identifiable high-frequency peaks in their Fourier spectrum [12 ###reference_b12###, 13 ###reference_b13###].\nBased on that, Bammey [20 ###reference_b20###] computed the Fourier transform of the residual noise and created a feature vector\nfrom different frequency bands.\nHe trained a supervised classifier using this feature and achieved promising results in distinguishing fake from pristine images.\nIn his work, Bammey qualitatively showed that these peaks manifest differently in images generated by different diffusion model classes.\nCo-occurrence matrices are another promising approach to exposing synthetic content [21 ###reference_b21###, 22 ###reference_b22###].\nIn the context of scientific images, Mandelli et al. [23 ###reference_b23###] employed texture descriptors derived from the gray level co-occurrence matrix (GLCM) to distinguish genuine Western blot images from synthetic versions generated through diverse architectures, including GANs and diffusion models.\nBesides hand-crafted techniques, deep learning-based features have also been used to distinguish pristine images from AI images.\nFor instance, Cocchi et al. [24 ###reference_b24###] used pre-trained Contrastive Language-Image Pretraining (CLIP) [25 ###reference_b25###] and Self-Distillation with No Labels (DINO) [26 ###reference_b26###] models as feature extractors to distinguish fake from genuine samples.\nUnlike hand-crafted feature extractors, CLIP and DINO are data-driven models trained on large datasets with millions of parameters. This makes it intractable to understand the specific role of each parameter and explain their decisions, hindering the possibility of finding and interpreting their potential biases. Therefore, we should be skeptical about the generalizability of such methods."
|
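A minimal sketch of the residual-spectrum feature idea described above (Synthbuster-style), assuming a 4-neighbour averaging kernel and arbitrary placeholder frequency positions; the original work defines its own kernel and frequency bands.

import numpy as np
from scipy.signal import convolve2d

CROSS = np.array([[0.0, 0.25, 0.0],
                  [0.25, 0.0, 0.25],
                  [0.0, 0.25, 0.0]])  # assumed 4-neighbour average ("cross") kernel

def fft_band_feature(img):
    img = img.astype(float)
    # High-pass residual: image minus its local low-pass prediction.
    residual = img - convolve2d(img, CROSS, mode="same", boundary="symm")
    spec = np.abs(np.fft.fftshift(np.fft.fft2(residual)))
    spec /= spec.sum() + 1e-12
    h, w = spec.shape
    cy, cx = h // 2, w // 2
    # Sample a handful of frequency positions where upsampling peaks tend to appear;
    # these offsets are placeholders for illustration only.
    offsets = [(0, w // 4), (0, w // 2 - 1), (h // 4, 0), (h // 2 - 1, 0), (h // 4, w // 4)]
    return np.array([spec[cy + dy, cx + dx] for dy, dx in offsets])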
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3",
|
| 31 |
+
"parent_section_id": null,
|
| 32 |
+
"section_name": "III Proposed Techniques to Expose AI Artifacts",
|
| 33 |
+
"text": "###figure_3### Given a questioned Western blot, the problem we address in this research is learning to identify it as either pristine or synthetic and, in the latter case, learning to attribute the AI-generation model that might have been used to create it.\nFig. 2 ###reference_### summarizes the problem and our proposed solution workflow.\nAs one might observe, we explore residual noise extraction (see Sec. III-C ###reference_###) and the analysis of explainable periodic artifacts (Sec. III-A ###reference_###) and texture features (Sec. III-B ###reference_###) to train classifiers that accomplish the task at hand.\nOur approach, which involves identifying the distinct artifact-based features of each generative model, has practical implications. It could potentially enable the identification and appropriate action against a paper mill, if necessary."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3.1",
|
| 37 |
+
"parent_section_id": "3",
|
| 38 |
+
"section_name": "III-A Patch-based Periodic Artifacts",
|
| 39 |
+
"text": "Most low-level artifacts explored for synthetic image detection are exposed after extracting a residual noise from an input image. However, to our knowledge, none of the previously reported studies in Section II-B ###reference_### considered that periodic samples introduced by AI generation should be equally distributed over the image\u2019s residual noise, independently of the semantic content depicted in the image. This concept is crucial because analyzing the entire image may raise the possibility of inadvertently including semantic elements from the objects depicted in the image in our analysis.\nTo minimize the impact of these semantics, we propose to split the image into patches and extract artifacts from the residuals of these patches. We then combine the patch contributions by averaging the Fourier spectrum of their residuals.\nWe name this patch-based residual Fourier transform strategy PATCH-FFT-PEAKS, while the typical full-image version is FFT-PEAKS.\nFig. 3 ###reference_### illustrates the difference between a spectrum computed directly from the residual noise and one derived from the combined patches. It is worth noticing that the artifacts are more prominently highlighted in the latter case.\n###figure_4### ###figure_5###"
|
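A compact sketch of the patch-averaging idea described above (the patch size and the use of non-overlapping patches are assumptions): averaging the per-patch magnitude spectra reinforces periodic generation artifacts, which occur at the same frequencies in every patch, while scene content averages out.

import numpy as np

def patch_averaged_spectrum(residual, patch=64):
    # residual: 2-D residual-noise array extracted from the questioned image.
    h, w = residual.shape
    acc = np.zeros((patch, patch))
    n = 0
    for y in range(0, h - patch + 1, patch):
        for x in range(0, w - patch + 1, patch):
            block = residual[y:y + patch, x:x + patch]
            acc += np.abs(np.fft.fftshift(np.fft.fft2(block)))
            n += 1
    return acc / max(n, 1)  # averaged spectrum; peak positions/heights can feed a classifier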
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.2",
|
| 43 |
+
"parent_section_id": "3",
|
| 44 |
+
"section_name": "III-B Fourier-based Texture Features",
|
| 45 |
+
"text": "Motivated by the successful texture feature extraction in [23 ###reference_b23###], we propose inspecting the GLCM matrices of different generation models.\nEvery generation model is associated with a distinct matrix. To highlight this uniqueness, we calculate the Fourier spectrum of each GLCM matrix, resulting in a visibly more distinguishable pattern.\nWe name this proposed solution as FFT-GLCM.\nSimilar to the approach in [23 ###reference_b23###], during our experiments, we extract contrast-weighted, homogeneity-weighted, dissimilarity-weighted, energy, and correlation-weighted features from the FFT-GLCM at distances of 4, 8, 16, and 32, in both horizontal and vertical directions. These features are then concatenated to form a feature vector with 40 dimensions.\n###figure_6### Fig. 4 ###reference_### compares the Fourier Spectrum (first row), the Patch-based Fourier Spectrum (second row), the GLCM (third row), and the proposed FFT-GLCM (fourth row) for the average of a hundred samples from each column-wise generation source.\nWhile the Fourier and GLCM spectra exhibit faint artifact peaks, patch-based Fourier and FFT-GLCM emphasize each generator\u2019s artifacts, resulting in unique strong patterns for each generator."
|
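A rough sketch of the FFT-GLCM descriptor under stated assumptions: scikit-image's graycomatrix supplies the co-occurrence matrices, the Fourier magnitude of each matrix is normalised, and graycoprops-style weightings are applied to it. The correlation-weighted term is omitted here, so this toy vector is shorter than the 40-dimensional one described above.

import numpy as np
from skimage.feature import graycomatrix

def fft_glcm_features(img_u8, distances=(4, 8, 16, 32)):
    # img_u8: 2-D uint8 image; angles 0 and pi/2 give horizontal/vertical co-occurrences.
    glcm = graycomatrix(img_u8, distances=list(distances), angles=[0, np.pi / 2],
                        levels=256, symmetric=False, normed=True)
    i, j = np.meshgrid(np.arange(256), np.arange(256), indexing="ij")
    feats = []
    for d in range(glcm.shape[2]):
        for a in range(glcm.shape[3]):
            spec = np.abs(np.fft.fft2(glcm[:, :, d, a]))
            spec /= spec.sum() + 1e-12
            feats += [(spec * (i - j) ** 2).sum(),          # contrast-weighted
                      (spec / (1.0 + (i - j) ** 2)).sum(),  # homogeneity-weighted
                      (spec * np.abs(i - j)).sum(),         # dissimilarity-weighted
                      np.sqrt((spec ** 2).sum())]           # energy
    return np.array(feats)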
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.3",
|
| 49 |
+
"parent_section_id": "3",
|
| 50 |
+
"section_name": "III-C Residual Noise Extraction",
|
| 51 |
+
"text": "The extraction of noise residues from an image to perform forensic investigations is well known and widely used in the forensic community [18 ###reference_b18###, 19 ###reference_b19###, 12 ###reference_b12###].\nMoreover, the state of the art has proposed many different types of residual noise extraction.\nTo our knowledge, no study has performed experiments to test how different noise extraction techniques impact a detector\u2019s performance.\nThis section explores different noise extraction methods to check their impact on the detection of AI generation artifacts.\nThe selected noise extraction methods are:\nMandelli et al. [23 ###reference_b23###]: this method convolves a kernel (see Eq. 1 ###reference_###) with the image, then computes the difference between the image and the 2D convolution, acting as a high-pass filter.\nBammey [20 ###reference_b20###]: similar to [23 ###reference_b23###], but using the cross kernel (see Eq. 1 ###reference_###).\nGaussian: similar to [23 ###reference_b23###], but using a Gaussian kernel with and radius .\nMean: similar to [23 ###reference_b23###], but using a neighborhood mean kernel (see Eq. 1 ###reference_###).\nKirchner [19 ###reference_b19###]: it uses the kernel (see Eq. 1 ###reference_###) proposed in [19 ###reference_b19###] to extract the residual noise from an image.\nP-Map: instead of noise extraction, this method uses the P-Map solution proposed in [18 ###reference_b18###]\nNon-Local Means [27 ###reference_b27###]:\nIt uses the non-local means technique, which preserves texture and periodic elements while denoising the target image [27 ###reference_b27###].\nThe exploited kernels and are listed as follows:"
|
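Most of the extractors listed above reduce to "image minus a local low-pass prediction"; only the kernel differs. A minimal sketch with SciPy follows, where the Gaussian sigma/radius and the mean-kernel size are placeholder values, since the exact parameters are not reproduced in the text above.

import numpy as np
from scipy.ndimage import gaussian_filter, uniform_filter

def residual_mean(img, size=3):
    # "Mean"-style residual: subtract the neighbourhood-mean prediction.
    img = img.astype(float)
    return img - uniform_filter(img, size=size)

def residual_gaussian(img, sigma=1.0, radius=2):
    # Gaussian-style residual; sigma and radius here are assumed values.
    img = img.astype(float)
    return img - gaussian_filter(img, sigma=sigma, truncate=radius / sigma)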
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4",
|
| 55 |
+
"parent_section_id": null,
|
| 56 |
+
"section_name": "IV Experiments and Analysis",
|
| 57 |
+
"text": "In this section, we present the achieved results by testing the proposed techniques to expose AI artifacts in three different scenarios: (i) closed-set source attribution, (ii) open-set attribution, and (iii) one-vs-rest source attribution. More details follow in the next lines."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.1",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "IV-A Experimental Setup",
|
| 63 |
+
"text": "We adopt as baselines the data-driven features from DINOv1, DINOv2, and CLIP (Cocchi et al.[24 ###reference_b24###]), texture features (Mandelli et al. [23 ###reference_b23###]), and periodic artifacts (Bammey\u2019s Synthbuster [20 ###reference_b20###]). Additionally, we report results for the extended and proposed texture features (GLCM and FFT-GLCM) and periodic artifacts (FFT-PEAKS and PATCH-FFT-PEAKS).\nFor [23 ###reference_b23###] and [20 ###reference_b20###] methods, we adopt their original noise extractors. For the other methods, we present the results based on the most effective residual noise extraction technique, defined as Best Noise Extraction (BNE).\nHere, we approach model attribution as a supervised closed-set classification task.\nThe goal is to determine whether the features under investigation can identify a known source model. Given an image, we aim at inferring if it is genuine or generated by specific AI models, all of which were known during the training phase.\nThis situation can occur when analysts are already familiar with paper mills\u2019 most commonly used generators. Despite its simplicity, this controlled setting serves as a starting point for our investigations.\nFor this task, we employ well-known, explainable, high-performance machine learning classifiers using the investigated features. Specifically, we use Random Forest (RF) and eXtreme Gradient Boosting (XGBoost).\nDuring the evaluation,\nwe use the dataset proposed in [23 ###reference_b23###], which consists of synthetic Western blots generated by CycleGAN, Pix2Pix, StyleGan2, and Denoising Diffusion Probabilistic Models (DDPM), as well as a set of pristine Western blot images, totaling five different sources.\nWe selected 6,000 images from each source.\nWe test the models using a cross-validation setup, where half of the data is used for training and the other half for testing. We used the multi-class Area Under the Curve (AUC) and balanced accuracy (Bacc) to measure the classifiers\u2019 performance. The AUC is calculated under one-vs-all settings and micro-averaging.\nTable I ###reference_### presents the results. Both RF and XGBoost perform similarly, regardless of the features used. All investigated artifacts and features yield high results, demonstrating that the features can encapsulate the fingerprint of each source data in a closed-set scenario.\nThis task simulates a more challenging scenario without information about the generative models employed to create the synthetic images. Specifically, we train a one-class classifier using data from genuine sources and evaluate it using synthetic and pristine data. Our goal is to determine if the features of pristine data are distinguishable from those of synthetic data, which were not seen during training.\nTo make the scenario even more realistic and challenging, we consider two genuine Western blot sources, and we assess the model\u2019s ability to generalize between them. The first pristine source includes Western blots from [23 ###reference_b23###], extracted from scientific articles. The second source consists of raw Western blot data downloaded from Figshare, a scientific repository for raw data release.\nNote that pristine data in [23 ###reference_b23###] likely underwent post-processing, such as compression and contrast adjustment, typically used during articles\u2019 preparation. 
In contrast, the Figshare raw dataset consists of unprocessed images stored in TIFF files.\nIn our experiments, we adopt a cross-validation protocol with a train set of 3,000 genuine data samples from [23 ###reference_b23###] and a test set of 3,000 samples from Figshare, along with the rest of synthetic data collected from [23 ###reference_b23###]. Then, we swap the genuine source data in each split and repeat the experiment. This setup prevents the model from overfitting to one pristine data source. It provides a more realistic scenario where the training pristine data source may differ from the data encountered during inference.\nWe use Isolation Forest (IF) and Probabilistic PCA (PPCA) as one-class classifiers. We employ the scikit-learn [28 ###reference_b28###] implementations of IF and PPCA with default settings.\nPPCA\u2019s main components captured 95% of the variance.\nDuring the evaluation, we calculate Bacc using the likelihood threshold that maximizes this metric, aiming to find an upper bound for the artifacts and classifiers.\nTable II ###reference_### presents the open-set results. The best-performing Bacc feature is FFT-PEAKS with the Gaussian kernel for noise extraction. FFT-GLCM outperforms this task\u2019s baselines and the GLCM technique and achieves results comparable to FFT-PEAKS.\nNotably, in this scenario, deep-learning features performs lower than explicable artifacts. Both baselines are improved by exploring different residual noise techniques, as proposed in the work herein.\nThis scenario investigates whether the artifacts from each generator can be distinguished when the classifier is trained using only one known data source, which can be pristine or synthetic.\nTo this purpose, we train a one-class classifier for each of the five sources from the dataset in [23 ###reference_b23###].\nThe query image is attributed to the model that provides the highest likelihood.\nIt is worth noticing that this approach can be easily extended to an open-set configuration, as a likelihood threshold can be used to decide whether an input image belongs to a known source. For example, if classifier A has the highest likelihood among the classifiers but it is still below a confidence threshold , the input can be considered unknown.\nWe use a two-fold cross-validation approach where 3,000 images from each synthetic data source are included in each split. Additionally, 3,000 genuine Western blots from dataset [23 ###reference_b23###] and 3,000 from Figshare are included in different splits, similarly to the open-set scenario.\nWe use the same one-class classifiers from the open-set scenario and measure their performance using AUC (micro-averaging) and Bacc.\nTable III ###reference_### presents the results for the one-vs-rest attribution task. Unlike the open-set task, DINOv1 achieves the best Bacc (0.945) and AUC (0.944) using the kernel from [23 ###reference_b23###] during the residual noise extraction and PPCA classifier. As indicated by Fig. 5 ###reference_### and 6 ###reference_###, if no noise extraction is performed, DINOv1\u2019s Bacc drops to 0.910 and AUC to 0.894 with PPCA.\nIn this scenario, PATCH-FFT-PEAKS outperforms the baseline and FFT-PEAKS, achieving the best Bacc among explainable methods. 
It also shows AUC results comparable to GLCM.\nNotably, for the IF classifier, Synthbuster improves its Bacc by 12 percentage points (pp) and Mandelli\u2019s texture-based method by 9 pp when using different residual noise techniques other than their original suggestions.\nIn this experiment, we explore the responses of the presented features and artifacts to a range of residual noise extraction techniques. We employ the methods outlined in Section III-C ###reference_### to extract the residual noise at each artifact\u2019s workflow. We then proceed to evaluate the impact of each noise extractor by testing it on the one-vs-rest attribution task using the IF and PPCA classifiers.\nFig. 5 ###reference_### presents the Bacc results for this setup, while Fig. 6 ###reference_### shows the AUC. We omit DINOv2 feature from this experiment since it behaves similarly to DINOv1. We include a more detailed version of these figures in the Supplementary Materials.\nThe importance of choosing a proper residual noise extraction technique for each feature and artifact becomes evident by analyzing the figures. For instance, features such as CLIP and GLCM show about 40 pp variation in Bacc depending on the choice of noise extraction for the IF and PPCA classifiers.\nAs expected, the least favorable outcomes for most features manifest when no noise extraction is implemented (i.e., \u201cNo Extraction\u201d). Moreover, selecting a suitable residual noise extraction method can enhance Synthbuster and Mandelli et al.\u2019s techniques\u2019 original implementations, underscoring our findings\u2019 relevance."
|
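The one-vs-rest attribution protocol described above can be prototyped with one one-class model per known source; the sketch below uses scikit-learn's IsolationForest, with feature extraction and the rejection threshold tau left as placeholders. Supplying tau turns the same routine into the open-set variant mentioned in the text, where low-confidence queries are labelled unknown.

import numpy as np
from sklearn.ensemble import IsolationForest

def fit_per_source(features_by_source):
    # features_by_source: dict mapping source name -> (n_samples, n_features) array.
    return {name: IsolationForest(random_state=0).fit(X)
            for name, X in features_by_source.items()}

def attribute(models, x, tau=None):
    # Score the query feature vector under every source model and keep the best.
    scores = {name: m.score_samples(x.reshape(1, -1))[0] for name, m in models.items()}
    best = max(scores, key=scores.get)
    if tau is not None and scores[best] < tau:
        return "unknown"  # open-set rejection when no source model is confident enough
    return best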
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "5",
|
| 67 |
+
"parent_section_id": null,
|
| 68 |
+
"section_name": "Discussion",
|
| 69 |
+
"text": "Our experiments aimed to understand how hand-crafted explainable artifacts and data-driven features perform in a source attribution task, avoiding black-box classifiers.\nWe began our research with a closed-set scenario, where all data sources were known during training. As anticipated, all artifacts demonstrated excellent results, providing a strong foundation for our study.\nIn a more complex scenario, with an open set formulation, we tested the artifacts using one-class classifiers trained only on genuine data. During testing, the detectors had to distinguish pristine from synthetic content. Results indicated that hand-crafted features were more effective than deep learning ones, with PATCH-FFT-PEAKS achieving one of the best performances (0.863 Bacc and 0.937 AUC with Isolation Forest as the classifier and using kernel while extracting the residual noise). The open-set results highlighted potential biases of deep-learning features and the generalization properties of the hand-crafted ones.\nOur research has practical implications, particularly in the context of real-world problems like tracking paper mills. We performed a one-vs-all attribution using one-class classifiers, a method that can be extended to an open-set scenario. Our experiments showed promising results with pre-trained deep-learning features from DINO-v1, especially with Probabilistic PCA. Combined with explainable artifacts like PATCH-FFT-PEAKS, this approach could enhance source attribution tasks, as demonstrated by their strong performance with Isolation Forest.\nWe further explored the impact of different residual noise extraction techniques on the performance of each investigated feature. Our results showed that different noise extraction methods benefited each feature type (periodic-based, texture-based, or deep-learning-based). For instance, FFT-PEAKS showed a 12 pp improvement in Bacc using kernel instead of its original method. Future work should investigate how residual noise correlates with each artifact and explore combinations of noise extraction techniques that could enhance each artifact\u2019s exposure."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "6",
|
| 73 |
+
"parent_section_id": null,
|
| 74 |
+
"section_name": "VI Conclusions and future work",
|
| 75 |
+
"text": "With the advancement of generative AI models, paper mills may soon use these technologies to scale their production of fake content.\nBy investigating the problem, we found that a possible way to track paper mills is to detect the AI models they might be using.\nOur work focused on Western blots due to their\nprevalence and vulnerability to paper mills.\nThese images have already been synthetically generated by AI [23 ###reference_b23###] and have proven difficult for experts to distinguish them [6 ###reference_b6###].\nAs different models may accentuate different types of artifacts, our work did not aim to design a single solution that could track all AI models (which might not exist) but to develop and promote explainable solutions to expose synthetic data.\nOur analysis focused on low-level artifacts from periodic and texture features left as fingerprints by the AI models. We improved state-of-the-art periodic features [20 ###reference_b20###] for source attribution by combining different image patches and analyzing their resultant Fourier spectrum. We also improved the state-of-the-art texture artifacts [23 ###reference_b23###] in an open-set task, where models were trained only with artifacts from genuine data and tested with both synthetic and pristine data.\nAnother contribution of our work was exploring different types of residual noise extractors for source attribution. The experiments indicated that this part of the workflow is crucial for exposing synthetic artifacts. By choosing a better residual noise extractor, we improved Synthbuster by 12 pp and Mandelli et al.\u2019s work by 9 pp in terms of balanced accuracy when using Isolation Forest for a one-vs-rest source attribution task.\nIn addition to the analysis and the proposed explainable features, our work aims to foster addressing the problem of paper mills, calling on the forensic community for more discussions, research, and solutions to this serious issue. Thus, possible paths to continue this research should consider other types of generative models and scientific images. Additionally, one should investigate new residual noise extraction techniques to expose AI fingerprints. Also, future open-set solutions could be important for detecting unknown generative models. Finally, another promising research path is exploiting other artifacts derived from the linear combination of AI-generated pixels, linking this aspect to the generative models."
|
| 76 |
+
}
|
| 77 |
+
],
|
| 78 |
+
"appendix": [],
|
| 79 |
+
"tables": {
|
| 80 |
+
"1": {
|
| 81 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T1\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S4.T1.2.1.1\" style=\"font-size:90%;\">TABLE I</span>: </span><span class=\"ltx_text\" id=\"S4.T1.3.2\" style=\"font-size:90%;\">Cross-Validation Closed-set Attribution Results</span></figcaption>\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S4.T1.4\" style=\"width:433.6pt;height:253.4pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(47.4pt,-27.7pt) scale(1.27964983233756,1.27964983233756) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S4.T1.4.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T1.4.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"S4.T1.4.1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.4.1.1.1.1.1\">Feature</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"2\" id=\"S4.T1.4.1.1.1.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.4.1.1.1.2.1\">Bacc</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"2\" id=\"S4.T1.4.1.1.1.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.4.1.1.1.3.1\">AUC</span></th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.4.1.2.2\">\n<th class=\"ltx_td ltx_th ltx_th_column ltx_th_row\" id=\"S4.T1.4.1.2.2.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T1.4.1.2.2.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.4.1.2.2.2.1\">RF</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T1.4.1.2.2.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.4.1.2.2.3.1\">XGBoost</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T1.4.1.2.2.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.4.1.2.2.4.1\">RF</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T1.4.1.2.2.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.4.1.2.2.5.1\">XGBoost</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T1.4.1.3.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T1.4.1.3.1.1\"><span class=\"ltx_text ltx_font_italic\" id=\"S4.T1.4.1.3.1.1.1\">DINOv1 (BNE)</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.4.1.3.1.2\">0.989</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.1.3.1.3\">0.995</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.4.1.3.1.4\">0.999</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.4.1.3.1.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.4.1.3.1.5.1\">1.000</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.4.1.4.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.4.1.4.2.1\"><span class=\"ltx_text ltx_font_italic\" id=\"S4.T1.4.1.4.2.1.1\">DINOv2 (BNE)</span></th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.1.4.2.2\">0.968</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T1.4.1.4.2.3\">0.985</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.1.4.2.4\">0.998</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.1.4.2.5\">0.999</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.4.1.5.3\">\n<th class=\"ltx_td ltx_align_left 
ltx_th ltx_th_row\" id=\"S4.T1.4.1.5.3.1\"><span class=\"ltx_text ltx_font_italic\" id=\"S4.T1.4.1.5.3.1.1\">CLIP (BNE)</span></th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.1.5.3.2\">0.887</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T1.4.1.5.3.3\">0.923</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.1.5.3.4\">0.988</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.1.5.3.5\">0.994</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.4.1.6.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T1.4.1.6.4.1\">\n<span class=\"ltx_text ltx_font_italic\" id=\"S4.T1.4.1.6.4.1.1\">Mandelli et al</span>.\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.18881v2#bib.bib23\" title=\"\">23</a>]</cite>\n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.4.1.6.4.2\">0.968</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.1.6.4.3\">0.977</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.4.1.6.4.4\">0.998</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.4.1.6.4.5\">0.999</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.4.1.7.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.4.1.7.5.1\"><span class=\"ltx_text ltx_font_italic\" id=\"S4.T1.4.1.7.5.1.1\">GLCM (BNE)</span></th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.1.7.5.2\">0.987</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T1.4.1.7.5.3\">0.991</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.1.7.5.4\">0.999</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.1.7.5.5\">0.999</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.4.1.8.6\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.4.1.8.6.1\"><span class=\"ltx_text ltx_font_italic\" id=\"S4.T1.4.1.8.6.1.1\">FFT-GLCM (BNE)</span></th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.1.8.6.2\">0.952</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T1.4.1.8.6.3\">0.961</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.1.8.6.4\">0.997</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.1.8.6.5\">0.998</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.4.1.9.7\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T1.4.1.9.7.1\">\n<span class=\"ltx_text ltx_font_italic\" id=\"S4.T1.4.1.9.7.1.1\">Synthbuster</span>\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.18881v2#bib.bib20\" title=\"\">20</a>]</cite>\n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.4.1.9.7.2\">0.956</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T1.4.1.9.7.3\">0.970</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.4.1.9.7.4\">0.998</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.4.1.9.7.5\">0.999</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.4.1.10.8\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.4.1.10.8.1\"><span class=\"ltx_text ltx_font_italic\" id=\"S4.T1.4.1.10.8.1.1\">FFT-PEAKS (BNE)</span></th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.1.10.8.2\">0.988</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T1.4.1.10.8.3\">0.991</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.1.10.8.4\">0.999</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.4.1.10.8.5\">0.999</td>\n</tr>\n<tr class=\"ltx_tr\" 
id=\"S4.T1.4.1.11.9\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"S4.T1.4.1.11.9.1\"><span class=\"ltx_text ltx_font_italic\" id=\"S4.T1.4.1.11.9.1.1\">PATCH-FFT-PEAKS (BNE)</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.4.1.11.9.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.4.1.11.9.2.1\">0.996</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r\" id=\"S4.T1.4.1.11.9.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.4.1.11.9.3.1\">0.996</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.4.1.11.9.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.4.1.11.9.4.1\">1.000</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T1.4.1.11.9.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.4.1.11.9.5.1\">1.000</span></td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 82 |
+
"capture": "TABLE I: Cross-Validation Closed-set Attribution Results"
|
| 83 |
+
},
|
| 84 |
+
"2": {
|
| 85 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T2\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S4.T2.2.1.1\" style=\"font-size:90%;\">TABLE II</span>: </span><span class=\"ltx_text\" id=\"S4.T2.3.2\" style=\"font-size:90%;\">Cross-validation Open-set Classification Results</span></figcaption>\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S4.T2.4\" style=\"width:433.6pt;height:275.5pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(61.0pt,-38.7pt) scale(1.39130681706911,1.39130681706911) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S4.T2.4.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T2.4.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"S4.T2.4.1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.4.1.1.1.1.1\">Feature</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"2\" id=\"S4.T2.4.1.1.1.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.4.1.1.1.2.1\">Bacc</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"2\" id=\"S4.T2.4.1.1.1.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.4.1.1.1.3.1\">AUC</span></th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.1.2.2\">\n<th class=\"ltx_td ltx_th ltx_th_column ltx_th_row\" id=\"S4.T2.4.1.2.2.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.4.1.2.2.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.4.1.2.2.2.1\">IF</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T2.4.1.2.2.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.4.1.2.2.3.1\">PPCA</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.4.1.2.2.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.4.1.2.2.4.1\">IF</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.4.1.2.2.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.4.1.2.2.5.1\">PPCA</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T2.4.1.3.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T2.4.1.3.1.1\">DINOv1 (BNE)</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.4.1.3.1.2\">0.799</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.4.1.3.1.3\">0.831</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.4.1.3.1.4\">0.847</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.4.1.3.1.5\">0.880</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.1.4.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T2.4.1.4.2.1\">DINOv2 (BNE)</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.1.4.2.2\">0.765</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.1.4.2.3\">0.796</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.1.4.2.4\">0.820</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.1.4.2.5\">0.852</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.1.5.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T2.4.1.5.3.1\">CLIP (BNE)</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.1.5.3.2\">0.746</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.1.5.3.3\">0.780</td>\n<td 
class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.1.5.3.4\">0.750</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.1.5.3.5\">0.848</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.1.6.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T2.4.1.6.4.1\">Mandelli et al.\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.18881v2#bib.bib23\" title=\"\">23</a>]</cite>\n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.4.1.6.4.2\">0.704</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.4.1.6.4.3\">0.533</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.4.1.6.4.4\">0.746</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.4.1.6.4.5\">0.482</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.1.7.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T2.4.1.7.5.1\">GLCM (BNE)</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.1.7.5.2\">0.792</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.1.7.5.3\">0.834</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.1.7.5.4\">0.856</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.1.7.5.5\">0.875</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.1.8.6\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T2.4.1.8.6.1\">FFT-GLCM (BNE)</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.1.8.6.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.4.1.8.6.2.1\">0.865</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.1.8.6.3\">0.860</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.1.8.6.4\">0.890</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.1.8.6.5\">0.885</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.1.9.7\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T2.4.1.9.7.1\">Synthbuster\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.18881v2#bib.bib20\" title=\"\">20</a>]</cite>\n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.4.1.9.7.2\">0.833</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T2.4.1.9.7.3\">0.533</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.4.1.9.7.4\">0.840</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.4.1.9.7.5\">0.518</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.1.10.8\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T2.4.1.10.8.1\">FFT-PEAKS (BNE)</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.1.10.8.2\">0.856</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T2.4.1.10.8.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.4.1.10.8.3.1\">0.865</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.1.10.8.4\">0.934</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.4.1.10.8.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.4.1.10.8.5.1\">0.933</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.4.1.11.9\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"S4.T2.4.1.11.9.1\">PATCH-FFT-PEAKS (BNE)</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T2.4.1.11.9.2\">0.863</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r\" id=\"S4.T2.4.1.11.9.3\">0.864</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T2.4.1.11.9.4\"><span class=\"ltx_text ltx_font_bold\" 
id=\"S4.T2.4.1.11.9.4.1\">0.937</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T2.4.1.11.9.5\">0.926</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 86 |
+
"capture": "TABLE II: Cross-validation Open-set Classification Results"
|
| 87 |
+
},
|
| 88 |
+
"3": {
|
| 89 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T3\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\"><span class=\"ltx_text\" id=\"S4.T3.2.1.1\" style=\"font-size:90%;\">TABLE III</span>: </span><span class=\"ltx_text\" id=\"S4.T3.3.2\" style=\"font-size:90%;\">Cross-Validation One-vs-rest Attribution Results</span></figcaption>\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S4.T3.4\" style=\"width:433.6pt;height:275.5pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(61.0pt,-38.7pt) scale(1.39130681706911,1.39130681706911) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S4.T3.4.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T3.4.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_th_row ltx_border_tt\" id=\"S4.T3.4.1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.4.1.1.1.1.1\">Feature</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"2\" id=\"S4.T3.4.1.1.1.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.4.1.1.1.2.1\">Bacc</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_tt\" colspan=\"2\" id=\"S4.T3.4.1.1.1.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.4.1.1.1.3.1\">AUC</span></th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.4.1.2.2\">\n<th class=\"ltx_td ltx_th ltx_th_column ltx_th_row\" id=\"S4.T3.4.1.2.2.1\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T3.4.1.2.2.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.4.1.2.2.2.1\">IF</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_r ltx_border_t\" id=\"S4.T3.4.1.2.2.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.4.1.2.2.3.1\">PPCA</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T3.4.1.2.2.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.4.1.2.2.4.1\">IF</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T3.4.1.2.2.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.4.1.2.2.5.1\">PPCA</span></th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T3.4.1.3.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T3.4.1.3.1.1\">DINOV1 (BNE)</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.4.1.3.1.2\">0.775</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.4.1.3.1.3\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.4.1.3.1.3.1\">0.945</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.4.1.3.1.4\">0.908</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.4.1.3.1.5\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.4.1.3.1.5.1\">0.944</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.4.1.4.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T3.4.1.4.2.1\">DINOV2 (BNE)</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.1.4.2.2\">0.702</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T3.4.1.4.2.3\">0.923</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.1.4.2.4\">0.865</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.1.4.2.5\">0.938</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.4.1.5.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T3.4.1.5.3.1\">CLIP (BNE)</th>\n<td class=\"ltx_td 
ltx_align_center\" id=\"S4.T3.4.1.5.3.2\">0.625</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T3.4.1.5.3.3\">0.748</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.1.5.3.4\">0.821</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.1.5.3.5\">0.804</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.4.1.6.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T3.4.1.6.4.1\">Mandelli et al.\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.18881v2#bib.bib23\" title=\"\">23</a>]</cite>\n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.4.1.6.4.2\">0.747</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.4.1.6.4.3\">0.627</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.4.1.6.4.4\">0.875</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.4.1.6.4.5\">0.773</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.4.1.7.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T3.4.1.7.5.1\">GLCM (BNE)</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.1.7.5.2\">0.838</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T3.4.1.7.5.3\">0.752</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.1.7.5.4\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.4.1.7.5.4.1\">0.920</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.1.7.5.5\">0.852</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.4.1.8.6\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T3.4.1.8.6.1\">FFT-GLCM (BNE)</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.1.8.6.2\">0.746</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T3.4.1.8.6.3\">0.553</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.1.8.6.4\">0.860</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.1.8.6.5\">0.730</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.4.1.9.7\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T3.4.1.9.7.1\">Synthbuster\u00a0<cite class=\"ltx_cite ltx_citemacro_cite\">[<a class=\"ltx_ref\" href=\"https://arxiv.org/html/2409.18881v2#bib.bib20\" title=\"\">20</a>]</cite>\n</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.4.1.9.7.2\">0.683</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S4.T3.4.1.9.7.3\">0.377</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.4.1.9.7.4\">0.826</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.4.1.9.7.5\">0.563</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.4.1.10.8\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T3.4.1.10.8.1\">FFT-PEAKS (BNE)</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.1.10.8.2\">0.808</td>\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S4.T3.4.1.10.8.3\">0.747</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.1.10.8.4\">0.865</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.1.10.8.5\">0.811</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.4.1.11.9\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_bb\" id=\"S4.T3.4.1.11.9.1\">PATCH-FFT-PEAKS (BNE)</th>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T3.4.1.11.9.2\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.4.1.11.9.2.1\">0.896</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb ltx_border_r\" id=\"S4.T3.4.1.11.9.3\">0.795</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" 
id=\"S4.T3.4.1.11.9.4\">0.917</td>\n<td class=\"ltx_td ltx_align_center ltx_border_bb\" id=\"S4.T3.4.1.11.9.5\">0.861</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 90 |
+
"capture": "TABLE III: Cross-Validation One-vs-rest Attribution Results"
|
| 91 |
+
}
|
| 92 |
+
},
|
| 93 |
+
"image_paths": {
|
| 94 |
+
"1(a)": {
|
| 95 |
+
"figure_path": "2409.18881v2_figure_1(a).png",
|
| 96 |
+
"caption": "(a) Checkerboard\nFigure 1: Comparison between a CycleGAN (a) and a pristine (b) Western blot image. The CycleGAN image contains checkerboard artifacts visible when zooming into the image. The highlighted Fourier spectrum peaks (see the yellow arrows) also indicate the presence of those artifacts.",
|
| 97 |
+
"url": "http://arxiv.org/html/2409.18881v2/x7.png"
|
| 98 |
+
},
|
| 99 |
+
"1(b)": {
|
| 100 |
+
"figure_path": "2409.18881v2_figure_1(b).png",
|
| 101 |
+
"caption": "(b) No checkerboard\nFigure 1: Comparison between a CycleGAN (a) and a pristine (b) Western blot image. The CycleGAN image contains checkerboard artifacts visible when zooming into the image. The highlighted Fourier spectrum peaks (see the yellow arrows) also indicate the presence of those artifacts.",
|
| 102 |
+
"url": "http://arxiv.org/html/2409.18881v2/x8.png"
|
| 103 |
+
},
|
| 104 |
+
"2": {
|
| 105 |
+
"figure_path": "2409.18881v2_figure_2.png",
|
| 106 |
+
"caption": "Figure 2: Solution workflow.\nGiven a questioned Western blot, we leverage residual noise extraction, periodic artifacts, and texture features\u2019 analysis to perform synthetic image detection and AI-generation model source attribution.",
|
| 107 |
+
"url": "http://arxiv.org/html/2409.18881v2/x9.png"
|
| 108 |
+
},
|
| 109 |
+
"3(a)": {
|
| 110 |
+
"figure_path": "2409.18881v2_figure_3(a).png",
|
| 111 |
+
"caption": "(a) Entire image\nFigure 3: Comparison between the (a) Fourier calculated over the entire noise residual image (FFT-PEAKS strategy) and (b) average-patch Fourier spectrum (PATCH-FFT-PEAKS strategy). All spectra are centered in spatial frequencies (0,0)00(0,0)( 0 , 0 ) and are computed over zero-mean signals.",
|
| 112 |
+
"url": "http://arxiv.org/html/2409.18881v2/x10.png"
|
| 113 |
+
},
|
| 114 |
+
"3(b)": {
|
| 115 |
+
"figure_path": "2409.18881v2_figure_3(b).png",
|
| 116 |
+
"caption": "(b) Patch-based\nFigure 3: Comparison between the (a) Fourier calculated over the entire noise residual image (FFT-PEAKS strategy) and (b) average-patch Fourier spectrum (PATCH-FFT-PEAKS strategy). All spectra are centered in spatial frequencies (0,0)00(0,0)( 0 , 0 ) and are computed over zero-mean signals.",
|
| 117 |
+
"url": "http://arxiv.org/html/2409.18881v2/x11.png"
|
| 118 |
+
},
|
| 119 |
+
"4": {
|
| 120 |
+
"figure_path": "2409.18881v2_figure_4.png",
|
| 121 |
+
"caption": "Figure 4: Different features extracted to expose AI generation artifacts. Each visualization results from an average of 100100100100 images.\nAll spectra are centered in spatial frequencies (0,0)00(0,0)( 0 , 0 ) and are computed over zero-mean signals.\nThe Fourier spectra on the same row are depicted over the same scale to help visual comparison. Rows show the explored telltale; columns show different generative AI models and a pristine source.",
|
| 122 |
+
"url": "http://arxiv.org/html/2409.18881v2/x12.png"
|
| 123 |
+
}
|
| 124 |
+
},
|
| 125 |
+
"validation": true,
|
| 126 |
+
"references": [],
|
| 127 |
+
"url": "http://arxiv.org/html/2409.18881v2"
|
| 128 |
+
}
|
20241004/2409.19992v2.json
ADDED
|
@@ -0,0 +1,134 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "A large-scale operational study of fingerprint quality and demographics",
|
| 3 |
+
"abstract": "Even though a few initial works have shown on small sets of data some level of bias in the performance of fingerprint recognition technology with respect to certain demographic groups, there is still not sufficient evidence to understand the impact that certain factors such as gender, age or finger-type may have on fingerprint quality and, in turn, also on fingerprint matching accuracy. The present work addresses this still under researched topic, on a large-scale database of operational data containing 10-print impressions of almost 16,000 subjects. The results reached provide further insight into the dependency of fingerprint quality and demographics, and show that there in fact exists a certain degree of performance variability in fingerprint-based recognition systems for different segments of the population. Based on the experimental evaluation, the work points out new observations based on data-driven evidence, provides plausible hypotheses to explain such observations, and concludes with potential follow-up actions that can help to reduce the observed fingerprint quality differences. This way, the current paper can be considered as a contribution to further increase the algorithmic fairness and equality of biometric technology.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Following the evidence collected in different studies regarding the accuracy variability of fingerprint recognition technology with respect to age [1 ###reference_b1###, 2 ###reference_b2###], gender [3 ###reference_b3###] and finger-type [4 ###reference_b4###, 5 ###reference_b5###], the biometric community seems to agree that there exists a certain degree of bias in current fingerprint-based systems for different demographic groups. However, with the exception of a few studies, this inconsistency in the recognition rates has been mainly observed on small-to-medium databases under laboratory conditions and, therefore, it is difficult to quantify to what extent this bias may translate to large-scale systems working under real operational conditions.\nThe main objective of the present study is to contribute to close the existing gap on the availability of evidence on large-scale operational data where fingerprint bias across age, gender and finger-type can be observed. The data employed in the project was acquired at consulates all around the world for the issuing of visas, using the current most extended technology for scanning fingerprints (i.e., 500dpi touch-based optical scanners). Based on these data, the fundamental question addressed in the study is: assuming that the amount of identity-related information present in natural fingerprints is not linked to a specific demographic group, why do some segments of the population consistently present higher error rates when utilising fingerprint recognition technology? Or, put in other words, is it a valid assumption that natural fingerprints of different demographic groups intrinsically contain, from birth, an equivalent amount of identity-related information that can be leveraged by automated systems to recognise individuals?\nIn order to provide a plausible answer to this difficult query, other related topics also considered in the article are: Do fingerprints coming from men contain more information than those coming from women? Adults\u2019 fingerprints comprise more information than those of young children or elders? Why do each of the fingers (including the thumb) of the hand provide different accuracy performance in fingerprint recognition systems?\nThe work tackles all these issues from a fingerprint quality perspective following a three-step approach: 1) determine if, indeed, there is a bias in fingerprint quality according to: age, gender and finger-type; 2) provide hypotheses that can reasonably explain the observed bias; 3) suggest a viable course of action to correct and/or minimise the observed bias.\nThe analysis and results presented in the paper can bring yet further insight into the key area of fingerprint quality from a new perspective, not considered in research publications to date, bridging one of the few gaps still existing in the field. 
As such, the conclusions drawn from the work, can bring huge value to different actors involved in the design, development and deployment of fingerprint-based operational systems:\nIndustry producing fingerprint readers: provide them insight for the design of improved ergonomics, enhanced usability of fingerprint acquisition scanners and help them improve the technology behind fingerprint sensing.\nAlgorithm/application developers: give them some guidance for the development of improved recognition strategies which may take into account demographic features to optimise the performance for different segments of the population.\nEnd-users in the domains of border management and law enforcement: help them develop guidelines and best practices for different applications/scenarios regarding the decision-making process with respect to different demographic groups.\nThe rest of the paper is structured as follows. After a brief discussion on fingerprint quality provided in Sect. 2 ###reference_###, the experimental framework, including the database and the quality metric which are the basis for the evaluation, are described in Sect. 3 ###reference_###. Results, together with the most relevant works in the literature related to each of the factors considered in the paper, are discussed in Sect. 4.1 ###reference_### (gender), Sect. 4.2 ###reference_### (age) and Sect. 4.3 ###reference_### (finger-type and handedness). Finally, conclusions are drawn in Sect. 5 ###reference_###."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "A brief reflection on fingerprint quality",
|
| 15 |
+
"text": "It is nowadays a well-established and accepted fact among the biometric community, that quality of biometric samples is, without a doubt, the primary factor impacting the accuracy and overall performance of biometric recognition systems. Therefore, any variability observed in the accuracy of fingerprint-based applications with respect to demographic features, can be mainly linked to discrepancies in the quality level of fingerprints produced for each of the demographic groups considered.\nFrom the perspective of automated recognition systems, ultimately, fingerprint quality can be directly connected to the amount of usable identity-related information that can be reliably and consistently extracted from the digital representation of a fingerprint (which in the vast majority of cases is a 2D image). In turn, this amount of information, will determine to what extent the fingerprint is unique and, therefore, will provide high accuracy in recognition tasks. As such, coming full circle, quality becomes an estimation or a prediction of accuracy, as defined by the ISO/IEC 29794-1:2016 standard.\nThere is a key concept that should be highlighted and that can easily remain somewhat hidden in the previous paragraph, within the adjective \u201cusable\u201d. It is important to distinguish between 1) the amount of identity information contained in the natural fingerprint, and, from that naturally built-in intel, 2) the amount that is captured by the digital representation of the fingerprint and that can be extracted (i.e., is \u201cusable\u201d) by automated systems. Fundamentally, it is this digitally extractable and usable information that defines the quality of a fingerprint and its potential to be used in recognition tasks.\nAs such, it is of great consequence to minimise the amount of information that is lost in the conversion process from the natural fingerprint to its digital representation. This translation \u201cfrom the natural world to the digital domain\u201d is accomplished at the time of acquisition by the fingerprint reader. It follows that fingerprint quality, and therefore also the eventual accuracy provided by automated recognition systems, is mainly determined at the time of acquisition. Once a fingerprint image is captured, it is difficult to enhance its quality or to improve the amount of usable information in the digital representation of the fingerprint. Known factors that determine this quality level at the time of acquisition can be mainly categorised in four groups: environmental related (e.g., temperature, humidity or lighting); finger related (e.g., dry skin, dirty or oily skin); reader related (e.g., resolution, ergonomics, sensing technology, post-processing required); behavioural, related to the human-reader interaction (e.g., positioning of the fingers, pressure applied in case of touch-based readers).\nWhile, it is difficult to have any control over finger related conditions, in the case of supervised controlled scenarios, there do exist a broad range of actions that can be triggered in order to exert a positive effect on acquisition factors concerning the environment, the reader and the behaviour of the individual."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Experimental framework",
|
| 21 |
+
"text": ""
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "Database",
|
| 27 |
+
"text": "The database used for the present study was captured in the span of three months, between March and May 2022, in the context of a pilot project carried out jointly between the Swedish Government through their Migration Agency and eu-LISA. The collaborative effort was focused on the improvement of the processes involved in the issuing and control of visas for non-EU citizens entering the Schengen area and, in particular, on the testing of tools for biometric quality assessment.\nThe database is formed by a total 15,942 different 10-print digital records produced by as many individuals, that is, it contains a total 159,420 fingerprint samples. Individuals come from 34 different non-EU countries around the world. Fingerprints were captured in 115 different locations with specific designated stations for visa issuing purposes (typically consulates). Therefore, fingerprints were captured in office-like scenarios, and the process was conducted by operators with experience and instruction in the field of fingerprint acquisition.\nAll fingerprints were captured using the same FBI-certified touch-based 500 dpi optical scanner (Cross Match Patrol ID). All fingerprint images are flat (i.e., not rolled). For each 10-print record three different images were captured, following the typical sequence 4-4-2, that is: slap of the right hand (all four fingers acquired simultaneously), slap of the left hand (all four fingers acquired simultaneously) and lastly the two thumbs acquired at the same time.\nIn case of low quality, fingerprints were reacquired up to three times, and the best individual quality score for each finger was kept in the final composite record.\nFor each fingerprint, the meta-data available in the database is: gender; age; country of origin; code of the station where it was acquired; finger-type (left or right; little, ring, middle, index or thumb); cycle number of the acquisition attempt (i.e., 1, 2 or 3).\nThe gender ratio men/women in the database is 8206/7736 (i.e., 52/48%). The bar graph in Fig. 1 ###reference_### shows the age distribution in the database, using colour code (light red for men and light gold for women), to distinguish between the number of women and men for each age group. All subjects are above 12 years of age, as that was the minimum age for compulsory fingerprinting for the issuing of visas at the time the data was acquired.\nDue to data protection reasons, for the experimental evaluation we did not have direct access to the fingerprint images, which were retained by the Swedish authorities, owners of the operational data. The analysis was carried out solely based on: 1) the quality scores extracted by the Swedish colleagues from the fingerprint samples; 2) the metadata corresponding to each sample in the database.\nWhile the list of countries and the number of subjects per country is available, this information is not disclosed in the present paper, as we believe it has a negligible impact in the final outcome of the experiments.\n\n###figure_1###"
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "Quality measure: NFIQ-2",
|
| 33 |
+
"text": "For the experimental evaluation, NFIQ-2 was used as the tool to assess quality. NFIQ-2 is a system- and vendor-agnostic fingerprint quality measure which is enforced as reference implementation of the ISO/IEC 29794-4 standard on fingerprint quality. The source code, which is publicly available, was initially developed as an initiative of US NIST in response to the need of having reliable quality assessment tools dissociated from specific vendors [6 ###reference_b6###]. Currently, the project is updated and maintained by the ISO SC 37 Working Group 3. The NFIQ-2 quality measure has been independently evaluated in numerous occasions, showing very high performance across recognition systems, and has nowadays been adopted by the biometric community as the de facto standard to set base results to which other quality measures are compared. The current version of NFIQ-2 is trained on flat fingerprints of 500 dpi resolution, captured with optical devices, that is, the same category of fingerprint images contained in the experimental database used in the present work. There is an ongoing project to extend NFIQ-2 to assess also the quality of rolled fingerprints and of other resolution values."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Results",
|
| 39 |
+
"text": "As a general caveat, when analysing the results it is important to bear in mind that, as was described in Sect. 3.1 ###reference_###, fingerprints in the experimental DB were obtained in a supervised highly controlled scenario, with a re-capture policy to be followed by operators in case the acquired images did not reach the desired quality threshold.\nThis means that the impact of behavioural or environmental factors (e.g., positioning of fingers, pressure applied, cleanliness of fingers) in the resulting quality of fingerprint images is largely minimised, unlike other uncontrolled and unsupervised scenarios (e.g., the current trend of touchless applications for the self-acquisition of fingerprints with smartphone cameras). Consequently, given the specificities of the experimental DB, potential differences in the fingerprint quality levels across population groups should be mainly originated by flaws or lack of consistency in the overall system functioning, including both the reader and the subsequent post-processing steps of fingerprint images."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4.1",
|
| 43 |
+
"parent_section_id": "4",
|
| 44 |
+
"section_name": "Fingerprint quality and gender",
|
| 45 |
+
"text": ""
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4.1.1",
|
| 49 |
+
"parent_section_id": "4.1",
|
| 50 |
+
"section_name": "4.1.1 Works related to fingerprints and gender",
|
| 51 |
+
"text": "Several works published in the scientific literature have studied the differences between female and male fingerprints, predominantly in the context of the development of sex detection algorithms [3 ###reference_b3###]. In a nutshell, the key finding of all this previous research can be summarised as: there exists a measurable difference between female and male fingerprints in terms of overall size and ridge width, which results, ultimately, in a difference in the ridge density. In essence, it has now been soundly established that female fingerprints present, on average, thinner ridges contained in a smaller surface, yielding a higher ridge density that can be exploited to classify fingerprints according to sex, a useful feature, for instance, in criminal investigations [7 ###reference_b7###]."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4.1.2",
|
| 55 |
+
"parent_section_id": "4.1",
|
| 56 |
+
"section_name": "4.1.2 Results: fingerprint quality and gender",
|
| 57 |
+
"text": "In the present study we analyse the potential divergence of men and women fingerprints from a different perspective: quality. The objective is to determine whether or not fingerprints present different quality levels according to sex attributes, which would, in turn, trigger possible biases in the performance of fingerprint-based recognition systems for men and women.\nFig. 2 ###reference_### shows, on the left, the NFIQ-2 quality distribution for all the fingerprints in the experimental database for men (light red) and women (light gold). The box-plots corresponding to the two distributions are depicted on the right. In each box, the central mark indicates the median, and the bottom and top edges of the box indicate the 25th and 75th percentiles, respectively. The whiskers extend to the most extreme data points not considered outliers, and the outliers are plotted individually using the \u2019+\u2019 marker symbol.\n\n###figure_2### OBSERVATION. Results show a significant difference in the quality levels of female and male fingerprints, not only on the average value 59 to 49, but also on the consistency of the quality produced, with a noticeably lower variance for men fingerprints (i.e., it is rare that men produce fingerprints of lower quality).\nDISCUSSION. As was presented in Sect. 2 ###reference_###, there is a direct link between fingerprint quality and usable amount of information within the fingerprint. A surprisingly extended misconception in the early days of the development of automated fingerprint recognition technology, among the general population, was that smaller fingerprints would contain less information than larger fingerprints and, therefore, they would be worse suited for recognition purposes, resulting in lower quality levels and higher error rates. Such erroneous belief, would be in some cases referenced to explain the lower accuracy observed for fingerprints coming from women and also young children, compared to those of adult males.\nHowever, if such \u201csize vs information\u201d correlation assumption was to be proven correct, it would put at risk the credibility and reliability of fingerprint recognition technology as a whole, as it would go against one of its basic founding principles: the permanence of fingerprints. Fingerprints are formed during the fetal stage of pregnancy, being set by week 17 [8 ###reference_b8###]. After their formation, fingerprints have been shown empirically through uncountable experimental studies, to remain invariant in terms of the ridge structure, throughout the lifetime of individuals (with the exception of damage caused by external environmental conditions, such as scars). Different works have provided solid data evidence that supports that the only inherent internal change to fingerprints is the homomorphic growth during childhood [1 ###reference_b1###]. However, both the ridge pattern and minutiae points remain stable. This means that, when we are born, our fingerprints are indeed smaller, but already contain all the identity related information that will allow automated systems to accurately perform recognition of individuals during adulthood.\nBased on the rationale above, it follows that the key factor defining the amount of information contained in fingerprints, is not the absolute size of the fingerprint itself, but the density of the ridge pattern. 
Smaller fingerprints also present thinner ridges and valleys and, as a result, can comprise the same amount of information as larger fingerprints in a smaller surface. That is, the information density is higher. It goes without saying that, inherently, in their natural state, some fingerprints will contain more information than others, as was already statistically shown in the famous \u201cDoddington\u2019s zoo\u201d work over two and a half decades ago [9 ###reference_b9###, 10 ###reference_b10###]. However, this amount of identity-related information, or level of uniqueness, should bear no interdependence with the size of the fingerprint itself.\nIndeed, the forensic and biometric community have now long shown that fingerprint size does not necessarily correlate to the amount of information contained within them. While there may not be specific studies directly addressing whether smaller fingerprints contain less information, the consensus in the scientific community is that the uniqueness of fingerprints is primarily determined by the detailed ridge patterns and minutiae, rather than their overall size.\nThe discussion presented so far leaves one open question: provided that the size of fingerprints is not the inherent primary factor in determining fingerprint quality and, in turn, also fingerprint accuracy: why do smaller fingerprints coming from women consistently present worse quality levels, and therefore also perform worse, than those of adult males?\nOn the one hand, current standard Automated Fingerprint Identification Systems (AFIS) work at a resolution of 500dpi, which may not be enough to properly capture the structure of smaller fingerprints with higher ridge density such as those of women and children. If the resolution is indeed insufficient, after acquisition, it is not possible to recover the information potentially lost in the translation stage performed by the scanner, from the fingerprint in its natural state to its digital representation (i.e., fingerprint image sample).\nOn the other hand, even in the cases where 500dpi is enough to properly capture all the information contained in smaller fingerprints, the image post-processing steps that are typically applied to the sample in order to generate the final template may be optimised for larger fingerprints, performing worse with fingerprints of finer details (e.g., segmentation of the region of interest, normalisation, filtering and computation of the orientation field, binarisation, ridge thinning, minutiae detection and extraction).\nHYPOTHESIS. The quality difference between women and men fingerprints is not due to intrinsic properties of the fingerprints themselves, but to current fingerprint recognition technology being designed to acquire and process, in a more accurate fashion, fingerprints of the size and ridge density of adult men.\nSOLUTIONS. In order to test if the previous hypothesis holds, two courses of action may be taken. The combination of these two corrective measures should show an improvement in the performance of female fingerprints, bringing it closer to that achieved by male fingerprints.\nFirst, the resolution of scanners can be increased to 1000 dpi, which is in fact the standard resolution employed by forensic examiners today in the manual comparison of fingerprints and finger-marks. The sensing technology to produce higher resolution readers is already available, and it would ensure the minimisation of the information lost during acquisition. 
Historically, the resolution of 500 dpi was selected as it offered a good compromise between the fidelity with which the natural fingerprint is acquired and the size of the resulting digital image [11 ###reference_b11###]. However, nowadays, with the continuous increase both in storage capacity and in computational speed of digital systems, the size of the captured images is no longer a limiting factor, and preference should be given to the acquisition of more detailed images of higher quality. We could argue that, currently, digital technology (sensing, storage and processing) has reached a point where there is no reason not to capture fingerprints of higher resolution. Ultimately, it is always better to reduce the amount of information acquired, should it be redundant, than to estimate or interpolate missing information. In other words, following one of the primary principles of data science: the more data, the better.\nSecond, fingerprint recognition systems could use image post-processing algorithms specifically trained and tailored to given fingerprint size and ridge density values. That is, systems may deploy \u201cridge density-specific\u201d (i.e., \u201csex-specific\u201d or \u201cage-specific\u201d) algorithms for the processing of fingerprints. These adaptive methods would help equalise the performance of fingerprints with different information density values (such as those from women or children)."
|
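The per-group analysis behind Fig. 2 (medians, 25th/75th percentiles and whiskers of the NFIQ-2 distributions for men and women) can be reproduced from a table of quality scores and metadata alone, which is all the study had access to. Below is a minimal Python sketch of that summary step; the file name and the columns nfiq2_score and gender are assumptions for illustration, not the study's actual schema.

# Minimal sketch (not the authors' code): per-gender NFIQ-2 box-plot statistics
# computed from a table of quality scores and metadata. Names are assumptions.
import pandas as pd

df = pd.read_csv("nfiq2_scores.csv")            # hypothetical file: one row per fingerprint
df = df[df["nfiq2_score"].between(0, 100)]      # NFIQ-2 scores lie in the range [0, 100]

def box_stats(scores: pd.Series) -> dict:
    q1, med, q3 = scores.quantile([0.25, 0.50, 0.75])
    iqr = q3 - q1
    # Whiskers reach the most extreme points within 1.5*IQR of the box,
    # matching the box-plot convention described for Fig. 2.
    return {
        "median": med,
        "q1": q1,
        "q3": q3,
        "whisker_low": scores[scores >= q1 - 1.5 * iqr].min(),
        "whisker_high": scores[scores <= q3 + 1.5 * iqr].max(),
    }

print(df.groupby("gender")["nfiq2_score"].apply(box_stats))

The same grouping can be repeated over age bands or finger codes to obtain the per-group statistics discussed in the following sections.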
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.2",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "Fingerprint quality and age",
|
| 63 |
+
"text": ""
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.2.1",
|
| 67 |
+
"parent_section_id": "4.2",
|
| 68 |
+
"section_name": "4.2.1 Works related to fingerprints and age",
|
| 69 |
+
"text": "Several works have addressed in the scientific literature the dependency of fingerprint quality with respect to age. Most of these works focus on the analysis of the performance of children fingerprints. Among these contributions centred on youngsters,\ntwo works that stand out as the most complete are [12 ###reference_b12###] and [2 ###reference_b2###]. The first of these studies [12 ###reference_b12###], was published in 2017 by a team from the Michigan State University. The evaluation was carried out on fingerprints from 309 children aged between 0 and 5 years, captured using both a standard 500ppi resolution reader, and a customed high-resolution 1270ppi reader. The objective was to address some open questions at the time such as: 1) Do fingerprints of young children possess the salient features required to uniquely recognize a child? 2) If so, at what age can a child\u2019s fingerprints be captured with sufficient fidelity for recognition? 3) Can a child\u2019s fingerprints be used to reliably recognize the child as he ages? The main conclusion of the work was that, in fact, children fingerprints contain all the identity-related information that allows for accurate and consistent recognition, provided that the acquired images are of sufficient quality and resolution.\nThese results were confirmed in the second study mentioned above carried out in 2019 by a team of scientists working at the European Commission\u2019s Joint Research Centre (JRC). This work was performed on a database of almost 70K pairs of fingerprints coming from children aged 5-16, that were captured at different points in life with a difference of between 1 to 7 years [2 ###reference_b2###]. The experiments showed that the amount of identity information contained in the fingerprints remained constant during childhood growth, being independent of the fingerprint size. The study verified for the first time that the displacement of minutiae points during this period of life due to growth follows an isotropic model, that is, the displacement is invariant to the distance between the minutiae and the centre of the fingerprint and to the actual location of the minutiae with respect to this centre. Based on this finding, the authors developed a growth model capable of compensating such displacement in order to reduce the ageing effect on fingerprint recognition systems (i.e., the decrease in accuracy when the time separation between the two compared fingerprint impressions increases).\nFrom a general perspective, not focused solely on fingerprints coming from children, the most comprehensive study on the evolution of fingerprint quality and accuracy through the full human life-span was carried out in 2017 by the same JRC research team that published the study described in the previous paragraph [2 ###reference_b2###]. The experiments were carried out over almost 500K fingerprints, coming from individuals aged 0-25 and 65-98 years, acquired under operational conditions with standard 500ppi optical touch-based scanners for the purpose of issuing ID national cards [1 ###reference_b1###]. 
The most salient observations from the experiments were that: 1) from a quality perspective, children fingerprint impressions show better quality than those of the elderly; 2) fingerprint quality increases rapidly between 0 and 12 years of age, where it stabilises, remaining fairly constant during adulthood until 40-45 years of age, where it starts decreasing linearly.\nIt should be noted that, in the JRC study, due to lack of fingerprint data in the age range 26-64, the behaviour of fingerprint quality during that period was provided as an estimation, based on linear fitting of the available data, rather than an experimental observation. As stated in that work \u201cgiven the limited amount of data available for adults from an age-wise perspective, covering ages 18-25, this assumption regarding the stable behaviour of fingerprint quality for adults until approximately 45 years of age, where it starts decreasing linearly, should still be confirmed on a set of data covering the age range 26-64.\u201d\nEven though undoubtedly of great value due to the amount of data (500K fingerprints) and the nature of these data (acquired in a real operational scenario), unfortunately the JRC study also lacked sex-related information, so that potential differences between the quality of men and women fingerprints could not be analysed."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.2.2",
|
| 73 |
+
"parent_section_id": "4.2",
|
| 74 |
+
"section_name": "4.2.2 Results: fingerprint quality and age",
|
| 75 |
+
"text": "In the present work we address some of the questions left open in the JRC study [1 ###reference_b1###], in particular: 1) confirm (or reject) the assumption that fingerprint quality remains stable through adulthood until around 45 years of age, when it starts decreasing linearly; 2) analyse if there exists any significant difference between the evolution of women and men fingerprints quality through life.\nWith this two-fold objective in mind, Fig. 3 ###reference_### shows, using boxplots, the evolution of fingerprint quality, both for men (light red) and women (light gold) between 12 and over 80 years of age. Different observations can be extracted from the results presented in this figure:\n\n###figure_3### OBSERVATION 1. As a first new finding, the results provide data-based evidence to confirm the hypothesis made in the JRC work [1 ###reference_b1###], and that still needed to be verified: fingerprint quality stays stable from 12 years of age, through the first part of adulthood, until around 45-50 years of age, when it starts decreasing linearly with age.\nOBSERVATION 2. A second new piece of evidence with respect to previous literature, is that the quality of women fingerprints is consistently lower than that of men through the life period covered in the experiments (12 to 80 years of age). The quality difference remains fairly constant for all the age groups covered in the database, except for the case of the youngest subjects (12-14 years of age), where fingerprint quality appears to be almost analogous independently of gender, coinciding with the time period where the size difference between boys and girls is also the smallest. Whether or not this lower quality for female fingerprints is also observable between 0 and 12 years of age (period not covered by the data in the present study) still needs to be assessed.\nOBSERVATION 3. The results also support what was already pointed out by the experiments in [1 ###reference_b1###], regarding the quality of fingerprints of 12-year-old children. The quality level at this young age is already equivalent to that of adults between 18 and 45, and higher than that of adults over 50.\nOBSERVATION 4. The present study also provides further compelling evidence to confirm the observation already made in [1 ###reference_b1###] regarding the potential issues that may arise in the processing of fingerprints coming from elders above 65 years of age, as a result of the very poor quality of their fingerprint impressions.\nDISCUSSION. The findings of the study point out that the elderly can pose a significant challenge to fingerprint recognition systems, comparable, or even bigger, than children. This fact can have big practical implications. We should not forget that Europe has stated a commitment to \u201cthe rights of the elderly to lead a life of dignity and independence and to participate in social and cultural life\u201d [13 ###reference_b13###]. This implies the need to take measures to ensure the inclusion of elders in every-day life and to guarantee their access to services available to the general population. The results presented in this section show that, given the deterioration of fingerprint quality at advanced points in life, there is a potential risk of age-based discrimination against elders due to increased rates of failure-to-capture or failure-to-enrol. 
We believe that this should be an important issue to be considered in the design of fingerprint recognition systems in order to avoid possible inter-generational inequality [14 ###reference_b14###].\nProvided that for the elderly, as for adults, fingerprint size and ridge density remain basically invariable with age, the question that follows is: Why, then, is fingerprint quality of elders so much lower than that of adults, both for men and women? Why do experiments show a constant linear decrease of fingerprint quality after approximately 50 years of age? The main difference in this case is not size and ridge density (as was the case for men and women), but skin condition. It has been shown in the specialised literature that, with age, and especially starting at around 45 years of age, skin gradually loses elasticity, firmness and also becomes drier, mostly due to the decrease of collagen [15 ###reference_b15###]. This progressive deterioration of skin properties, together with other possible medical conditions typical of older age such as arthritis, hinders the acquisition of fingerprint impressions based on current touch-based optical scanners, as the interaction between the reader platen and the finger does not happen in the optimal expected manner.\nHYPOTHESIS. The low quality of fingerprints coming from elders is related to the deficient interaction between the finger and touch-based optical scanners due to the gradual and constant degradation of skin condition through life, being especially noticeable after 45 years of age.\nSOLUTIONS. Over the last decade there has been a big investment within the biometric community to develop a new generation of fingerprint touchless acquisition methods based both on specifically designed readers and on standard equipment such as smartphones [16 ###reference_b16###]. While much progress has been made in the field, touchless fingerprint recognition technology is still lagging behind in some aspects with respect to the traditional and better-established touch-based procedures, as it adds variability and less controlled conditions to the acquisition process [17 ###reference_b17###, 18 ###reference_b18###]. However, for some use-cases such as the acquisition of elder fingerprints, which prove to be highly challenging for touch-based readers (as shown in the experiments reported in [1 ###reference_b1###] and in the present work), the use of touchless scanners can provide an improvement with respect to the current state of play. Avoiding contact between the skin and the sensor would solve the quality issues derived from this poor interaction and should, in theory, improve the overall performance of fingerprint recognition systems for this segment of society.\nFurther experimental evaluation needs to be performed to verify this possible solution, on a dedicated database acquired both with touch-based and touchless technologies and including both adults (as a control group) and elders."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "4.3",
|
| 79 |
+
"parent_section_id": "4",
|
| 80 |
+
"section_name": "Fingerprint quality and finger-type",
|
| 81 |
+
"text": ""
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "4.3.1",
|
| 85 |
+
"parent_section_id": "4.3",
|
| 86 |
+
"section_name": "4.3.1 Works related to fingerprints and finger-type",
|
| 87 |
+
"text": "Only a few early studies, carried out on small sets of data, consider the analysis of quality and recognition accuracy from each finger individually. Furthermore, except for two of these research publications [4 ###reference_b4###, 19 ###reference_b19###], in general, existing literature only addresses the topic as a by-result of experimental evaluations with a different main focus. However, even if carried out on small sets of data captured ad-hoc in laboratory conditions, all these pioneer studies already point out to the possibility that the quality level and performance of the images produced by each individual finger may vary quite significantly.\nThe two most relevant works from the state of the art, related to the current piece of research, were both published in 2010. In the first of these studies [4 ###reference_b4###], researchers from the Gjovik University analysed the influence of finger types on fingerprint recognition performance, over a database containing all 10 fingers of 100 subjects. Fingerprints were captured individually (not slaps impressions) using six different scanners, five touch-based and one touchless. Their analysis confirmed for the first time following a rigorous scientific protocol, the general claim that was commonly made to that date, without a solid experimental basis, regarding the lesser accuracy of the little finger for recognition tasks.\nIn the second of the 2010 studies, the authors examine how fingerprint recognition systems can balance the speed of a single-print system with the robustness of a ten-print system by using a combination of fingers [19 ###reference_b19###]. The goal of this research was to find the combination of fingers that provides the best trade-off between acquisition/verification speed and fewest comparison errors. For this objective, a database containing images of all 10 fingers from 70 subjects was used. It was found that the thumb, index, and middle fingers of both hands presented the highest quality scores and were, accordingly, also the fingers providing the best accuracy in recognition tasks.\nTherefore, the two 2010 studies summarised above, coincide in their conclusions on the quality and accuracy of finger-types. They both showed that the little fingers present the worst performance of all fingers, while the best is reached using the thumbs and indexes. These observations were, to a large extent, further reinforced in a technical report by the US NIST in 2018 [20 ###reference_b20###]."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "4.3.2",
|
| 91 |
+
"parent_section_id": "4.3",
|
| 92 |
+
"section_name": "4.3.2 Results: fingerprint quality, handedness and finger-type",
|
| 93 |
+
"text": "Building upon the findings of these preliminary publications, in the current section we present the results of the first large-scale operational study of fingerprint quality based on individual fingers. The objective of the analysis is to determine if, based on the current most extended acquisition devices (i.e., 500dpi touch-based optical scanners), there is a difference in the quality level of each finger. That is, we want to give an answer to the question: are all fingers born equal (in terms of quality)? Or put in another way, traditionally, all fingerprints are treated the same in terms of fingerprint recognition, but should they? Do all fingers produce images that present the same discrimination potential? Are fingerprint samples produced by all fingers equally suited for personal authentication? Do all fingerprint samples possess the same amount of usable discriminative information independently of the finger that produced them?\nFig. 4 ###reference_### (left) shows the NFIQ-2 quality distribution for all the fingers combined of the left hand (blue) and of the right hand (pink). On the right, the box-plots corresponding to the two previous distributions are depicted.\n\n###figure_4### OBSERVATION 1. From Fig. 4 ###reference_### it can be observed that the right hand consistently provides better quality fingerprint images than the left hand.\nDISCUSSION. We believe that this first observation is related to the handedness of humans. It is estimated that around 90% of the world population is right-handed. As such, it is expected that individuals are more skilled to interact with the acquisition scanner using the right hand (their dominant hand) and, therefore, to provide better quality fingerprints.\n\n###figure_5### The effect of handedness on fingerprint quality and performance was already considered in a 2010 work carried out on a database of 40 subjects, evenly distributed between right- and left-handed [21 ###reference_b21###]. In that study, both the index and middle fingers of both hands were acquired with three scanners of different technologies (optical, thermal and capacitive). Due to the scarcity of data, results did not show any conclusive trends regarding the impact of handedness on fingerprint performance. The present findings amend the final observations of [21 ###reference_b21###] and support the assumption that there is a difference in accuracy between the use of the dominant and non-dominant hand for fingerprint recognition.\nWhile the results presented in this work indicate that handedness has an impact on fingerprint quality, such a statement still needs to be verified with a specific experimental protocol where information regarding handedness is part of the available metadata for the subjects in the dataset (which was not the case for the current study).\nHYPOTHESIS 1. When touch-based slap acquisition readers are used, the dominant hand produces higher quality fingerprint impressions than the non-dominant hand.\nSOLUTIONS. Assuming the correctness of the hypothesis above, in systems requiring fingers of only one hand, priority could be given to the dominant one. 
In general it would be useful to store as part of the personal data of individuals, their handedness, so as to give preference, for recognition purposes, to fingerprints from their dominant hand.\nIn Fig.5 ###reference_###, we present the box plots of the quality distributions for each individual finger of the left and right hands, following the natural order of the fingers of both hands.\nOBSERVATION 2. From this figure it can be seen that the quality of fingerprints differs quite significantly depending on the finger-type. Fingers, ordered from lower to higher quality of their fingerprints are: little, ring, middle, index and thumb. This is consistent across both hands. This order follows the natural anatomical order of fingers in the hand.\nThe most noticeable difference among all fingers, is the low quality produced by the little finger compared to all other four.\nDISCUSSION. In their natural state, it is likely that all fingerprints present a similar amount of discriminative information (\u201ccharacter\u201d definition of quality in ISO/IEC 29794- 1), however, due to the ergonomics/usability of scanners, this information is better captured for some fingers (\u201cfidelity\u201d definition of quality in ISO/IEC 29794-1).\nTouch-based slap acquisition devices require the user to press all four fingers contemporarily against a flat platen, following a straight forward line from the subject. From an anatomical perspective, due to the limitations of the wrist and finger joints, this task is easier to perform with the index finger, and becomes increasingly less comfortable for the rest of the fingers. The result is a good interaction of the index finger with the scanner, that worsens successively for the other fingers. This diverse interaction, in turn, results in different quality of the captured images.\nAnother factor to be taken into account is that, when capturing all four fingers contemporarily, it is more difficult for the subject to control placing and the amount of pressure exerted by each single one of them separately. It has been pointed out in different works that, when using touch-based scanners, the pressure applied against the platen is one of the key parameters that determines the final quality level obtained for the resulting fingerprint images [22 ###reference_b22###].\nFollowing a similar rationale, since thumbs are acquired on their own, subjects are more proficient at placing them correctly on the platen, also having a better control over the pressure applied to each of them, with independence of the rest of fingers, resulting in high quality images. These results confirm, on a statistically significant database, what was initially pointed out by the two works presented in [4 ###reference_b4###] and [19 ###reference_b19###].\nHYPOTHESIS 2. The difference in quality among individual fingers is due, not to the distinctiveness of the natural fingerprints, but to the way in which the information contained in them is translated to the digital domain by current touch-based slap acquisition scanners.\nSOLUTIONS. While we cannot be certain regarding the cause of the fidelity issue detected in the experiments for the different individual fingers, we can hypothesise that such a difference between character and utility may be put down to a large extent to the ergonomics and usability of current touch-based scanners for the acquisition of slaps impressions. 
While the performance of current readers is very high, it could still be improved to better acquire the ring and little fingers. This could be accomplished, for instance, by not using a flat platen, but rather a slightly curved one, for example in the shape of a dome (or the top segment of a half sphere). Another possibility would be to change the angle of the flat platen, not to be perpendicular to the body, but to capture the fingers following the natural angle formed at the elbow by the arm and the forearm when it is comfortably rested on a desk or when the hand is placed in front of the chest with the elbow bent.\nFrom a general perspective, further investment should be dedicated to the development of scanners (e.g., researching the potential benefits of touchless technology), the improvement of acquisition protocols (e.g., placement of the scanners, type of fingers to be captured), and the improvement of the usability of this technology both by operators and captured subjects.\nIf all four fingers are being acquired, from a quality perspective, it is preferable to acquire them individually one by one, rather than as a slap image (all four simultaneously). This way, each finger interacts with the capturing device independently of the other fingers, which would allow the subject to have better control over the acquisition process and, in turn, would likely improve the quality of fingerprints, especially for the ring and little fingers. Of course, this would entail a significant increase in the acquisition time, which can be a critical factor in some practical applications/scenarios.\nFor some specific applications it may not be possible to acquire all 10 fingers, or it may be decided due to different constraints not to acquire them (e.g., restricted acquisition time). In these cases, where an a-priori decision must be taken regarding which individual fingers to acquire, priority should be given to, in this order: thumb, index, middle, ring and little fingers.\nRecognition algorithms may also make use of this a-priori knowledge regarding the expected quality of fingerprints depending on the finger that produced them. For instance, specific score-level fusion strategies could be designed in order to give a higher weight in the final comparison outcome to those fingers that are known to provide better quality [23 ###reference_b23###]."
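As a rough illustration of the score-level fusion idea mentioned above, the following sketch (Python) weights per-finger comparison scores according to the expected quality of each finger type. The weight values, function name and score format are hypothetical assumptions for illustration only; they are not values or code from this study.

# Illustrative quality-weighted score-level fusion.
# The per-finger weights are hypothetical: they only encode the observed
# ordering thumb > index > middle > ring > little, not measured values.
FINGER_WEIGHTS = {"thumb": 1.0, "index": 0.9, "middle": 0.8, "ring": 0.7, "little": 0.5}

def fuse_scores(per_finger_scores):
    """Return the weighted average of comparison scores keyed by finger type."""
    weighted_sum = 0.0
    total_weight = 0.0
    for finger, score in per_finger_scores.items():
        w = FINGER_WEIGHTS.get(finger, 1.0)
        weighted_sum += w * score
        total_weight += w
    return weighted_sum / total_weight if total_weight else 0.0

# Example: the little finger contributes less to the final decision.
print(fuse_scores({"thumb": 0.85, "index": 0.80, "little": 0.40}))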
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "5",
|
| 97 |
+
"parent_section_id": null,
|
| 98 |
+
"section_name": "Conclusion",
|
| 99 |
+
"text": "Given the paramount importance of quality in biometrics, a very significant amount of effort has been dedicated from all stakeholders in the field (researchers, practitioners, users, developers, vendors) to study the main factors that have an impact on the quality of different biometric characteristics. In particular, as happens in many other areas related to biometrics, fingerprints stand out as the biometric characteristic, where the largest amount of research and information has been generated. In fact, the big investment made in fingerprint quality assessment, has paved the way for other biometric characteristics, such as face, to get the support required in order to reach a similar level of development in terms of understanding of quality.\nHowever, even if undeniable progress has been achieved in fingerprint quality analysis, there are still areas where further research needs to be performed in order to confirm or complement some preliminary observations that have been made on statistically limited sets of data. The present paper is a contribution to bring further insight to this field and to bridge some of these still existing gaps.\nIn particular, the present work is focused on determining the impact that different demographic factors have on the quality and overall performance of fingerprints in automated recognition systems. The experimental analysis has aimed at assessing the potential bias that may exist in fingerprint recognition technology with respect to gender and age, and, also, to the inter-dependency of fingerprint accuracy with regard to handedness and finger-type.\nThe results reached in the work, performed on a database of almost 16,000 subjects acquired under real operational conditions, can lead to practical decisions for the improvement on the use and deployment of this technology.\nTo sum-up the main contributions of the work, the next concrete observations and follow-up experimentation have been extracted from the results presented in the paper:\nGENDER:\nOBSERVATION: there exists a bias in fingerprint recognition between men and women. Fingerprints coming from men, systematically produce higher quality levels and, as a result, also higher accuracy, than those of women.\nEXPLANATION: such bias is hypothesised to be produced by the difference in ridge density between male and female fingerprints.\nFOLLOW-UP ACTIONS: higher resolution readers, and specific processing algorithms for fingerprints with a higher ridge density (those of women) may contribute to reduce the observed bias. Further experimentation is required to confirm or reject this hypothesis.\nAGE:\nOBSERVATION: there exists a bias in fingerprint recognition for adults and elders over 50 years of age. Fingerprint quality starts decreasing linearly at around 45-50 years of age, and can pose a real challenge in terms of error rates for elders over 65.\nEXPLANATION: this quality and accuracy difference between young adults and elders, which is consistent both for men and women, is assumed to be produced by the degradation of the skin properties (mainly elasticity and dryness due to loss in collagen levels) which results in an inadequate interaction with the touch-based technology used today in most cases for fingerprint acquisition.\nFOLLOW-UP ACTIONS: the use of the new generation of touchless fingerprint scanners can help to reduce the observed bias for elders. 
As in the case of gender, further specific experimentation is required to verify this hypothesis.\nHANDEDNESS and FINGER-TYPE:\nOBSERVATION: there exists a bias between the quality provided by fingerprints of the right hand and the left hand, and also among individual fingers, with the thumb and index being the most accurate, and the little finger clearly providing the lowest quality.\nEXPLANATION: the explanation for these observed differences is a combination of: 1) handedness: individuals provide better fingerprint quality with their dominant hand; and 2) ergonomics: touch-based slap fingerprint readers are designed to better capture thumbs and indexes, and are less adapted to the acquisition of the ring and little fingers.\nFOLLOW-UP ACTIONS: Working on the design, from an ergonomic and usability perspective, of fingerprint slap scanners can help to reduce the quality and accuracy difference between hands and also among individual fingers.\nTo conclude, we can state that the present study has shown that there exist biases among the fingerprint quality of different demographic groups and finger-types. The question that we should ask ourselves is: is this bias due to intrinsic differences in the amount of information contained in our natural fingers? Do fingers from elders contain less information than those of adults? Do fingers from women contain less information than those of men? Is the little finger worse suited for recognition purposes than the index? Or rather, are we failing to translate or digitally capture with enough accuracy the information contained in the natural fingers of certain segments of the population? That is, are fingerprint readers and fingerprint processing algorithms better designed to perform well with certain demographic groups? Or put in another way, considering the topic from the ISO/IEC 29794-1 standard definition of quality: are these discrepancies caused by inherent \u201ccharacter\u201d differences or by \u201cfidelity\u201d issues originating at the time of acquisition?\nAssuming that the observed quality variability is mainly caused by an inadequate acquisition of fingerprints, and not by the fingers themselves, how can we ensure that such bias is minimised and that quality and accuracy remain uniform and consistent across all demographic groups and finger-types?\nThe present study provides some plausible hypotheses for this observed variability, and finally proposes a course of action that can be followed to minimise the quality difference and, ultimately, to improve recognition accuracy.\nTo sum up the conclusions of the work, we can state that, in order to improve the overall quality of fingerprint recognition technology across all segments of the population, making its accuracy as consistent and uniform as possible for all demographic groups (e.g., age or gender), we should focus on the design and development of more advanced fingerprint readers, and of specific processing algorithms, capable of extracting all the information contained in fingerprints in a user-friendly and repeatable manner. That is, we should adapt to the specificities of demographic groups in order to minimise potential performance biases among them."
|
| 100 |
+
}
|
| 101 |
+
],
|
| 102 |
+
"appendix": [],
|
| 103 |
+
"tables": {},
|
| 104 |
+
"image_paths": {
|
| 105 |
+
"1": {
|
| 106 |
+
"figure_path": "2409.19992v2_figure_1.png",
|
| 107 |
+
"caption": "Figure 1: Age distribution in the experimental database according to sex.",
|
| 108 |
+
"url": "http://arxiv.org/html/2409.19992v2/x1.png"
|
| 109 |
+
},
|
| 110 |
+
"2": {
|
| 111 |
+
"figure_path": "2409.19992v2_figure_2.png",
|
| 112 |
+
"caption": "Figure 2: NFIQ-2 quality distributions for men and women (left) and the corresponding box plots for those distributions (right). On each box, the central mark indicates the median, and the bottom and top edges of the box indicate the 25th and 75th percentiles, respectively. The whiskers extend to the most extreme data points not considered outliers, and the outliers are plotted individually using the \u2019+\u2019 marker symbol.",
|
| 113 |
+
"url": "http://arxiv.org/html/2409.19992v2/x2.png"
|
| 114 |
+
},
|
| 115 |
+
"3": {
|
| 116 |
+
"figure_path": "2409.19992v2_figure_3.png",
|
| 117 |
+
"caption": "Figure 3: Boxplots representing the fingerprint quality for the different age groups present in the experimental database, separated by sex (men, light red, and women, light gold). the central mark indicates the median, and the bottom and top edges of the box indicate the 25th and 75th percentiles, respectively. The whiskers extend to the most extreme data points not considered outliers, and the outliers are plotted individually using the \u2019+\u2019 marker symbol.",
|
| 118 |
+
"url": "http://arxiv.org/html/2409.19992v2/x3.png"
|
| 119 |
+
},
|
| 120 |
+
"4": {
|
| 121 |
+
"figure_path": "2409.19992v2_figure_4.png",
|
| 122 |
+
"caption": "Figure 4: NFIQ-2 quality distributions (left) for all fingers in the database separated by hand and the corresponding box plots (right) for those distributions.",
|
| 123 |
+
"url": "http://arxiv.org/html/2409.19992v2/x4.png"
|
| 124 |
+
},
|
| 125 |
+
"5": {
|
| 126 |
+
"figure_path": "2409.19992v2_figure_5.png",
|
| 127 |
+
"caption": "Figure 5: Box-plots corresponding to the NFIQ-2 quality distributions per finger-type. For clarity, the box-plots follow the natural order of the fingers of both hands. The box-plots corresponding to fingers of the left hand are depicted in shades of blue, while those corresponding to fingers of the right hand are shown in shades of pink.",
|
| 128 |
+
"url": "http://arxiv.org/html/2409.19992v2/x5.png"
|
| 129 |
+
}
|
| 130 |
+
},
|
| 131 |
+
"validation": true,
|
| 132 |
+
"references": [],
|
| 133 |
+
"url": "http://arxiv.org/html/2409.19992v2"
|
| 134 |
+
}
|
20241004/2410.00822v2.json
ADDED
|
@@ -0,0 +1,450 @@
| 1 |
+
{
|
| 2 |
+
"title": "VHASR: A Multimodal Speech Recognition System With Vision Hotwords",
|
| 3 |
+
"abstract": "The image-based multimodal automatic speech recognition (ASR) model enhances speech recognition performance by incorporating audio-related images. However, some works suggest that introducing image information into the model does not help improve ASR performance. In this paper, we propose a novel approach that effectively utilizes audio-related image information and set up VHASR, a multimodal speech recognition system that uses vision as hotwords to strengthen the model\u2019s speech recognition capability. Our system utilizes a dual-stream architecture, which first transcribes the text on the two streams separately and then combines the outputs. We evaluate the proposed model on four datasets: Flickr8k, ADE20k, COCO, and OpenImages. The experimental results show that VHASR can effectively utilize key information in images to enhance the model\u2019s speech recognition ability. Its performance not only surpasses unimodal ASR, but also achieves SOTA among existing image-based multimodal ASR. Our code is available at https://github.com/193746/VHASR",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "###figure_1### ASR model (Chan et al., 2015 ###reference_b3###) takes audio as input and produces corresponding transcription. One effective method to improve the model\u2019s ASR performance is to increase both the volume of training data and the number of model parameters. We are now in the era of large language models (LLMs) (Brown, 2020 ###reference_b1###; Li et al., 2023 ###reference_b19###), which have been developed across various domains (Yang et al., 2024 ###reference_b33###; Zhang et al., 2023 ###reference_b34###). In the speech domain, there are also many LLMs that demonstrate impressive ASR capabilities (Chu et al., 2023 ###reference_b5###; Radford et al., 2023 ###reference_b25###). However, this approach can be expensive. A more cost-effective alternative is to introduce additional information related to speech into the model. This information can be presented in either textual or visual forms. The ASR system that utilizes audio-related information from various modalities is referred to as multimodal ASR.\nHotwords, which are terms in certain professional fields or words that are easily confused with other homonyms, are common textual cues. There have been many studies on how to freely customize hotwords and improve the recall of hotwords (Han et al., 2021 ###reference_b13###; Shi et al., 2024 ###reference_b27###). It is also possible to use captions as textual information (Moriya and Jones, 2018 ###reference_b20###; Han et al., 2023 ###reference_b12###).\nVisual cues can be in the form of video or image. Audio-Visual Speech Recognition (AVSR) enhances the accuracy of speech recognition by capturing lip movement information of characters in video (Ivanko et al., 2023 ###reference_b17###). Image-based multimodal ASR extracts visual feature from image associated with speech to correct transcription errors. We abbreviate image-based multimodal ASR as IBSR. Because the lip movement information of video\u2019s role is closely linked to his speech, it influences nearly every word in the transcribed text. In contrast, IBSR only impacts a subset of the words as the image is only associated with specific audio clips (Onea\\textcommabelowt\u0103 and Cucu, 2022 ###reference_b21###). IBSR currently lacks a universal and effective method for utilizing image information, leading to various experimental results in different studies. Some works (Sun et al., 2016 ###reference_b31###; Srinivasan et al., 2020a ###reference_b28###, c ###reference_b30###) have a positive effect by incorporating image information, while others (Srinivasan et al., 2020b ###reference_b29###; Onea\\textcommabelowt\u0103 and Cucu, 2022 ###reference_b21###; Han et al., 2023 ###reference_b12###), have the opposite effect.\nIn this paper, we propose a novel approach effectively utilizing audio-related image information and set up VHASR, a multimodal speech recognition system that utilizes vision hotwords to enhance the model\u2019s speech recognition capability. It calculates the similarity between different modalities to improve the effectiveness of cross-modal fusion. Drawing inspiration from text hotwords, we utilize Vision Transformer (ViT) to partition images into multiple visual tokens and consider each visual token as a vision hotword. Our system adopts a dual-stream architecture. One stream is the ASR stream, which receives audio information and produces transcribed text. 
The other stream is the vision hotwords (VH) stream, which receives vision hotwords and audio hidden features, and generates corresponding text. In the VH stream, we calculate the similarity between audio and vision hotwords to reduce the weight of vision hotwords with low similarity. This process helps to extract fine-grained image information. When inferring, VHASR first transcribes the text separately from the ASR stream and the VH stream, and then merges the outputs. We ensure the high accuracy of the merged output by comparing the similarity of different modalities. Specifically, we first calculate the audio-image similarity to discard the VH stream if the similarity is low. Then, we calculate the image-text token similarity to compare the ASR stream and VH stream outputs by tokens. Finally, tokens with higher similarity are selected for the merged output.\nWe evaluate the proposed model on four datasets: Flickr8k, ADE20k, COCO, and OpenImages. The experimental results show that VHASR can effectively utilize critical information in images to improve the model\u2019s ASR performance. Its performance is not only better than ordinary unimodal ASR models but also surpasses existing IBSR models. The contributions of this paper are as follows:\nWe demonstrate that through our idea of vision hotwords, injecting audio-related image into the ASR model can help the model correct transcription errors.\nWe propose VHASR, by utilizing a dual-stream architecture and calculating the cross-modal similarity, it promotes effective utilization of visual information in vision hotwords.\nThe proposed model achieves SOTA on Flickr8k, ADE20k, COCO, and OpenImages."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Related Work",
|
| 15 |
+
"text": "Image-based multimodal ASR. Sun et al. (2016 ###reference_b31###) introduce a multimodal speech recognition scenario which utilizes images to assist the language model in decoding the most probable words and rescoring the top hypotheses. Caglayan et al. (2019 ###reference_b2###) propose an end-to-end multimodal ASR system implemented by LSTM (Graves and Graves, 2012 ###reference_b11###). They apply visual adaptive training (Palaskar et al., 2018 ###reference_b22###) to finetune a pretrained ASR model with visual data, and leverage visual information to initialize model\u2019s encoder and decoder. Srinivasan et al. (2020b ###reference_b29###) present a model for multimodal ASR that utilizes visual feature from object proposals. They integrate the features of object proposals into a visual representation by utilizing their attention distribution as weights, and incorporate this visual representation into the model via a hierarchical attention mechanism. Onea\\textcommabelowt\u0103 and Cucu (2022 ###reference_b21###) combine speech and visual embeddings using two fusion approaches. One approach fuses along the embedding dimension, and another fuses along the sequence dimension. They find that the first method performs better. Han et al. (2023 ###reference_b12###) propose a novel multimodal ASR model called ViLaS, which is based on the continuous integrate-and-fire (CIF) mechanism (Dong and Xu, 2020 ###reference_b6###). It can integrate image and caption information simultaneously or separately to facilitate speech recognition. Chang et al. (2023 ###reference_b4###) propose a multimodal ASR system for embodied agents. Their model is based on Transformer (Vaswani et al., 2017 ###reference_b32###), where the visual feature vector is concatenated to the decoder\u2019s input word embedding at every timestep of generation.\n###figure_2### Function of image information. Srinivasan et al. (2020a ###reference_b28###) conduct the experiment called audio corruption, in which they mask the words related to nouns and places with silence and white noise, respectively. The study demonstrates that visual representations help in recovering words that are masked in the input acoustic signal. Srinivasan et al. (2020c ###reference_b30###) think the previous work has only masked a fixed set of words in the audio, which is an unrealistic setting. So, they propose a method called RandWordMask, where masking can occur for any word segment to improve the audio corruption experiment. Kumar et al. (2023 ###reference_b18###) propose two effective ASR error correction methods: one employs a gated fusion method to concatenate visual and textual features, while the other utilizes image\u2019s caption as correction model\u2019s prompt. Both methods demonstrate that visual information helps restoring incorrect words in transcription. In short, image information helps to recover incorrect words in transcription that are caused by masked acoustic signals or ASR model\u2019s error."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "VHASR",
|
| 21 |
+
"text": ""
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "ASR Stream",
|
| 27 |
+
"text": "Following Gao et al. (2022 ###reference_b10###), we adopt their parallel Transformer for non-autoregressive end-to-end speech recognition as the basic framework of our ASR stream. As shown in the green dashed box of Figure 2 ###reference_###, the adopted framework consists of four parts: speech encoder, predictor, sampler, and decoder. The framework adopts two-pass training and one-pass inference."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.1.1",
|
| 31 |
+
"parent_section_id": "3.1",
|
| 32 |
+
"section_name": "3.1.1 Acoustic Representation Learning",
|
| 33 |
+
"text": "Let be a speech sequence with frames, . is a sequence of tokens, and its length is . Each token is in the vocabulary , .\nThe speech encoder adopts the SAN-M (Gao et al., 2020 ###reference_b9###) structure, which is a special Transformer Layer that combines self-attention mechanism with deep feed-forward sequential memory networks (DFSMN). It converts the input to the hidden representation .\nThe predictor is a two-layer Deep Neural Networks (DNN) model that aligns speech and text based on CIF. It is used to predict the length of sentences and extract acoustic representation from the speech encoder\u2019s hidden representation .\nThe sampler does not contain learnable parameters and is only applied when training. It strengthens acoustic representation to semantic representation by incorporating text features, aiming to better train the context modeling ability of the speech decoder. The denotes the embedding of . The sampler initially identifies tokens in with transcription errors, and subsequently combines the correct embeddings of these error tokens in into to generate the semantic features . Not every error token\u2019s correct embedding will be incorporated into , this is determined by the mixing ratio , ."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "3.1.2",
|
| 37 |
+
"parent_section_id": "3.1",
|
| 38 |
+
"section_name": "3.1.2 Decoding Process",
|
| 39 |
+
"text": "The speech decoder adopts the bidirectional SAN-M structure. In the first pass of training, the hidden representation obtained by the speech encoder and the acoustic representation generated by the predictor are input to the speech decoder to obtain the initial decoding result .\nIn the second pass of training, the hidden representation and the semantic representation obtained by the sampler are input to the speech decoder to obtain the second decoding result\nDuring the first pass, no gradient backpropagation is performed, and is only used to determine the sampling number of the sampler. obtained in the second pass is used to calculate the ASR loss. In inference, the model directly takes as output and does not calculate ."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "3.2",
|
| 43 |
+
"parent_section_id": "3",
|
| 44 |
+
"section_name": "Vision Hotwords Stream",
|
| 45 |
+
"text": ""
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "3.2.1",
|
| 49 |
+
"parent_section_id": "3.2",
|
| 50 |
+
"section_name": "3.2.1 Vision Representation Learning",
|
| 51 |
+
"text": "In the VH stream, we first need to extract visual features from the image with the vision encoder. A naive idea is to extract the features of the entire image. However, most of the information in the image is unrelated to the audio, especially the background, and introducing this irrelevant information may cause the visual features to become noise. Therefore, we should consider a strategy to extract fine-grained image information.\nThe vision encoder is essentially ViT (Dosovitskiy et al., 2020 ###reference_b7###). ViT uses a Transformer to extract visual features. It follows the application of the Transformer in natural language processing by initially dividing the image into multiple patches, considering each patch as a token, embedding the positional information, and then feeding the visual tokens (Peng et al., 2024 ###reference_b23###) into the Transformer. The features outputted by ViT are the features of each visual token. If the downstream task of ViT is classification, a trainable CLS token can be added in front of the visual tokens, and the score on the CLS token can then be utilized for classification. It is therefore a good choice to utilize each visual token\u2019s features instead of the entire image\u2019s features: at the token granularity level, we can diminish the impact of tokens unrelated to the audio and amplify the influence of tokens related to the audio.\nSo, our strategy is to calculate the features of each visual token and then adjust the weights of the visual tokens. For an ASR model with text hotwords, it is often necessary to consider how to capture the involved hotwords and exclude unrelated hotwords when there are many customized hotwords. This is similar to our consideration, so we call each visual token a vision hotword. Let Z be the input image. First, we utilize the vision encoder to transform it into token-level visual features , where K represents the number of vision hotwords. The initial features of , correspond to the features of the CLS token, while the others are the vision hotwords\u2019 features.\nWe determine the correlation between each vision hotword and the audio by calculating their cosine similarity. Specifically, the first step is to input into the vision adapter, which is composed of a linear layer, to obtain . Next, we add the embedding of a trainable CLS token to the beginning of the acoustic features , resulting in . This is then fed into the speech adapter, which consists of a Transformer layer, to produce the complete audio features .\nThen, we calculate the cosine similarity between the vision hotwords and the audio, denoted as .\nFinally, we adjust the weight of by .\n###figure_3### In order to enhance the effectiveness of similarity-based weight adjustment, an additional loss needs to be introduced to train the adapters. We utilize the acoustic features and the CLS token\u2019s features of the image to calculate the image-audio contrastive loss to optimize the adapters. The reason for using the image-audio contrastive loss instead of a vision hotwords-audio contrastive loss is that the former has a coarser granularity, making it easier to converge. Moreover, during inference, we need to use the image-audio similarity for decoding optimization, which will be explained at length in Section 3.3 ###reference_###. Figure 3 ###reference_### illustrates in detail our optimization of the visual representation by calculating the similarity between vision hotwords and audio, as well as the similarity between image and audio."
|
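A minimal sketch of this reweighting step, written with PyTorch for illustration; the tensor shapes, the mean-pooling of the audio features and the clamping of negative similarities are our assumptions rather than the exact implementation described above.

import torch
import torch.nn.functional as F

def reweight_vision_hotwords(vision_tokens, audio_features):
    """Scale each vision hotword by its cosine similarity to the audio.

    vision_tokens:  (K, D) hotword features after the vision adapter
    audio_features: (T, D) audio features after the speech adapter
    """
    audio_vec = audio_features.mean(dim=0, keepdim=True)  # (1, D) pooled audio (assumption)
    sim = F.cosine_similarity(vision_tokens, audio_vec.expand_as(vision_tokens), dim=-1)  # (K,)
    weights = sim.clamp(min=0.0)  # suppress hotwords uncorrelated with the audio (assumption)
    return vision_tokens * weights.unsqueeze(-1)  # (K, D)

# Toy example with random features (e.g. 49 ViT patches + CLS, 512-dim).
weighted = reweight_vision_hotwords(torch.randn(50, 512), torch.randn(120, 512))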
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "3.2.2",
|
| 55 |
+
"parent_section_id": "3.2",
|
| 56 |
+
"section_name": "3.2.2 Decoding Process",
|
| 57 |
+
"text": "The blue line in Figure 2 ###reference_### illustrates the data flow of the VH module. After extracting the fine visual representation of , we further refine it using an LSTM-based VH encoder to obtain .\nThe next step is to use a text decoder to obtain the probability distribution of each token. Obviously, if we only use which just contains image information as input, it will result in a significant deviation in the probability distribution of tokens, and the VH stream\u2019s outcome will be completely inconsistent with the correct transcription. So, we need to incorporate certain hidden features of the ASR stream to modify the output of the VH stream. Drawing lessons from the idea of Shi et al. (2024 ###reference_b27###), we integrate the acoustic features vector outputted by the predictor and the hidden features outputted by the speech decoder with separately to derive and , which have been influenced by image information. The VH decoder adopts the same bidirectional SAN-M architecture as the speech decoder.\nThe final input to the VH output layer is the average of and ."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "3.3",
|
| 61 |
+
"parent_section_id": "3",
|
| 62 |
+
"section_name": "Dual-stream Merging",
|
| 63 |
+
"text": "###figure_4### In this section, we will discuss how to merge the outputs of the ASR stream and the VH stream. A straightforward approach is to add the probability distributions of tokens from the two streams with a specific weight, denoted as . The formula for is as follows, where , , and are the tokens\u2019 probability distributions of the ASR stream, the VH stream, and the merged result. is the proportion of , and .\nThe has low flexibility, making it difficult to achieve good results in practice. Figure 4 ###reference_### illustrates a merging method based on image-token similarity, referred to as . The vision encoder and adapter are used to calculate the visual features of the image, , and the text encoder and adapter are used to calculate the features of each token, . The formula for has been provided in Section 3.2.1 ###reference_.SSS1###, and the formula for is as follows. The text encoder consists of Transformer layers, the text adapter consists of a linear layer, and is an additional embedding layer.\nBased on and , the cosine similarity of the image and tokens, , can be calculated.\nWhen calculating , we first calculate the text features of the ASR stream output and the VH stream output , respectively, namely and . Then, we calculate their cosine similarities with separately, namely and . Finally, a token-by-token comparison of the two streams is conducted according to and . Specifically, the value of these two similarities at any position represents the similarity score between the token at that position and the image. At the same position, and may obtain different tokens. We determine which token to choose as the final result by judging the values of and at that position. If , we take the token on , and vice versa. After completing the comparisons, can be obtained.\nIn Section 3.2.1 ###reference_.SSS1###, to achieve a fine-grained visual representation, we additionally introduce speech and vision adapters in VHASR to compute the similarity between vision hotwords and audio. Then, to train the adapters, we calculate a contrastive loss between the image and the audio. In the inference stage, we can further utilize the trained adapters to optimize by calculating the image-audio similarity. Specifically, we calculate the image-audio similarity for a batch of data. If the audio of a piece of data does not match its own image, the correlation between this image and audio is considered low. Therefore, for this data, the output of the VH stream is discarded, and the output of the ASR stream is directly used as the final output. We introduce a novel merging method called . It involves initially filtering data with low image-audio correlation using , followed by dual-stream merging as outlined in . We will conduct a detailed comparative experiment on these three merging methods in Section 4 ###reference_###."
|
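The token-by-token comparison behind this similarity-based merging can be sketched as follows (plain Python); the per-token image similarities are assumed to be precomputed, and aligned, equal-length stream outputs are assumed for simplicity.

def merge_by_similarity(tokens_asr, tokens_vh, sim_asr, sim_vh):
    """Pick, at each position, the token whose image-token similarity is higher.

    tokens_asr, tokens_vh: token sequences from the ASR stream and the VH stream
    sim_asr, sim_vh:       per-token cosine similarities with the image features
    """
    merged = []
    for t_a, t_v, s_a, s_v in zip(tokens_asr, tokens_vh, sim_asr, sim_vh):
        merged.append(t_a if s_a >= s_v else t_v)
    return merged

# Toy example: the VH stream wins on the image-related word.
asr = ["a", "dog", "runs", "in", "the", "room"]
vh = ["a", "dog", "runs", "in", "the", "river"]
print(merge_by_similarity(asr, vh,
                          [0.9, 0.8, 0.5, 0.4, 0.4, 0.2],
                          [0.9, 0.8, 0.5, 0.4, 0.4, 0.7]))
# -> ['a', 'dog', 'runs', 'in', 'the', 'river']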
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4",
|
| 67 |
+
"parent_section_id": null,
|
| 68 |
+
"section_name": "Experiment",
|
| 69 |
+
"text": ""
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.1",
|
| 73 |
+
"parent_section_id": "4",
|
| 74 |
+
"section_name": "Configuration",
|
| 75 |
+
"text": "Table 1 ###reference_### shows all the datasets used in this paper, with Flickr8k, ADE20k, COCO, and OpenImages used for training and testing, and SpokenCOCO used for pre-training. Flickr8k is from Harwath and Glass (2015 ###reference_b14###) and SpokenCOCO is from Hsu et al. (2021 ###reference_b16###). ADE20k, COCO and OpenImages are from Local Narratives proposed by Harwath et al. (2016 ###reference_b15###). In order to shorten the experimental period, we filter out data with audio exceeding 40s in ADE20k, and with more than 40 tokens or an audio duration of more than 20 seconds in COCO and OpenImages. We use word error rate (WER) as the evaluation metric to evaluate the speech recognition performance of the ASR stream, the VH stream, , , and .\nOur baseline is the 220M English Paraformer. On Flickr8k, we compare our model with Acoustic-LM-RNN proposed by Sun et al. (2016 ###reference_b31###), the model utilizing object features as visual information (abbreviated as Multimodal (object) in the paper) from Srinivasan et al. (2020a ###reference_b28###), Weighted-DF in Srinivasan et al. (2020c ###reference_b30###), MAG proposed by Srinivasan et al. (2020b ###reference_b29###), the model fusing the two modalities along the sequence dimension (abbreviated as Multimodal (emb) in the paper) from Onea\\textcommabelowt\u0103 and Cucu (2022 ###reference_b21###), and ViLaS in Han et al. (2023 ###reference_b12###).\nThe modules in CLIP-Base (Radford et al., 2021 ###reference_b24###) are utilized to construct the vision encoder and vision adapter for the VH stream, as well as the vision encoder and text encoder for . The vision module of the VH stream freezes its parameters during training, and the \u2019s modules do not require training. The 220M English Paraformer is chosen as the foundational framework for the ASR stream, initialized with the same parameters as the baseline. of the sampler is set to 0.75 and of is set to 0.5. The experimental environment is constructed using Funasr (Gao et al., 2023 ###reference_b8###) and ModelScope. We train the models until convergence and consistently utilize the Adam optimizer with a learning rate of 5e-5."
|
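For reference, WER is the word-level edit distance between reference and hypothesis divided by the reference length; the sketch below is a generic Python implementation, not the scoring script actually used in these experiments.

def wer(reference, hypothesis):
    """Word error rate: (substitutions + deletions + insertions) / reference length."""
    ref, hyp = reference.split(), hypothesis.split()
    # Levenshtein distance over words via dynamic programming.
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1,         # deletion
                          d[i][j - 1] + 1,         # insertion
                          d[i - 1][j - 1] + cost)  # substitution
    return d[len(ref)][len(hyp)] / max(len(ref), 1)

print(wer("a man rides a grey horse", "a man rides a gry horse"))  # -> 0.1666...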
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "4.2",
|
| 79 |
+
"parent_section_id": "4",
|
| 80 |
+
"section_name": "Main Result",
|
| 81 |
+
"text": "Table 2 ###reference_### presents the results of the proposed method and the baseline on four datasets. For the ASR stream and the VH stream, the WER of the ASR stream is lower. The VH stream acquires the ability to transcribe by utilizing the hidden layer\u2019s features of the ASR stream as the VH decoder\u2019s input. Among the three merging methods, has the best results, followed by , and finally . This is consistent with our expected results. has limited flexibility, and the fixed weight proportion is not applicable to all data. By calculating the image-token similarity, comparing the results of the ASR stream and the VH stream token by token, and producing a final output with the highest similarity, achieves a WER that is better than both and . Furthermore, by additionally calculating the audio-image similarity and excluding the VH stream when the similarity is low, reduces the transcription errors compared to . For the baseline and the ASR stream, the ASR stream performs better, indicating that joint training of the ASR stream, the VH stream, and audio-image pairing improves the unimodal ASR\u2019s performance. For the baseline and , outperforms the baseline on all four datasets, demonstrating the effectiveness of our method. In addition, pre-training with large-scale corpora can further strengthen the performance of the model. We use SpokenCOCO, which contains the largest amount of data, to pre-train the proposed model, resulting in improvements in all five metrics of the model across all four datasets."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "4.3",
|
| 85 |
+
"parent_section_id": "4",
|
| 86 |
+
"section_name": "Ordinary Multimodal Fusion vs Hotword Level Multimodal Fusion",
|
| 87 |
+
"text": "The comparison results are shown in Table 3 ###reference_###. Without vision information, ViLaS (Han et al., 2023 ###reference_b12###) performs better than our VHASR since it has been sufficiently pretrained. With vision information, VHASR\u2019s ASR performance is significantly enhanced and it achieves the lowest WER. Our experimental results clearly indicate that the incorporation of visual information aids in rectifying tokens with ASR transcription errors and decreasing WER. However, Srinivasan et al. (2020b ###reference_b29###), Onea\\textcommabelowt\u0103 and Cucu (2022 ###reference_b21###) and Han et al. (2023 ###reference_b12###) argue that the speech in Flickr8k is sufficiently clear, making it challenging to enhance transcription performance by incorporating additional information from other modalities.\nMAG (Srinivasan et al., 2020b ###reference_b29###) utilizes global visual features, which may introduce a significant amount of information unrelated to the audio and potentially impact the model\u2019s ASR performance. They considered this issue and proposed MAOP, which utilizes multiple fine-grained image features extracted from object proposals. However, on the clean Flickr8k, MAOP\u2019s performance is not as good as MAG\u2019s. Onea\\textcommabelowt\u0103 and Cucu (2022 ###reference_b21###) take a sequence of image feature vectors from the layer preceding the global average pooling layer in the vision encoder, to leverage more fine-grained characteristics of the image. However, they did not consider that some image vectors in the sequence have low correlation with the audio, and introducing these vectors fully into the backbone still impacts the model\u2019s recognition ability. Han et al. (2023 ###reference_b12###) use ViT as a vision encoder and utilize the image tokens for the visual representation, which aligns with our approach. However, they do not reduce the weight of visual tokens with low importance, as we do, and as a result the introduction of visual information does not improve the recognition performance of their model. Compared to these works that use an ordinary multimodal fusion approach, our proposed method, which injects visual modality information via vision hotwords, makes improvements in refining the image representation and eliminating irrelevant image information. Therefore, our proposed model can enhance performance using visual features even when the dataset is of high quality and the baseline is strong."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "4.4",
|
| 91 |
+
"parent_section_id": "4",
|
| 92 |
+
"section_name": "Audio Corruption",
|
| 93 |
+
"text": "To further demonstrate that introducing image information related to the audio can reduce transcription errors in the proposed model, we conduct an audio corruption experiment proposed by Srinivasan et al. (2020a ###reference_b28###). We first use the timestamp prediction model proposed by Shi et al. (2023 ###reference_b26###) to align the audio and the transcribed text. Then, we mask a certain proportion of the words in the audio by replacing the audio segments corresponding to the masked words with Additive White Gaussian Noise (AWGN). We use the recovery rate (RR) defined in Srinivasan et al. (2020a ###reference_b28###) to calculate the proportion of masked words recovered in the model\u2019s transcription results. Unlike Srinivasan et al. (2020a ###reference_b28###), our approach only masks the test data, while the training data remains unchanged.\nWe conduct this experiment on Flickr8k, ADE20k, and COCO, and the experimental results are shown in Table 4 ###reference_###. Comparing the baseline and the ASR stream, regardless of the mask ratio, the ASR stream has lower WER and higher RR on all three datasets. This suggests that the jointly trained ASR stream exhibits stronger noise resistance and audio content prediction abilities compared to unimodal ASR. Comparing the ASR stream and , by incorporating image information, significantly reduces WER and enhances RR, as evidenced by the mask ratio across the three datasets. This indicates that image information can assist the model in capturing image-related words in the audio, enabling the model to accurately transcribe these words even if their corresponding audio is masked. Furthermore, we can argue that on normal unmasked data, image information can assist the model in correcting words that are related to the image but transcribed incorrectly."
|
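A simplified sketch of the masking step and the recovery-rate computation, assuming word-level timestamps are already available; the sample rate, noise level and word-matching rule below are illustrative assumptions, not the exact experimental setup.

import numpy as np

def mask_words_with_awgn(waveform, word_spans, sample_rate=16000, noise_std=0.01):
    """Replace the audio of selected words with additive white Gaussian noise.

    waveform:   1-D numpy array of audio samples
    word_spans: list of (start_sec, end_sec) spans of the words to mask
    """
    corrupted = waveform.copy()
    for start, end in word_spans:
        s, e = int(start * sample_rate), int(end * sample_rate)
        corrupted[s:e] = np.random.normal(0.0, noise_std, size=e - s)
    return corrupted

def recovery_rate(masked_words, transcripts):
    """Fraction of masked words that still appear in the corresponding transcriptions."""
    recovered = sum(1 for word, hyp in zip(masked_words, transcripts)
                    if word.lower() in hyp.lower().split())
    return recovered / max(len(masked_words), 1)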
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "4.5",
|
| 97 |
+
"parent_section_id": "4",
|
| 98 |
+
"section_name": "Ablation Result",
|
| 99 |
+
"text": "To demonstrate that the refined image representation extracted by the method proposed in Section 3.2.1 ###reference_.SSS1### is more effective than the full image representation, we conduct ablation experiments. The experimental results are presented in Table 5 ###reference_###. On all four datasets, whether it is or , the model using the refined image representation performs better. This not only shows the effectiveness of the method described in Section 3.2.1 ###reference_.SSS1### but also offers one of the reasons why our model is stronger than the other benchmarks.\nIn order to showcase the strength of our baseline, we evaluate its ASR performance against Whisper. The experimental results are presented in Table 6 ###reference_###. As the table shows, Whisper excels on F8k. This is attributed to: (1) Whisper\u2019s utilization of a large amount of data for pretraining, which we did not employ; (2) F8k being a high-quality dataset where many IBSR works achieve superior results without using visual information (refer to Table 3 ###reference_###). Nevertheless, our approach can enhance the ASR capability of the model by effectively leveraging visual information. On ADE20k, a dataset with more noise, our baseline demonstrates stronger noise resistance and performs better than Whisper. In essence, our baseline is on par with Whisper. Furthermore, our system\u2019s ASR module is adaptable, and we will explore which ASR module can achieve optimal performance for VHASR in the future."
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "5",
|
| 103 |
+
"parent_section_id": null,
|
| 104 |
+
"section_name": "Conclusion",
|
| 105 |
+
"text": "We propose VHASR, a multimodal speech recognition system that utilizes vision hotwords to strengthen the model\u2019s speech recognition ability. Our system features a dual-stream architecture, consisting of an ASR stream and a VH stream that firstly transcribe separately and then combine their outputs. By leveraging vision hotwords, the VH stream concentrates on key visual information, allowing for precise transcription of words associated with images. In the merging phase, the VH stream assists the ASR stream in correcting any mis-transcribed words related to images, thereby ensuring high accuracy in the final transcription. We conduct comprehensive experiments on Flickr8k, ADE20k, COCO, and OpenImages, which showcase the effectiveness of vision hotwords and the robust ASR performance of VHASR."
|
| 106 |
+
}
|
| 107 |
+
],
|
| 108 |
+
"appendix": [
|
| 109 |
+
{
|
| 110 |
+
"section_id": "Appendix 1",
|
| 111 |
+
"parent_section_id": null,
|
| 112 |
+
"section_name": "Appendix A Appendix",
|
| 113 |
+
"text": "In Section 4.4 ###reference_###, we demonstrated that VHASR can use image information to correct words which is related to images and has transcription errors. In this section, we will use examples to explain how VHASR achieves this.\nFigure 5 ###reference_### shows three examples from Flickr8k. \"A\" refers to the transcription of the ASR stream, \"V\" refers to the transcription of the VH stream, \"M\" refers to the transcription obtained by , and \"T\" refers to the real transcription. We extract the attention score matrix from the last layer of the VH decoder and create a heatmap. The horizontal axis of the heatmap represents the subtoken, while the vertical axis represents the number of vision hotwords. We identify the subtokens that are transcribed incorrectly by the ASR stream but corrected by the VH stream. Then, we extract the top 5 vision hotwords that have the highest attention scores with them. Chosen vision hotwords are marked on the original image.\nIn the first example, the ASR stream incorrectly transcribes \"grey\" as \"gry\", while the VH stream doesn\u2019t make this mistake. The combination of the two streams helps correct the error. specifically, the subtokens corresponding to \"grey\" focus on six vision hotwords, five of which are background, and one includes the grey pants of the dancer. Therefore, the vision encoder successfully extracts information about \"grey\" and helps the VH stream transcribe \"grey\" accurately. Furthermore, by merging the ASR stream and VH stream with , error in the ASR stream is rectified. In the second example, the ASR stream incorrectly transcribes \"girls\" as \"girl\", which was also corrected by the accurate VH stream. Among the vision hotwords corresponding to \"girls\", three are related to background, and two include the heads of the girls, so the VH stream successfully identified \"girls\". In the third example, the ASR stream incorrectly transcribes \"river\" as \"room\", but the VH stream correctly transcribes \"river\" by utilizing the information about \"river\" contained in the vision hotwords. By merging, the VH stream helps correct error in the ASR stream. These examples are not unique, and the same phenomenon occurs in many utterances. In Figure 6 ###reference_###, we show another three examples from COCO for readers\u2019 reference.\nAlthough the VH stream of VHASR has less speech recognition ability than the ASR stream, it can extract features from key vision hotwords and capture keywords in transcription, thereby correctly identifying words that may be difficult for the ASR stream to recognize. After token-by-token merging based on visual-token similarity, the VH stream can correct some transcription errors in the ASR stream, leading to a more accurate transcription.\n###figure_5### ###figure_6###"
|
| 114 |
+
}
|
| 115 |
+
],
|
| 116 |
+
"tables": {
|
| 117 |
+
"1": {
|
| 118 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T1\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S4.T1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T1.1.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_t\" id=\"S4.T1.1.1.1.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Dataset</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T1.1.1.1.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Train</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T1.1.1.1.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Validation</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T1.1.1.1.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Test</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T1.1.2.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" id=\"S4.T1.1.2.1.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Flickr8k</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.2.1.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">30,000</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.2.1.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">5,000</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T1.1.2.1.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">5,000</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.3.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.1.3.2.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">ADE20k</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.3.2.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">17,067</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.3.2.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">1,672</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.3.2.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">-</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.4.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.1.4.3.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">COCO</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.4.3.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">49,109</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.4.3.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">3,232</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.4.3.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">-</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.5.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" id=\"S4.T1.1.5.4.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">OpenImages</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.5.4.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">269,749</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.5.4.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">27,813</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T1.1.5.4.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">-</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T1.1.6.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_b\" id=\"S4.T1.1.6.5.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">SpokenCOCO</th>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T1.1.6.5.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">592,187</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T1.1.6.5.3\" 
style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">25,035</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T1.1.6.5.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">-</td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Datasets used in experiments.</figcaption>\n</figure>",
|
| 119 |
+
"capture": "Table 1: Datasets used in experiments."
|
| 120 |
+
},
|
| 121 |
+
"2": {
|
| 122 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T2\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S4.T2.13\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T2.13.14.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_t\" colspan=\"2\" id=\"S4.T2.13.14.1.1\" rowspan=\"2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.13.14.1.1.1\">Dataset</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.13.14.1.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Baseline</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" colspan=\"6\" id=\"S4.T2.13.14.1.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">VHASR</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.13.13\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.2.2.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\n ()</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.3.3.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.5.5.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\n ()</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.7.7.7\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\n ()</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.9.9.9\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\n ()</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.11.11.11\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\n ()</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T2.13.13.13\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\n ()</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T2.13.15.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" colspan=\"2\" id=\"S4.T2.13.15.1.1\" rowspan=\"2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.13.15.1.1.1\">Flickr8k</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.15.1.2\" rowspan=\"2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.13.15.1.2.1\">3.86</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.15.1.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\u2715</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.15.1.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">3.84</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.15.1.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">3.94</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.15.1.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">3.82</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.15.1.7\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">3.62</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.15.1.8\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">3.60</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.13.16.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.16.2.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\u2713</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.16.2.2\" 
style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">3.55</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.16.2.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">3.51</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.16.2.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">3.54</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.16.2.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">3.22</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.16.2.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.13.16.2.6.1\">3.21</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.13.17.3\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" colspan=\"2\" id=\"S4.T2.13.17.3.1\" rowspan=\"2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.13.17.3.1.1\">ADE20k</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.17.3.2\" rowspan=\"2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.13.17.3.2.1\">10.51</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.17.3.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\u2715</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.17.3.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">10.33</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.17.3.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">10.52</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.17.3.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">10.38</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.17.3.7\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">9.80</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.17.3.8\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">9.60</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.13.18.4\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.18.4.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\u2713</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.18.4.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">10.27</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.18.4.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">10.37</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.18.4.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">10.32</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.18.4.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">9.62</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.18.4.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.13.18.4.6.1\">9.53</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.13.19.5\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" colspan=\"2\" id=\"S4.T2.13.19.5.1\" rowspan=\"2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.13.19.5.1.1\">COCO</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.19.5.2\" rowspan=\"2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.13.19.5.2.1\">10.44</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.19.5.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\u2715</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.19.5.4\" 
style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">10.35</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.19.5.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">10.34</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.19.5.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">10.28</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.19.5.7\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">9.63</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.19.5.8\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">9.61</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.13.20.6\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.20.6.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\u2713</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.20.6.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">10.25</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.20.6.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">10.36</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.20.6.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">10.28</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.20.6.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">9.60</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T2.13.20.6.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.13.20.6.6.1\">9.59</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.13.21.7\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_b ltx_border_t\" colspan=\"2\" id=\"S4.T2.13.21.7.1\" rowspan=\"2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.13.21.7.1.1\">OpenImages</span></th>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S4.T2.13.21.7.2\" rowspan=\"2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T2.13.21.7.2.1\">8.72</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.21.7.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\u2715</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.21.7.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">8.61</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.21.7.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">8.58</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.21.7.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">8.58</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.21.7.7\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">7.73</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T2.13.21.7.8\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">7.71</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T2.13.22.8\">\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.13.22.8.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\u2713</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.13.22.8.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">8.58</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.13.22.8.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">8.63</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.13.22.8.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">8.59</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T2.13.22.8.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">7.70</td>\n<td class=\"ltx_td 
ltx_align_center ltx_border_b\" id=\"S4.T2.13.22.8.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T2.13.22.8.6.1\">7.68</span></td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>Main results of proposed model in four datasets. The and represent the result of the ASR stream and VH stream, respectively. combines the outcomes of two streams with designated weights, whereas merges by assessing the similarity between image and text tokens. Building on , evaluates the similarity between images and audio to eliminate unrelated images.</figcaption>\n</figure>",
|
| 123 |
+
"capture": "Table 2: Main results of proposed model in four datasets. The and represent the result of the ASR stream and VH stream, respectively. combines the outcomes of two streams with designated weights, whereas merges by assessing the similarity between image and text tokens. Building on , evaluates the similarity between images and audio to eliminate unrelated images."
|
| 124 |
+
},
|
| 125 |
+
"3": {
|
| 126 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T3\">\n<div class=\"ltx_inline-block ltx_transformed_outer\" id=\"S4.T3.8\" style=\"width:433.6pt;height:514.4pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(148.5pt,-176.2pt) scale(3.17540515084887,3.17540515084887) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S4.T3.8.8\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T3.1.1.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_border_t\" colspan=\"4\" id=\"S4.T3.1.1.1.2\" rowspan=\"2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\"><span class=\"ltx_text\" id=\"S4.T3.1.1.1.2.1\">Model</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" colspan=\"2\" id=\"S4.T3.1.1.1.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Word Error Rate ()</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.8.8.9.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T3.8.8.9.1.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">w/o vision</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T3.8.8.9.1.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">w vision</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T3.2.2.2\">\n<td class=\"ltx_td ltx_align_left ltx_border_t\" colspan=\"4\" id=\"S4.T3.2.2.2.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Acoustic-LM-RNN <cite class=\"ltx_cite ltx_citemacro_citep\">(Sun et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2410.00822v2#bib.bib31\" title=\"\">2016</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.2.2.2.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">14.75</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T3.2.2.2.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">13.81 ( 0.94)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.3.3.3\">\n<td class=\"ltx_td ltx_align_left\" colspan=\"4\" id=\"S4.T3.3.3.3.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Multimodal (object) <cite class=\"ltx_cite ltx_citemacro_citep\">(Srinivasan et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2410.00822v2#bib.bib28\" title=\"\">2020a</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.3.3.3.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">16.40</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.3.3.3.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">14.80 ( 1.60)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.4.4.4\">\n<td class=\"ltx_td ltx_align_left\" colspan=\"4\" id=\"S4.T3.4.4.4.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Weighted-DF <cite class=\"ltx_cite ltx_citemacro_citep\">(Srinivasan et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2410.00822v2#bib.bib30\" title=\"\">2020c</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.4.4.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">13.70</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.4.4.4.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">13.40 ( 0.30)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.5.5.5\">\n<td class=\"ltx_td ltx_align_left\" colspan=\"4\" id=\"S4.T3.5.5.5.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">MAG <cite class=\"ltx_cite ltx_citemacro_citep\">(Srinivasan et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2410.00822v2#bib.bib29\" title=\"\">2020b</a>)</cite>\n</td>\n<td 
class=\"ltx_td ltx_align_center\" id=\"S4.T3.5.5.5.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">13.60</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.5.5.5.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">13.80 ( 0.20)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.6.6.6\">\n<td class=\"ltx_td ltx_align_left\" colspan=\"4\" id=\"S4.T3.6.6.6.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">Multimodal (emb) <cite class=\"ltx_cite ltx_citemacro_citep\">(Onea<span class=\"ltx_ERROR undefined\">\\textcommabelow</span>t\u0103 and Cucu, <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2410.00822v2#bib.bib21\" title=\"\">2022</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.6.6.6.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">3.80</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.6.6.6.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">4.30 ( 0.50)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.7.7.7\">\n<td class=\"ltx_td ltx_align_left\" colspan=\"4\" id=\"S4.T3.7.7.7.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">ViLaS <cite class=\"ltx_cite ltx_citemacro_citep\">(Han et\u00a0al., <a class=\"ltx_ref\" href=\"https://arxiv.org/html/2410.00822v2#bib.bib12\" title=\"\">2023</a>)</cite>\n</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.7.7.7.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">3.40</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T3.7.7.7.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">3.40 ( 0)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T3.8.8.8\">\n<td class=\"ltx_td ltx_align_left ltx_border_b ltx_border_t\" colspan=\"4\" id=\"S4.T3.8.8.8.2\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">VHASR</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S4.T3.8.8.8.3\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">3.86</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S4.T3.8.8.8.1\" style=\"padding-top:2.5pt;padding-bottom:2.5pt;\">\n<span class=\"ltx_text ltx_font_bold\" id=\"S4.T3.8.8.8.1.1\">3.21</span> ( 0.65)</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>Comparison results with benchmarks in F8k.</figcaption>\n</figure>",
|
| 127 |
+
"capture": "Table 3: Comparison results with benchmarks in F8k."
|
| 128 |
+
},
|
| 129 |
+
"4": {
|
| 130 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T4\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S4.T4.12\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T4.12.13.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.12.13.1.1\" rowspan=\"2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T4.12.13.1.1.1\">Dataset</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.12.13.1.2\" rowspan=\"2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T4.12.13.1.2.1\">Mask Ratio</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" colspan=\"2\" id=\"S4.T4.12.13.1.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">Baseline</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" colspan=\"4\" id=\"S4.T4.12.13.1.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">VHASR</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.12.12\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.2.2.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\n ()</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.4.4.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\n ()</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.6.6.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\n ()</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.8.8.8\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\n()</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.10.10.10\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\n ()</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T4.12.12.12\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">\n()</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T4.12.14.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.14.1.1\" rowspan=\"3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T4.12.14.1.1.1\">Flickr8k</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.14.1.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">30%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.14.1.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">29.36</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.14.1.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">80.75</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.14.1.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">27.39</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.14.1.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">83.22</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.14.1.7\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.14.1.7.1\">22.36</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.14.1.8\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.14.1.8.1\">83.29</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.12.15.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.15.2.1\" 
style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">50%</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.15.2.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">46.79</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.15.2.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">69.80</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.15.2.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">45.01</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.15.2.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">72.84</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.15.2.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.15.2.6.1\">38.35</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.15.2.7\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.15.2.7.1\">73.38</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.12.16.3\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.16.3.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">70%</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.16.3.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">62.66</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.16.3.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">58.83</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.16.3.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">63.43</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.16.3.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">60.60</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.16.3.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.16.3.6.1\">55.04</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.16.3.7\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.16.3.7.1\">61.34</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.12.17.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.17.4.1\" rowspan=\"3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T4.12.17.4.1.1\">ADE20k</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.17.4.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">30%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.17.4.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">24.79</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.17.4.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">92.02</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.17.4.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">24.40</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.17.4.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">92.51</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.17.4.7\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.17.4.7.1\">19.96</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.17.4.8\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.17.4.8.1\">92.60</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.12.18.5\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.18.5.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">50%</td>\n<td class=\"ltx_td ltx_align_center\" 
id=\"S4.T4.12.18.5.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">34.16</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.18.5.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">89.18</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.18.5.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">32.95</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.18.5.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">89.86</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.18.5.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.18.5.6.1\">26.91</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.18.5.7\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.18.5.7.1\">90.06</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.12.19.6\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.19.6.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">70%</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.19.6.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">42.30</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.19.6.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">86.33</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.19.6.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">40.70</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.19.6.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">87.45</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.19.6.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.19.6.6.1\">33.39</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.19.6.7\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.19.6.7.1\">87.46</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.12.20.7\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S4.T4.12.20.7.1\" rowspan=\"3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text\" id=\"S4.T4.12.20.7.1.1\">COCO</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.20.7.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">30%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.20.7.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">25.60</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.20.7.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">92.02</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.20.7.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">24.23</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.20.7.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">92.85</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.20.7.7\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.20.7.7.1\">20.13</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T4.12.20.7.8\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.20.7.8.1\">92.87</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.12.21.8\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.21.8.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">50%</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.21.8.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">35.59</td>\n<td 
class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.21.8.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">89.42</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.21.8.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">33.22</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.21.8.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.21.8.5.1\">91.05</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.21.8.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.21.8.6.1\">27.06</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T4.12.21.8.7\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.21.8.7.1\">91.05</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T4.12.22.9\">\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T4.12.22.9.1\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">70%</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T4.12.22.9.2\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">44.00</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T4.12.22.9.3\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">87.76</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T4.12.22.9.4\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">41.35</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T4.12.22.9.5\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\">89.26</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T4.12.22.9.6\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.22.9.6.1\">33.84</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T4.12.22.9.7\" style=\"padding-top:0.5pt;padding-bottom:0.5pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T4.12.22.9.7.1\">89.32</span></td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 4: </span>Experimental results of audio corruption with AWGN.</figcaption>\n</figure>",
|
| 131 |
+
"capture": "Table 4: Experimental results of audio corruption with AWGN."
|
| 132 |
+
},
|
| 133 |
+
"5": {
|
| 134 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T5\">\n<div class=\"ltx_inline-block ltx_transformed_outer\" id=\"S4.T5.4\" style=\"width:433.6pt;height:219.2pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(110.0pt,-55.6pt) scale(2.02942053181368,2.02942053181368) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S4.T5.4.4\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T5.4.4.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_column ltx_th_row ltx_border_t\" colspan=\"2\" id=\"S4.T5.4.4.4.5\" rowspan=\"2\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text\" id=\"S4.T5.4.4.4.5.1\">Dataset</span></th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" colspan=\"2\" id=\"S4.T5.2.2.2.2\" style=\"padding-top:1pt;padding-bottom:1pt;\">\n ()</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" colspan=\"2\" id=\"S4.T5.4.4.4.4\" style=\"padding-top:1pt;padding-bottom:1pt;\">\n ()</th>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.4.4.5.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T5.4.4.5.1.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">w/o refine</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T5.4.4.5.1.2\" style=\"padding-top:1pt;padding-bottom:1pt;\">w refine</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T5.4.4.5.1.3\" style=\"padding-top:1pt;padding-bottom:1pt;\">w/o refine</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T5.4.4.5.1.4\" style=\"padding-top:1pt;padding-bottom:1pt;\">w refine</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T5.4.4.6.1\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_t\" colspan=\"2\" id=\"S4.T5.4.4.6.1.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">Flickr8k</th>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.4.4.6.1.2\" style=\"padding-top:1pt;padding-bottom:1pt;\">3.88</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.4.4.6.1.3\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.4.4.6.1.3.1\">3.82</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.4.4.6.1.4\" style=\"padding-top:1pt;padding-bottom:1pt;\">3.67</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T5.4.4.6.1.5\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.4.4.6.1.5.1\">3.62</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.4.4.7.2\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row\" colspan=\"2\" id=\"S4.T5.4.4.7.2.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">ADE20k</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.4.7.2.2\" style=\"padding-top:1pt;padding-bottom:1pt;\">10.67</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.4.7.2.3\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.4.4.7.2.3.1\">10.38</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.4.7.2.4\" style=\"padding-top:1pt;padding-bottom:1pt;\">10.17</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.4.7.2.5\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.4.4.7.2.5.1\">9.80</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.4.4.8.3\">\n<th class=\"ltx_td ltx_align_left 
ltx_th ltx_th_row\" colspan=\"2\" id=\"S4.T5.4.4.8.3.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">COCO</th>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.4.8.3.2\" style=\"padding-top:1pt;padding-bottom:1pt;\">10.46</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.4.8.3.3\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.4.4.8.3.3.1\">10.28</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.4.8.3.4\" style=\"padding-top:1pt;padding-bottom:1pt;\">9.64</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T5.4.4.8.3.5\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.4.4.8.3.5.1\">9.63</span></td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T5.4.4.9.4\">\n<th class=\"ltx_td ltx_align_left ltx_th ltx_th_row ltx_border_b\" colspan=\"2\" id=\"S4.T5.4.4.9.4.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">OpenImages</th>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T5.4.4.9.4.2\" style=\"padding-top:1pt;padding-bottom:1pt;\">8.73</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T5.4.4.9.4.3\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.4.4.9.4.3.1\">8.58</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T5.4.4.9.4.4\" style=\"padding-top:1pt;padding-bottom:1pt;\">7.81</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S4.T5.4.4.9.4.5\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T5.4.4.9.4.5.1\">7.73</span></td>\n</tr>\n</tbody>\n</table>\n</span></div>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 5: </span> Experimental results of ablation studies.</figcaption>\n</figure>",
|
| 135 |
+
"capture": "Table 5: Experimental results of ablation studies."
|
| 136 |
+
},
|
| 137 |
+
"6": {
|
| 138 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T6\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S4.T6.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S4.T6.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T6.1.1.1.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">Model</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T6.1.1.1.2\" style=\"padding-top:1pt;padding-bottom:1pt;\">Params</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T6.1.1.1.3\" style=\"padding-top:1pt;padding-bottom:1pt;\">Trained</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T6.1.1.1.4\" style=\"padding-top:1pt;padding-bottom:1pt;\">Flickr8k</th>\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" id=\"S4.T6.1.1.1.5\" style=\"padding-top:1pt;padding-bottom:1pt;\">ADE20k</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S4.T6.1.2.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T6.1.2.1.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">Whisper</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T6.1.2.1.2\" style=\"padding-top:1pt;padding-bottom:1pt;\">244M</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T6.1.2.1.3\" style=\"padding-top:1pt;padding-bottom:1pt;\">\u2713</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T6.1.2.1.4\" style=\"padding-top:1pt;padding-bottom:1pt;\">3.38</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S4.T6.1.2.1.5\" style=\"padding-top:1pt;padding-bottom:1pt;\">14.28</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.1.3.2\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.3.2.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">Whisper</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.3.2.2\" style=\"padding-top:1pt;padding-bottom:1pt;\">1.5B</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.3.2.3\" style=\"padding-top:1pt;padding-bottom:1pt;\">\u2715</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.3.2.4\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T6.1.3.2.4.1\">3.05</span></td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.3.2.5\" style=\"padding-top:1pt;padding-bottom:1pt;\">14.08</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.1.4.3\">\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.4.3.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">Baseline</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.4.3.2\" style=\"padding-top:1pt;padding-bottom:1pt;\">220M</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.4.3.3\" style=\"padding-top:1pt;padding-bottom:1pt;\">\u2713</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.4.3.4\" style=\"padding-top:1pt;padding-bottom:1pt;\">3.86</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S4.T6.1.4.3.5\" style=\"padding-top:1pt;padding-bottom:1pt;\">10.51</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S4.T6.1.5.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S4.T6.1.5.4.1\" style=\"padding-top:1pt;padding-bottom:1pt;\">VHASR</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S4.T6.1.5.4.2\" style=\"padding-top:1pt;padding-bottom:1pt;\">333M</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S4.T6.1.5.4.3\" style=\"padding-top:1pt;padding-bottom:1pt;\">\u2713</td>\n<td class=\"ltx_td 
ltx_align_center ltx_border_b ltx_border_t\" id=\"S4.T6.1.5.4.4\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T6.1.5.4.4.1\">3.21</span></td>\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_t\" id=\"S4.T6.1.5.4.5\" style=\"padding-top:1pt;padding-bottom:1pt;\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T6.1.5.4.5.1\">9.53</span></td>\n</tr>\n</tbody>\n</table>\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 6: </span>WER of Whisper, our baseline and VHASR on Flickr8k and ADE20k. The 1.5B Whisper is version V3.</figcaption>\n</figure>",
|
| 139 |
+
"capture": "Table 6: WER of Whisper, our baseline and VHASR on Flickr8k and ADE20k. The 1.5B Whisper is version V3."
|
| 140 |
+
}
|
| 141 |
+
},
|
| 142 |
+
"image_paths": {
|
| 143 |
+
"1": {
|
| 144 |
+
"figure_path": "2410.00822v2_figure_1.png",
|
| 145 |
+
"caption": "Figure 1: Comparison between text hotwords and the vision hotwords proposed in this paper. Text hotwords are a set of custom keywords that are prone to errors, while vision hotwords refer to patches of an image. A darker rectangle indicates that the hotword is more relevant to the transcription.",
|
| 146 |
+
"url": "http://arxiv.org/html/2410.00822v2/x1.png"
|
| 147 |
+
},
|
| 148 |
+
"2": {
|
| 149 |
+
"figure_path": "2410.00822v2_figure_2.png",
|
| 150 |
+
"caption": "Figure 2: The structure of our proposed model, VHASR. The green dashed box contains the modules of the ASR stream, while the blue dashed box contains the modules of the VH stream. The data flow in the ASR part is indicated by green and red lines. It only passes through the red lines during the ASR model\u2019s second pass of training. The VH stream\u2019s data flow is denoted by blue lines. The data flow for calculating audio-image similarity is represented by yellow lines. The purple lines illustrate the data flow when merging the two streams.",
|
| 151 |
+
"url": "http://arxiv.org/html/2410.00822v2/x2.png"
|
| 152 |
+
},
|
| 153 |
+
"3": {
|
| 154 |
+
"figure_path": "2410.00822v2_figure_3.png",
|
| 155 |
+
"caption": "Figure 3: Using vision hotword-audio similitude and image-audio similitude to learn fine visual representation.",
|
| 156 |
+
"url": "http://arxiv.org/html/2410.00822v2/x3.png"
|
| 157 |
+
},
|
| 158 |
+
"4": {
|
| 159 |
+
"figure_path": "2410.00822v2_figure_4.png",
|
| 160 |
+
"caption": "Figure 4: The specific process of decoding optimization.",
|
| 161 |
+
"url": "http://arxiv.org/html/2410.00822v2/x4.png"
|
| 162 |
+
},
|
| 163 |
+
"5": {
|
| 164 |
+
"figure_path": "2410.00822v2_figure_5.png",
|
| 165 |
+
"caption": "Figure 5: Three examples of how the VH stream helps to rectify the ASR stream\u2019s errors.",
|
| 166 |
+
"url": "http://arxiv.org/html/2410.00822v2/x5.png"
|
| 167 |
+
},
|
| 168 |
+
"6": {
|
| 169 |
+
"figure_path": "2410.00822v2_figure_6.png",
|
| 170 |
+
"caption": "Figure 6: More examples from the case study.",
|
| 171 |
+
"url": "http://arxiv.org/html/2410.00822v2/x6.png"
|
| 172 |
+
}
|
| 173 |
+
},
|
| 174 |
+
"validation": true,
|
| 175 |
+
"references": [
|
| 176 |
+
{
|
| 177 |
+
"1": {
|
| 178 |
+
"title": "Language models are few-shot learners.",
|
| 179 |
+
"author": "Tom B Brown. 2020.",
|
| 180 |
+
"venue": "arXiv preprint arXiv:2005.14165.",
|
| 181 |
+
"url": null
|
| 182 |
+
}
|
| 183 |
+
},
|
| 184 |
+
{
|
| 185 |
+
"2": {
|
| 186 |
+
"title": "Multimodal grounding for sequence-to-sequence speech recognition.",
|
| 187 |
+
"author": "Ozan Caglayan, Ramon Sanabria, Shruti Palaskar, Loic Barraul, and Florian Metze. 2019.",
|
| 188 |
+
"venue": "In ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 8648\u20138652. IEEE.",
|
| 189 |
+
"url": null
|
| 190 |
+
}
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"3": {
|
| 194 |
+
"title": "Listen, attend and spell.",
|
| 195 |
+
"author": "William Chan, Navdeep Jaitly, Quoc V Le, and Oriol Vinyals. 2015.",
|
| 196 |
+
"venue": "arXiv preprint arXiv:1508.01211.",
|
| 197 |
+
"url": null
|
| 198 |
+
}
|
| 199 |
+
},
|
| 200 |
+
{
|
| 201 |
+
"4": {
|
| 202 |
+
"title": "Multimodal speech recognition for language-guided embodied agents.",
|
| 203 |
+
"author": "Allen Chang, Xiaoyuan Zhu, Aarav Monga, Seoho Ahn, Tejas Srinivasan, and Jesse Thomason. 2023.",
|
| 204 |
+
"venue": "arXiv preprint arXiv:2302.14030.",
|
| 205 |
+
"url": null
|
| 206 |
+
}
|
| 207 |
+
},
|
| 208 |
+
{
|
| 209 |
+
"5": {
|
| 210 |
+
"title": "Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models.",
|
| 211 |
+
"author": "Yunfei Chu, Jin Xu, Xiaohuan Zhou, Qian Yang, Shiliang Zhang, Zhijie Yan, Chang Zhou, and Jingren Zhou. 2023.",
|
| 212 |
+
"venue": "arXiv preprint arXiv:2311.07919.",
|
| 213 |
+
"url": null
|
| 214 |
+
}
|
| 215 |
+
},
|
| 216 |
+
{
|
| 217 |
+
"6": {
|
| 218 |
+
"title": "Cif: Continuous integrate-and-fire for end-to-end speech recognition.",
|
| 219 |
+
"author": "Linhao Dong and Bo Xu. 2020.",
|
| 220 |
+
"venue": "In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6079\u20136083. IEEE.",
|
| 221 |
+
"url": null
|
| 222 |
+
}
|
| 223 |
+
},
|
| 224 |
+
{
|
| 225 |
+
"7": {
|
| 226 |
+
"title": "An image is worth 16x16 words: Transformers for image recognition at scale.",
|
| 227 |
+
"author": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. 2020.",
|
| 228 |
+
"venue": "arXiv preprint arXiv:2010.11929.",
|
| 229 |
+
"url": null
|
| 230 |
+
}
|
| 231 |
+
},
|
| 232 |
+
{
|
| 233 |
+
"8": {
|
| 234 |
+
"title": "Funasr: A fundamental end-to-end speech recognition toolkit.",
|
| 235 |
+
"author": "Zhifu Gao, Zerui Li, Jiaming Wang, Haoneng Luo, Xian Shi, Mengzhe Chen, Yabin Li, Lingyun Zuo, Zhihao Du, Zhangyu Xiao, et al. 2023.",
|
| 236 |
+
"venue": "arXiv preprint arXiv:2305.11013.",
|
| 237 |
+
"url": null
|
| 238 |
+
}
|
| 239 |
+
},
|
| 240 |
+
{
|
| 241 |
+
"9": {
|
| 242 |
+
"title": "San-m: Memory equipped self-attention for end-to-end speech recognition.",
|
| 243 |
+
"author": "Zhifu Gao, Shiliang Zhang, Ming Lei, and Ian McLoughlin. 2020.",
|
| 244 |
+
"venue": "arXiv preprint arXiv:2006.01713.",
|
| 245 |
+
"url": null
|
| 246 |
+
}
|
| 247 |
+
},
|
| 248 |
+
{
|
| 249 |
+
"10": {
|
| 250 |
+
"title": "Paraformer: Fast and accurate parallel transformer for non-autoregressive end-to-end speech recognition.",
|
| 251 |
+
"author": "Zhifu Gao, Shiliang Zhang, Ian McLoughlin, and Zhijie Yan. 2022.",
|
| 252 |
+
"venue": "arXiv preprint arXiv:2206.08317.",
|
| 253 |
+
"url": null
|
| 254 |
+
}
|
| 255 |
+
},
|
| 256 |
+
{
|
| 257 |
+
"11": {
|
| 258 |
+
"title": "Long short-term memory.",
|
| 259 |
+
"author": "Alex Graves. 2012.",
|
| 260 |
+
"venue": "Supervised sequence labelling with recurrent neural networks, pages 37\u201345.",
|
| 261 |
+
"url": null
|
| 262 |
+
}
|
| 263 |
+
},
|
| 264 |
+
{
|
| 265 |
+
"12": {
|
| 266 |
+
"title": "Vilas: Integrating vision and language into automatic speech recognition.",
|
| 267 |
+
"author": "Minglun Han, Feilong Chen, Ziyi Ni, Linghui Meng, Jing Shi, Shuang Xu, and Bo Xu. 2023.",
|
| 268 |
+
"venue": "arXiv preprint arXiv:2305.19972.",
|
| 269 |
+
"url": null
|
| 270 |
+
}
|
| 271 |
+
},
|
| 272 |
+
{
|
| 273 |
+
"13": {
|
| 274 |
+
"title": "Cif-based collaborative decoding for end-to-end contextual speech recognition.",
|
| 275 |
+
"author": "Minglun Han, Linhao Dong, Shiyu Zhou, and Bo Xu. 2021.",
|
| 276 |
+
"venue": "In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6528\u20136532. IEEE.",
|
| 277 |
+
"url": null
|
| 278 |
+
}
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"14": {
|
| 282 |
+
"title": "Deep multimodal semantic embeddings for speech and images.",
|
| 283 |
+
"author": "David Harwath and James Glass. 2015.",
|
| 284 |
+
"venue": "In 2015 IEEE Workshop on Automatic Speech Recognition and Understanding (ASRU), pages 237\u2013244. IEEE.",
|
| 285 |
+
"url": null
|
| 286 |
+
}
|
| 287 |
+
},
|
| 288 |
+
{
|
| 289 |
+
"15": {
|
| 290 |
+
"title": "Unsupervised learning of spoken language with visual context.",
|
| 291 |
+
"author": "David Harwath, Antonio Torralba, and James Glass. 2016.",
|
| 292 |
+
"venue": "Advances in Neural Information Processing Systems, 29.",
|
| 293 |
+
"url": null
|
| 294 |
+
}
|
| 295 |
+
},
|
| 296 |
+
{
|
| 297 |
+
"16": {
|
| 298 |
+
"title": "Text-free image-to-speech synthesis using learned segmental units.",
|
| 299 |
+
"author": "Wei-Ning Hsu, David Harwath, Tyler Miller, Christopher Song, and James Glass. 2021.",
|
| 300 |
+
"venue": "In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 5284\u20135300.",
|
| 301 |
+
"url": null
|
| 302 |
+
}
|
| 303 |
+
},
|
| 304 |
+
{
|
| 305 |
+
"17": {
|
| 306 |
+
"title": "A review of recent advances on deep learning methods for audio-visual speech recognition.",
|
| 307 |
+
"author": "Denis Ivanko, Dmitry Ryumin, and Alexey Karpov. 2023.",
|
| 308 |
+
"venue": "Mathematics, 11(12):2665.",
|
| 309 |
+
"url": null
|
| 310 |
+
}
|
| 311 |
+
},
|
| 312 |
+
{
|
| 313 |
+
"18": {
|
| 314 |
+
"title": "Visual information matters for asr error correction.",
|
| 315 |
+
"author": "Vanya Bannihatti Kumar, Shanbo Cheng, Ningxin Peng, and Yuchen Zhang. 2023.",
|
| 316 |
+
"venue": "In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1\u20135. IEEE.",
|
| 317 |
+
"url": null
|
| 318 |
+
}
|
| 319 |
+
},
|
| 320 |
+
{
|
| 321 |
+
"19": {
|
| 322 |
+
"title": "Batgpt: A bidirectional autoregessive talker from generative pre-trained transformer.",
|
| 323 |
+
"author": "Zuchao Li, Shitou Zhang, Hai Zhao, Yifei Yang, and Dongjie Yang. 2023.",
|
| 324 |
+
"venue": "arXiv preprint arXiv:2307.00360.",
|
| 325 |
+
"url": null
|
| 326 |
+
}
|
| 327 |
+
},
|
| 328 |
+
{
|
| 329 |
+
"20": {
|
| 330 |
+
"title": "Lstm language model adaptation with images and titles for multimedia automatic speech recognition.",
|
| 331 |
+
"author": "Yasufumi Moriya and Gareth JF Jones. 2018.",
|
| 332 |
+
"venue": "In 2018 IEEE Spoken Language Technology Workshop (SLT), pages 219\u2013226. IEEE.",
|
| 333 |
+
"url": null
|
| 334 |
+
}
|
| 335 |
+
},
|
| 336 |
+
{
|
| 337 |
+
"21": {
|
| 338 |
+
"title": "Improving multimodal speech recognition by data augmentation and speech representations.",
|
| 339 |
+
"author": "Dan Onea\u021b\u0103 and Horia Cucu. 2022.",
|
| 340 |
+
"venue": "In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4579\u20134588.",
|
| 341 |
+
"url": null
|
| 342 |
+
}
|
| 343 |
+
},
|
| 344 |
+
{
|
| 345 |
+
"22": {
|
| 346 |
+
"title": "End-to-end multimodal speech recognition.",
|
| 347 |
+
"author": "Shruti Palaskar, Ramon Sanabria, and Florian Metze. 2018.",
|
| 348 |
+
"venue": "In 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5774\u20135778. IEEE.",
|
| 349 |
+
"url": null
|
| 350 |
+
}
|
| 351 |
+
},
|
| 352 |
+
{
|
| 353 |
+
"23": {
|
| 354 |
+
"title": "Multi-modal auto-regressive modeling via visual tokens.",
|
| 355 |
+
"author": "Tianshuo Peng, Zuchao Li, Lefei Zhang, Ping Wang, Bo Du, et al. 2024.",
|
| 356 |
+
"venue": "In ACM Multimedia 2024.",
|
| 357 |
+
"url": null
|
| 358 |
+
}
|
| 359 |
+
},
|
| 360 |
+
{
|
| 361 |
+
"24": {
|
| 362 |
+
"title": "Learning transferable visual models from natural language supervision.",
|
| 363 |
+
"author": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 2021.",
|
| 364 |
+
"venue": "In International conference on machine learning, pages 8748\u20138763. PMLR.",
|
| 365 |
+
"url": null
|
| 366 |
+
}
|
| 367 |
+
},
|
| 368 |
+
{
|
| 369 |
+
"25": {
|
| 370 |
+
"title": "Robust speech recognition via large-scale weak supervision.",
|
| 371 |
+
"author": "Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. 2023.",
|
| 372 |
+
"venue": "In International Conference on Machine Learning, pages 28492\u201328518. PMLR.",
|
| 373 |
+
"url": null
|
| 374 |
+
}
|
| 375 |
+
},
|
| 376 |
+
{
|
| 377 |
+
"26": {
|
| 378 |
+
"title": "Achieving timestamp prediction while recognizing with non-autoregressive end-to-end asr model.",
|
| 379 |
+
"venue": "arXiv preprint arXiv:2301.12343.",
|
| 380 |
+
"venue": "In arXiv preprint arXiv:2301.12343.",
|
| 381 |
+
"url": null
|
| 382 |
+
}
|
| 383 |
+
},
|
| 384 |
+
{
|
| 385 |
+
"27": {
|
| 386 |
+
"title": "Seaco-paraformer: A non-autoregressive asr system with flexible and effective hotword customization ability.",
|
| 387 |
+
"author": "Xian Shi, Yexin Yang, Zerui Li, Yanni Chen, Zhifu Gao, and Shiliang Zhang. 2024.",
|
| 388 |
+
"venue": "In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 10346\u201310350. IEEE.",
|
| 389 |
+
"url": null
|
| 390 |
+
}
|
| 391 |
+
},
|
| 392 |
+
{
|
| 393 |
+
"28": {
|
| 394 |
+
"title": "Looking enhances listening: Recovering missing speech using images.",
|
| 395 |
+
"author": "Tejas Srinivasan, Ramon Sanabria, and Florian Metze. 2020a.",
|
| 396 |
+
"venue": "In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 6304\u20136308. IEEE.",
|
| 397 |
+
"url": null
|
| 398 |
+
}
|
| 399 |
+
},
|
| 400 |
+
{
|
| 401 |
+
"29": {
|
| 402 |
+
"title": "Fine-grained grounding for multimodal speech recognition.",
|
| 403 |
+
"author": "Tejas Srinivasan, Ramon Sanabria, Florian Metze, and Desmond Elliott. 2020b.",
|
| 404 |
+
"venue": "In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 2667\u20132677.",
|
| 405 |
+
"url": null
|
| 406 |
+
}
|
| 407 |
+
},
|
| 408 |
+
{
|
| 409 |
+
"30": {
|
| 410 |
+
"title": "Multimodal speech recognition with unstructured audio masking.",
|
| 411 |
+
"author": "Tejas Srinivasan, Ramon Sanabria, Florian Metze, and Desmond Elliott. 2020c.",
|
| 412 |
+
"venue": "In Proceedings of the First International Workshop on Natural Language Processing Beyond Text, pages 11\u201318.",
|
| 413 |
+
"url": null
|
| 414 |
+
}
|
| 415 |
+
},
|
| 416 |
+
{
|
| 417 |
+
"31": {
|
| 418 |
+
"title": "Look, listen, and decode: Multimodal speech recognition with images.",
|
| 419 |
+
"author": "Felix Sun, David Harwath, and James Glass. 2016.",
|
| 420 |
+
"venue": "In 2016 IEEE Spoken Language Technology Workshop (SLT), pages 573\u2013578. IEEE.",
|
| 421 |
+
"url": null
|
| 422 |
+
}
|
| 423 |
+
},
|
| 424 |
+
{
|
| 425 |
+
"32": {
|
| 426 |
+
"title": "Attention is all you need.",
|
| 427 |
+
"author": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017.",
|
| 428 |
+
"venue": "Advances in neural information processing systems, 30.",
|
| 429 |
+
"url": null
|
| 430 |
+
}
|
| 431 |
+
},
|
| 432 |
+
{
|
| 433 |
+
"33": {
|
| 434 |
+
"title": "Batgpt-chem: A foundation large model for chemical engineering.",
|
| 435 |
+
"author": "Yifei Yang, Runhan Shi, Zuchao Li, Shu Jiang, Yang Yang, Bao-Liang Lu, and Hai Zhao. 2024.",
|
| 436 |
+
"venue": null,
|
| 437 |
+
"url": null
|
| 438 |
+
}
|
| 439 |
+
},
|
| 440 |
+
{
|
| 441 |
+
"34": {
|
| 442 |
+
"title": "Arcgpt: A large language model tailored for real-world archival applications.",
|
| 443 |
+
"author": "Shitou Zhang, Jingrui Hou, Siyuan Peng, Zuchao Li, Qibiao Hu, and Ping Wang. 2023.",
|
| 444 |
+
"venue": "arXiv preprint arXiv:2307.14852.",
|
| 445 |
+
"url": null
|
| 446 |
+
}
|
| 447 |
+
}
|
| 448 |
+
],
|
| 449 |
+
"url": "http://arxiv.org/html/2410.00822v2"
|
| 450 |
+
}
|
20241004/2410.01457v2.json
ADDED
|
@@ -0,0 +1,553 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "Verbalized Graph Representation Learning: A Fully Interpretable Graph Model Based on Large Language Models Throughout the Entire Process",
|
| 3 |
+
"abstract": "Representation learning on text-attributed graphs (TAGs) has attracted significant interest due to its wide-ranging real-world applications, particularly through Graph Neural Networks (GNNs). Traditional GNN methods focus on encoding the structural information of graphs, often using shallow text embeddings for node or edge attributes. This limits the model to understand the rich semantic information in the data and its reasoning ability for complex downstream tasks, while also lacking interpretability. With the rise of large language models (LLMs), an increasing number of studies are combining them with GNNs for graph representation learning and downstream tasks. While these approaches effectively leverage the rich semantic information in TAGs datasets, their main drawback is that they are only partially interpretable, which limits their application in critical fields. In this paper, we propose a verbalized graph representation learning (VGRL) method which is fully interpretable. In contrast to traditional graph machine learning models, which are usually optimized within a continuous parameter space, VGRL constrains this parameter space to be text description which ensures complete interpretability throughout the entire process, making it easier for users to understand and trust the decisions of the model. We conduct several studies to empirically evaluate the effectiveness of VGRL and we believe this method can serve as a stepping stone in graph representation learning. The source code of our model is available at https://anonymous.4open.science/r/VGRL-7E1E",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "Introduction",
|
| 9 |
+
"text": "Many real-world graphs incorporate textual data, forming what are known as Text-Attributed Graphs (TAGs) (Yang et al., 2021 ###reference_b32###). In TAGs, nodes represent textual entitities such as papers, while edges denote relationships between them, such as citations or co-authorships. For instance, the Cora dataset can be modeled as a TAG, where each node represents a research paper, and the node attributes include features such as the paper\u2019s title, abstract, and keywords. By integrating textual attributes with graph topology, TAGs facilitate more effective representation learning, making them valuable for tasks like document classification, clustering (Wang et al., 2023 ###reference_b25###), citation analysis, and recommendation systems (Zhu et al., 2021 ###reference_b43###; Zhang et al., 2023a ###reference_b39###). This combination of textual and relational data offers deeper insights, especially when both content and connections are essential to the analysis.\nAlthough traditional Graph Neural Network (GNN) models, such as Graph Convolutional Network (GCN) (Kipf & Welling, 2016 ###reference_b6###) and Graph Attention Network (GAT) (Veli\u010dkovi\u0107 et al., 2017 ###reference_b22###), have achieved significant performance improvements across multiple tasks, they generally suffer from a lack of interpretability. As these models largely rely on complex network architectures and implicit feature learning processes, understanding their internal decision mechanisms and how specific features influence task outcomes becomes challenging, thereby limiting their transparency and trustworthiness in practical applications. To address this issue, researchers have proposed several interpretable GNN models.\nThese interpretable methods can generally be divided into input interpretability, training process interpretability, and decision-making process interpretability. For example, GNNExplainer (Ying et al., 2019 ###reference_b36###) is a method for input interpretability, which selects a small subgraph of the input graph together with a small subset of node features that are most influential for the prediction as an explanation, XGNN (Yuan et al., 2020 ###reference_b37###) is a method for training process interpretability which reveals the basis of the model\u2019s predictions by generating interpretable graph structures, and SE-SGformer (Li et al., 2024a ###reference_b8###) is a method for decision-making process interpretability which constructs a novel explainable decision process by discovering the -nearest (farthest) positive (negative) neighbors of a node for predicting edge signs. Clearly, while these methods all have a certain degree of interpretability, they can only explain one part of the entire process of model input, training, and output. Therefore, our goal is to implement comprehensive interpretability by simultaneously achieving input interpretability, training process interpretability, and decision-making process interpretability.\n###figure_1### In recent years, with the breakthroughs of large language models (LLMs) in the field of natural language processing, researchers have gradually begun to integrate them with GNNs to enhance model performance and capabilities.\nFor instance, LLMs can act as predictors (Tang et al., 2024 ###reference_b21###), generating more accurate predictions by analyzing node features and structural information for the TAGs. 
Also, TAPE (He et al., 2023 ###reference_b4###) prompts a powerful LLM to explain its predictions and serve explanations as supplementary text attributes for the downstream LMs and GNN models. Due to the powerful text inference capabilities of LLMs, they are capable of processing TAGs, reasoning about the node classification prediction process of TAGs, and generating explanations in text that is comprehensible to humans. Therefore, we consider the use of LLMs to achieve comprehensive interpretability.\nHowever, using LLMs to handle graph tasks and provide interpretability is not easy. Specifically, there are currently two main approaches to applying LLMs in the field of graph: one is to pre-train or fine-tune LLMs to adapt to various graph downstream tasks. But due to the vast number of parameters typically found in LLMs, the cost of fine-tuning LLMs is quite high and the training time is long. The second is to directly freeze the LLMs for inference but this method does not yield good results. For example, we directly froze the predictor LLMs for node classification prediction in subsequent experiments, and the prediction accuracy was generally not high, as shown in Table 5.2 ###reference_###.\nIn summary, we face two major challenges to achieve comprehensive interpretability with LLMs:\nChallenge 1: How can we ensure that a model is interpretable in terms of input, training process, and decision-making simultaneously?\nChallenge 2: How can we optimize the performance of LLMs without fine-tuning the model parameters to reduce costs?\nTo address these challenges, we propose the Verbalized Graph Representation Learning (VGRL) method. For Challenge 1, VGRL utilizes a verbalized approach to create intuitive connections between input features and predictions and VGRL generates textual explanations at each iteration stage, helping researchers and practitioners better grasp the training dynamics of the model. Also, VGRL provides natural language descriptions for the model\u2019s predictions, clearly explaining the rationale behind each decision. For Challenge 2, instead of relying on costly fine-tuning of the LLM parameters, VGRL leverages a prompt-based optimization strategy. This involves crafting task-specific prompts to guide the LLM in generating optimal predictions without modifying its internal parameters. By utilizing prompt engineering techniques, VGRL maintains high performance while significantly reducing computational costs associated with traditional fine-tuning methods. Additionally, this approach allows the model to remain versatile across various tasks, as it can be adapted to new datasets or problems simply by adjusting the prompts, further enhancing its efficiency and scalability.\nOur contributions are as follows:\nWe propose a novel verbalized graph learning framework that ensures complete interpretability throughout the entire process, from input to training and decision-making, enabling users to fully understand the operational mechanisms of the model.\nWe seek to reduce the high GPU overhead associated with pre-training or fine-tuning in current graph plus LLMs paradigms by utilizing a new model optimization approach, known as Iterative Training through Prompt Optimization.\nWe validate the effectiveness of this method from multiple perspectives on real-world datasets."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "2",
|
| 13 |
+
"parent_section_id": null,
|
| 14 |
+
"section_name": "Preliminaries",
|
| 15 |
+
"text": "In this section, we introduce the essential concepts, notations, and problem settings considered in this research. Our primary focus is on the node classification task over text-attributed graphs, which represents a fundamental downstream task within the field of graph learning. We begin by defining text-attributed graphs.\nText-Attributed Graphs. Text-attributed graphs (TAGs) can be formally described as , where represents a set of nodes, is the adjacency matrix, and denotes a sequential text associated with each node . Here, is the dictionary of words or tokens, and is the length of the sequence. In this paper, we focus on the problem of node classification in TAGs. Specifically, given a subset of labeled nodes , the task is to predict the labels of the remaining unlabeled nodes .And iterates over the input mini-batch one-pass input.\nOne-hop neighbors. Given a node , the set of one-hop neighbors, denoted as , where\n-hop neighbors. Given a node , for , the -hop neighbors of can be denoted as , where ."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "3",
|
| 19 |
+
"parent_section_id": null,
|
| 20 |
+
"section_name": "Related Work",
|
| 21 |
+
"text": "In this section, we review the existing literature related to integrating Large Language Models (LLMs) and Graph Neural Networks (GNNs). Prior work has focused on several key areas, including traditional methods for trusted GNNs, the role of LLMs in graph-based tasks, and recent advances in optimization frameworks utilizing LLMs. We explore these approaches to highlight their contributions and limitations, establishing the foundation for our proposed Verbalized Graph Representation Learning (VGRL) framework."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "3.1",
|
| 25 |
+
"parent_section_id": "3",
|
| 26 |
+
"section_name": "Graph and LLMs",
|
| 27 |
+
"text": "Traditional approaches to trusted GNNs.\nThere are currently two main approaches: post-hoc explanation methods and self-interpretable models . The former tries to interpret the output of the model by adding a model-independent interpreter, for example (Ying et al., 2019 ###reference_b36###; Vu & Thai, 2020 ###reference_b23###; Zhang et al., 2023b ###reference_b40###). However, this can lead to incomplete explanatory information in the output, or even generate explanatory information that is incorrect in the opinion of humans. The latter tries to solve this problem by constructing models that themselves have interpretable principles, for example (Dai & Wang, 2021 ###reference_b2###; Zhang et al., 2022a ###reference_b38###). However, these interpretable principles are based on their inductive bias, and only experts in the relevant fields can accurately judge whether such inductive bias is reasonable or not.\nLLM in Graph.\nExisting methods are mainly categorized into three types: (1) LLM as Enhancer which mainly enhances the performance of GNNs by adding LLM-generated information, for example (He et al., 2023 ###reference_b4###; Chen et al., 2024 ###reference_b1###; Ni et al., 2024 ###reference_b15###); (2) LLM as Predictor which mainly performs a downstream task by directly inputting the graph structure into the LLM, for example (Tang et al., 2024 ###reference_b21###; Qin et al., 2023 ###reference_b18###); (3) LLM as Alignment which mainly enhances the performance by aligning embedding spaces of GNNs and LLMs, for example (Yang et al., 2021 ###reference_b32###; Mavromatis et al., 2023 ###reference_b13###). Among them, there is explanation-based LLM-as-Enhancer approach (He et al., 2023 ###reference_b4###), which achieves better performance by letting LLM generate natural language explanation information of graph structures and then embedding it into GNNs for downstream tasks. However, after the embedding from natural language to graph structure is not directly visible as a black box to humans, and can only be proven effective indirectly through the performance of downstream tasks."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "3.2",
|
| 31 |
+
"parent_section_id": "3",
|
| 32 |
+
"section_name": "LLMs Optimization",
|
| 33 |
+
"text": "LLMs for planning and optimization. Large language models (LLMs) have been successfully applied to planning tasks for embodied agents (Song et al., 2023 ###reference_b19###; Xie et al., 2023 ###reference_b31###; Li et al., 2022 ###reference_b9###; Liang et al., 2023 ###reference_b11###), enabling them to follow natural language instructions and complete complex tasks. More recently, LLMs have also been utilized to tackle optimization problems by generating new solutions from prompts that incorporate previously generated solutions and their associated loss values. While these LLM-based optimization (Xiao et al., 2024 ###reference_b30###; Yang et al., 2024 ###reference_b33###) methods bear some resemblance to our approach, as we also use LLMs to address optimization challenges, a key limitation of existing work is that it has not yet been explored in the graph domain. To address this gap, we propose an extension of this framework to the graph domain, introducing Verbalized Graph Representation Learning (VGRL), which applies LLMs to graph neural networks (GNNs) and opens new possibilities for solving graph-based optimization problems through natural language interactions.\nPrompt engineering and optimization. Numerous prompting techniques (Wei et al., 2022 ###reference_b26###; Zhang et al., 2022b ###reference_b41###; Zhou et al., 2022 ###reference_b42###; Wang et al., 2022 ###reference_b24###; Yao et al., 2024 ###reference_b34###; 2023 ###reference_b35###; Weston & Sukhbaatar, 2023 ###reference_b28###) have been developed to enhance the reasoning capabilities of LLMs. To minimize the manual effort required in designing effective prompts, various automatic prompt optimization approaches (Zhang et al., 2022b ###reference_b41###; Zhou et al., 2022 ###reference_b42###; Yang et al., 2024 ###reference_b33###; Pryzant et al., 2023 ###reference_b16###; Wen et al., 2024 ###reference_b27###; Deng et al., 2022 ###reference_b3###; Li et al., 2024b ###reference_b10###; Ma et al., 2024 ###reference_b12###; Sordoni et al., 2023 ###reference_b20###) have been introduced. However, traditional prompt optimization methods primarily focus on refining the text prompt without changing its underlying semantic meaning. In contrast, our VGRL framework goes beyond mere prompt adjustments by directly updating the parameters of the language-based model through the integration or modification of prior information. This not only improves optimization but also ensures that the learner model remains fully interpretable in its predictions, offering a more robust and transparent solution for graph-based learning tasks.\nLLMs for multi-agent systems. Given their strong instruction-following capabilities, LLMs can assume various roles within multi-agent systems (Qian et al., 2023 ###reference_b17###; Wu et al., 2023 ###reference_b29###; Hong et al., 2023 ###reference_b5###; Li et al., 2023 ###reference_b7###). For instance, explore multi-agent collaboration systems designed to solve complex tasks such as software development. In the VGRL framework, this concept is extended to a two-agent system, where one LLM functions as the learner and the other as the optimizer.\nOur approach sidesteps the problem of modeling black boxes by having the LLM generate human-readable information as promt of another LLM making it perform the downstream task. 
This can be viewed as a \u201cguidance-feedback-redirection\u201d process between models, which, after many iterations, returns the optimal guidance solution for a given task, which is directly human-readable.\n###figure_2###"
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "4",
|
| 37 |
+
"parent_section_id": null,
|
| 38 |
+
"section_name": "Proposed Method",
|
| 39 |
+
"text": "In this paper, we present the Verbalized Graph Representation Learning (VGRL) framework, a pioneering approach that integrates large language models (LLMs) with graph-based tasks while ensuring full interpretability throughout the process. Our methodology encompasses four innovative components, each designed to enhance both the performance and the transparency of LLMs in handling graph data."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "4.1",
|
| 43 |
+
"parent_section_id": "4",
|
| 44 |
+
"section_name": "Incorporating Graph Structure into LLM Predictions",
|
| 45 |
+
"text": "Although Large Language Models (LLMs) can achieve competitive zero-shot performance on specific datasets without considering graph structures, their performance often lags behind Graph Neural Networks (GNNs) on benchmark datasets such as CORA, CITESEER, and OGBN-ARXIV. This gap underscores the importance of graph structural information in tasks like node classification, indicating the need to explore how incorporating graph structures into prompts could enhance LLM performance.\nGiven that LLMs (e.g., ChatGPT) are not natively designed to process adjacency matrices or perform graph-based computations, it is impractical to directly integrate graph operations into LLMs. Thus, an alternative approach is to verbalize graph information as text that LLMs can process effectively. This transformation allows LLMs to interpret node relationships and structural dependencies in natural language format. In (Chen et al., 2024 ###reference_b1###), various methods are evaluated to represent node connections textually, aiming to enhance LLM reasoning capabilities for graph-based tasks.\nOne effective method is the \u2018ego-graph\u2019 approach, which focuses on the local subgraph surrounding a target node. By constraining the LLM\u2019s focus to a limited number of nodes, this method reduces complexity while preserving key local graph structure. To simulate the neighborhood aggregation process typical in GNNs, the input prompt incorporates a summary of attributes from neighboring nodes. Thus, important information from the graph is conveyed to the LLM without altering its reasoning mechanisms. This process can be formalized as:\nwhere is the enhanced representation of node with one-hop neighbor information, represents the features of node , and denotes the set of one-hop neighbors of . The function encapsulates the process of verbalizing neighborhood information and processing it by the LLM.\nInspired by this ego-graph approach, we have also introduced a method for incorporating structural information into our model. By embedding the attributes and relationships of neighboring nodes into the prompt, we aim to enable the LLM to better capture the interactions between nodes. Below is an example of a neighbor summary in Table 1 ###reference_###:\nPrompts used to summarize the neighboring information\nI will now give you basic information about all the papers cited in a paper; this information includes: the abstracts and categories of the cited papers.\nThe following list records some papers related to the current one.\n[{ \u201dcontent\u201d: \u201dThis paper firstly provides \u2026\u201d, \u201dcategory\u201d: \u201dRule Learning\u201d\u2026 }, \u2026]\n# Instruction\nPlease summarize the information above with a short paragraph, find some common points which can reflect the category of this paper.\nNote: ONLY your summary information and NOTHING ELSE!\nPlease start with \u201dThe papers cited in this essay\u201d."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "4.2",
|
| 49 |
+
"parent_section_id": "4",
|
| 50 |
+
"section_name": "Verbalizing Model Parameters for Interpretability",
|
| 51 |
+
"text": "Traditional machine learning models, such as neural networks, rely on numerical parameters, , which are often difficult to interpret. These parameters are typically represented as abstract numerical values, making it complex and non-intuitive to understand or explain the internal workings of the model. In contrast, the Verbalized Graph Representation Learning (VGRL) framework leverages large language models (LLMs) to express model parameters through natural language, providing full interpretability.\nIn VGRL, the model parameters are defined by a text prompt, which consists of human-readable natural language tokens, , where is the set of all interpretable text sequences. This approach contrasts with traditional models where parameters are abstract numbers, which are hard to interpret directly. The VGRL framework unifies both data and model parameters into a natural language-based format that is inherently understandable.\nThe key features of this framework include:\nDiscrete Parameters: The natural language used to express parameters is inherently discrete. This is in contrast to the continuous parameter representations in traditional models, enhancing the intuitiveness of parameter interpretation.\nSequential Structure: The parameters exhibit a sequential structure, as , reflecting the temporal or contextual relationships between parameters. This sequential nature aids in capturing and understanding the dynamics between parameters.\nHuman Interpretability: Since the parameters are verbalized in natural language, they are inherently comprehensible to humans. This allows the model\u2019s reasoning process and learning mechanisms to be more transparent, facilitating interpretability and easier analysis.\nAn advantage of using natural language for model parameters is that it enables the integration of prior knowledge and inductive biases directly into the model. As the model updates its parameters , the changes are fully interpretable, providing clear insights into what the model is learning. For example, changes in can be directly mapped to natural language descriptions, offering an intuitive understanding of the model\u2019s learning process.\nOur empirical evidence demonstrates that text-based parameters often correspond to recognizable patterns in the data, further reinforcing the interpretability and transparency of the VGRL approach. This natural language parameterization not only enhances the intuitiveness of model but also improves its application, offering clearer insights into model tuning and interpretation in real-world scenarios."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "4.3",
|
| 55 |
+
"parent_section_id": "4",
|
| 56 |
+
"section_name": "Leveraging LLMs for Node Classification",
|
| 57 |
+
"text": "Our approach centers on utilizing LLMs as interpretable predictors by querying them in an \u2018open-ended\u2019 manner. Unlike existing methods that primarily rely on message passing mechanisms, our method employs a label feature matching mechanism. We match based on the inherent characteristics of the nodes themselves and the information from their neighbors. This label feature matching mechanism places a stronger emphasis on the intrinsic attributes of node, as it aligns with the insights provided in the prompt.\nThe core of this method is represented by the following equation:\nHere, denotes the predicted label for node , and represents the enhanced node representation incorporating \u2019s -hop neighbors. refers to the LLM\u2019s parameters at the previous step, enabling the model to leverage its prior knowledge and reasoning capabilities to generate the prediction. The function serves as the predictor that utilizes the enhanced representation and model parameters to produce the label output. This formulation emphasizes the LLM\u2019s role as a predictor, focusing on generating interpretable outputs.\nFor each node , a prompt is crafted that includes not only the node\u2019s features, such as the paper title and abstract, but also relevant graph structure information. Specifically, the attributes of neighboring nodes up to the -hop neighborhood are embedded in the prompt, as encapsulated in . This enables the LLM to better understand the node\u2019s context and surroundings within the graph, leading to more informed and accurate predictions."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "4.4",
|
| 61 |
+
"parent_section_id": "4",
|
| 62 |
+
"section_name": "LLM as an optimizer with interpretable optimization process",
|
| 63 |
+
"text": "For the predictor LLM, we provide textual descriptions of node categories, which serve as model parameter , and the model determines which category the input node belongs to based on the given descriptions. The quality of node category descriptions directly affects the performance of LLM predictions; hence, obtaining suitable node category descriptions is very important. Additionally, for better explainability, VGRL imposes a strong constraint on , ensuring that the updated still belong to natural language sequences that humans can understand.\nUnder these conditions, it is not advisable to use classical machine learning optimization methods such as gradient descent to optimize . Inspired by Xiao et al. (2024 ###reference_b30###), the optimizer LLM can output natural language that satisfies the constraints, so we only need to ask the LLM to play the role of an optimizer, then optimized category descriptions are also in natural language understandable by humans. Therefore, we directly use another LLM to optimize . Given a mini-batch , the optimization process is as follows:\nwhere is the true label of , represents the intermediate parameter values for node during the -th iteration, and denotes the parameter of the optimizer LLM, which is a text prompt. Specifically, we optimize the intermediate parameter value of each node in , and then summarize the intermediate parameter values of these nodes through a summary LLM (Section 4.5 ###reference_###) to obtain a new round of parameter .\nThe overall framework for optimizer optimization and the text prompt template are given in Figure 2 ###reference_###. The parameter of the optimizer LLM is actually a text prompt provided by humans and is not updated. The text prompt linguistically specifies the optimization loss function, guiding the optimizer LLM to optimize . The LLM-parameterized optimizer allows users to interact with it directly, which not only helps to trace model failures but also permits the incorporation of prior knowledge to enhance optimization.\nIn addition, we also guide the LLM to output explanations of the optimization process, demonstrating the explainability of the VGRL optimization process."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "4.5",
|
| 67 |
+
"parent_section_id": "4",
|
| 68 |
+
"section_name": "Summary LLM",
|
| 69 |
+
"text": "The role of the Summary LLM is to aggregate and summarize the updated intermediate parameters from the optimizer LLM, generated during the previous minibatch, to obtain updated . Specifically, given a set of updated parameters from the last minibatch , the Summary LLM consolidates these updates into a new set of parameters, . This process can be formalized as:\nHere, represents the intermediate parameter values for node during the -th iteration, and denotes the set of nodes in the current minibatch. The function operates by combining these parameter updates to produce a cohesive set of parameters, , which reflects the overall learning progress across the minibatch. This aggregation ensures that key information from each node\u2019s updated parameters is captured while maintaining coherence in the overall optimization process."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "4.6",
|
| 73 |
+
"parent_section_id": "4",
|
| 74 |
+
"section_name": "Chain-of-Thought Prompting",
|
| 75 |
+
"text": "Inspired by (Wei et al., 2022 ###reference_b26###), we introduce the zero-shot and one-shot Chain-of-Thought (CoT) methods in prompt. For the zero-shot method, we encourage the LLM to perform step-by-step text generation by restricting and guiding the LLM to make the generated explanatory information as structured and precise as possible, in order to achieve a better final result generation based on the self-generated information. Although zero-shot VGRL is already fully interpretable, we still want to customize the interpretation in specific domains to ensure that the interpretation information is more in line with the norms of the human mind and thus enhance the model\u2019s performance. Therefore, we introduce the one-shot method by manually constructing a sample of the CoT, so that the model can generate the interpretation information and the final output based on the sample. The motivation for the one-shot approach is that we believe that the content generated by the LLM based on a sample that conforms to the logic of the human mind will better contribute to the completion of the final task."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "5",
|
| 79 |
+
"parent_section_id": null,
|
| 80 |
+
"section_name": "Experiments",
|
| 81 |
+
"text": "In this section, We will compare the performance of the VGRL framework with diverse backbone models for the TAG node classification task.We will answer the following questions:\nQ1: Can VGRL framework increase the performance of backbone models?\nQ2: Do each part of the VGRL framework play a positive role?"
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "5.1",
|
| 85 |
+
"parent_section_id": "5",
|
| 86 |
+
"section_name": "Baseline and Experiment Setting",
|
| 87 |
+
"text": "We use two LLM-as-predictor models as backbones (Chen et al., 2024 ###reference_b1###), and add our framework on top of them for comparisons. Information on our equipment can be found at Table 2 ###reference_###.\nNode only: \u2018node only\u2019 refers to the features considering only the node itself, excluding any neighbor information.\nSummary: \u2018Summary\u2019 indicates that we used an independent LLM to summarize the node\u2019s -hop information, which can be viewed as the introduction of an enhancer LLM for encoding the graph structure. The prompt for the enhancer LLM is shown in Table 1 ###reference_###.\nDuring the experiments, we used one-hop neighbor information for summarization and set model temperature as default. Additionally, we introduced prior knowledge in our comparison by manually constructing prior knowledge as the initial optimize for iterative processing. And we setting a mini-batch training process with a batch size of 8, i.e. ."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "5.2",
|
| 91 |
+
"parent_section_id": "5",
|
| 92 |
+
"section_name": "Main Results (Q1)",
|
| 93 |
+
"text": "We conducted evaluations on the Cora TAG (McCallum et al., 2000 ###reference_b14###) dataset (See AppendixB ###reference_###) by comparing our optimization iterative process with the baseline that excludes the VGRL framework (Chen et al., 2024 ###reference_b1###). The results are presented in Table 5.2 ###reference_###. We extracted a subset of nodes from the Cora dataset as our experimental data. For further steps, we blurred the concept of epochs and treated each batch as a single step.\nOur comparison reveals that our framework, through the iterative process, achieves better performance, demonstrating the effectiveness of the VGRL framework in representation learning. VGRL gradually refines the label features through repeated iterations, as shown in Figure 3 ###reference_###, which illustrates the change in test accuracy during the mini-batch iterations. Additionally, we used the open-source Llama3.1 8B model for all experiments, which not only significantly reduced costs but also proved the optimization capability of the framework itself."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "5.3",
|
| 97 |
+
"parent_section_id": "5",
|
| 98 |
+
"section_name": "Ablation Experiments (Q2)",
|
| 99 |
+
"text": "We conducted ablation experiments on the Summary + VGRL architecture to assess the importance and relevance of each module. The results of the ablation experiments are shown in Table 5.2 ###reference_###.\nw/o optimizer LLM: This variant removes the optimizer LLM, i.e., there is no iterative optimization process, which is equivalent to using the predictor LLM to make the final decision.\nw/o summary LLM: This variant removes the summary LLM, i.e., after each optimization update, instead of summarizing the information through the summary LLM, the results of a batch update are directly used in the next iteration."
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "5.4",
|
| 103 |
+
"parent_section_id": "5",
|
| 104 |
+
"section_name": "Case Study",
|
| 105 |
+
"text": "###figure_3### To explore the impact of the VGRL framework on the TAG node classification task, we conducted an analysis of a particular training sample from the Cora dataset, as shown in Figure 4 ###reference_###. In the paper \u2018Evolving Sensors in Environments of Controlled Complexity\u2019 the one-hop neighboring nodes all have the label \u2018Genetic_Algorithms\u2019 while the actual label of the node is \u2018Reinforcement_Learning\u2019 This heterogeneity can significantly disrupt the node\u2019s feature information during neighborhood aggregation, resulting in biased classification results. However, VGRL is able to effectively capture unique characteristics of each category, using them as a basis for matching the node\u2019s own features. This addresses the issue of information corruption caused by the propagation mechanism in heterogeneous graphs.\nMoreover, in the Cora dataset, paper categories cannot be strictly divided into binary classes. It is not uncommon for some nodes to belong to two categories simultaneously. In such cases, the label-feature matching mechanism proves to be more reasonable than the message-passing mechanism, as it focuses more on the node\u2019s own information (as can be inferred from the formulation of ). Making judgments and decisions based on one\u2019s existing knowledge () is the most fundamental decision-making process for humans.\n\u2018Judgment\u2019 and \u2018Step-by-Step Analysis\u2019 represent the model\u2019s label matching process, which is also human-readable and interpretable. Whether its the Predictor LLM\u2019s process of analyzing the node\u2019s own features and supplementing it with neighborhood information, or the Optimizer LLM\u2019s analysis and adjustment of the two categories involved in classification errors, both demonstrate a complete and interpretable optimization process. The model explains each update iteration in detail, presenting it in human-readable language. With the help of the Summary LLM, the Predictor LLM and Optimizer LLM communicate and feedback effectively, ultimately constructing the best decision-making basis from scratch for the node classification task on the current dataset.\nFor a detailed training process see Appendix C ###reference_### to Appendix G ###reference_###."
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"section_id": "6",
|
| 109 |
+
"parent_section_id": null,
|
| 110 |
+
"section_name": "theoretical analysis",
|
| 111 |
+
"text": "In this section, our goal is to demonstrate that the category descriptions generated by LLM can provide useful information for predicting label categories. Specifically, if the obtained category descriptions can faithfully represent the information of each category, then they are useful. At the same time, the LLM is non-redundant, as it can provide information that cannot provide. Let be the textual category descriptions generated by LLM; are the embeddings of category from the LLM; are the input of graph structure embeddings, is the target and is the conditional entropy. The specific proof process can be found in Appendix A ###reference_###.\nGiven the following conditions:\n1) Fidelity: can faithfully represent the information of such that with ;\n2)Non-redundancy: contains information not present in , that is,\n, with . Then it follows that ."
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"section_id": "7",
|
| 115 |
+
"parent_section_id": null,
|
| 116 |
+
"section_name": "Conclusion",
|
| 117 |
+
"text": "This paper introduces Verbalized Graph Representation Learning (VGRL), a novel approach to text-attributed graph learning that ensures full interpretability by representing learned parameters as textual descriptions instead of continuous vectors. This method enhances transparency and user understanding of the decision-making process, fostering greater trust in the model\u2019s outputs. While the current application is limited to foundational graph learning paradigms, VGRL shows promise for broader use in more complex models, offering potential advancements in explainable AI and graph-based learning systems."
|
| 118 |
+
}
|
| 119 |
+
],
|
| 120 |
+
"appendix": [
|
| 121 |
+
{
|
| 122 |
+
"section_id": "Appendix x1",
|
| 123 |
+
"parent_section_id": null,
|
| 124 |
+
"section_name": "Appendix",
|
| 125 |
+
"text": ""
|
| 126 |
+
},
|
| 127 |
+
{
|
| 128 |
+
"section_id": "Appendix 1",
|
| 129 |
+
"parent_section_id": null,
|
| 130 |
+
"section_name": "Appendix A Theoretical analysis",
|
| 131 |
+
"text": "In this section, our goal is to demonstrate that the category descriptions generated by LLM can provide useful information for predicting label categories. We formulate our theorem as follows:\nGiven the following conditions:\n1) Fidelity: can faithfully represent the information of such that\n2)Non-redundancy: contains information not present in , that is\nThen we can obtain:\nwhere be the textual category descriptions generated by ; are the embeddings of category from the ; are the input of graph structure embeddings, is the target and is the conditional entropy.\nWe aim to demonstrate that , the process is following:\nStart with:\nWe decompose the original expression Equation 8 ###reference_### into two parts based on the properties of entropy:\nBased on the definition of mutual information, we can obtain:\nDue to the non-negativity of conditional entropy, we have:\nBy substituting Equation 11 ###reference_### into Equation 9 ###reference_###, we further obtain:\nWhen conditional variables decrease, the conditional entropy increases; so we have:\nApplying the two aforementioned conditions and substituting Equations 5 ###reference_### and 6 ###reference_### into Equation 12 ###reference_###, we can obtain:\nThe conclusion is thus proven.\n\u220e"
|
| 132 |
+
},
|
| 133 |
+
{
|
| 134 |
+
"section_id": "Appendix 2",
|
| 135 |
+
"parent_section_id": null,
|
| 136 |
+
"section_name": "Appendix B Dataset Description",
|
| 137 |
+
"text": "Cora (McCallum et al., 2000 ###reference_b14###): The Cora dataset consists of Machine Learning papers. These papers are classified into one of the following seven classes: Case_Based, Genetic_Algorithms, Neural_Networks, Probabilistic_Methods, Reinforcement_Learning, Rule_Learning, Theory. The papers were selected in a way such that in the final corpus every paper cites or is cited by atleast one other paper. There are 2708 papers and 5429 links in the whole corpus."
|
| 138 |
+
},
|
| 139 |
+
{
|
| 140 |
+
"section_id": "Appendix 3",
|
| 141 |
+
"parent_section_id": null,
|
| 142 |
+
"section_name": "Appendix C one-shot CoT",
|
| 143 |
+
"text": "The one-shot example.\n###figure_4###"
|
| 144 |
+
},
|
| 145 |
+
{
|
| 146 |
+
"section_id": "Appendix 4",
|
| 147 |
+
"parent_section_id": null,
|
| 148 |
+
"section_name": "Appendix D Training Detail for Summary + VGRL",
|
| 149 |
+
"text": "###figure_5### ###figure_6### ###figure_7### ###figure_8### ###figure_9### ###figure_10### ###figure_11### ###figure_12### ###figure_13###"
|
| 150 |
+
},
|
| 151 |
+
{
|
| 152 |
+
"section_id": "Appendix 5",
|
| 153 |
+
"parent_section_id": null,
|
| 154 |
+
"section_name": "Appendix E one-shot w/ prior Summary + VGRL",
|
| 155 |
+
"text": "###figure_14### ###figure_15### ###figure_16### ###figure_17### ###figure_18### ###figure_19### ###figure_20### ###figure_21### ###figure_22###"
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"section_id": "Appendix 6",
|
| 159 |
+
"parent_section_id": null,
|
| 160 |
+
"section_name": "Appendix F zero-shot w/o prior Summary + VGRL",
|
| 161 |
+
"text": "###figure_23### ###figure_24### ###figure_25### ###figure_26### ###figure_27### ###figure_28### ###figure_29### ###figure_30### ###figure_31###"
|
| 162 |
+
},
|
| 163 |
+
{
|
| 164 |
+
"section_id": "Appendix 7",
|
| 165 |
+
"parent_section_id": null,
|
| 166 |
+
"section_name": "Appendix G zero-shot w/ prior Summary + VGRL",
|
| 167 |
+
"text": "###figure_32### ###figure_33### ###figure_34### ###figure_35### ###figure_36### ###figure_37### ###figure_38### ###figure_39### ###figure_40###"
|
| 168 |
+
}
|
| 169 |
+
],
|
| 170 |
+
"tables": {
|
| 171 |
+
"1": {
|
| 172 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S4.T1\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 1: </span>Prompts used to generate neighbor summary.</figcaption>\n<p class=\"ltx_p ltx_align_center\" id=\"S4.T1.1\"><span class=\"ltx_rule\" style=\"width:397.5pt;height:2.0pt;background:black;display:inline-block;\">\u00a0</span>\n<span class=\"ltx_inline-block ltx_parbox ltx_align_middle\" id=\"S4.T1.1.1\" style=\"width:397.5pt;\">\n<span class=\"ltx_p\" id=\"S4.T1.1.1.1\"><span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.1.1\">Prompts used to summarize the neighboring information</span></span>\n<span class=\"ltx_p\" id=\"S4.T1.1.1.2\">I will now give you basic information about all the papers cited in a paper; this information includes: the abstracts and categories of the cited papers.\nThe following list records some papers related to the current one.</span>\n<span class=\"ltx_p\" id=\"S4.T1.1.1.3\"><span class=\"ltx_text\" id=\"S4.T1.1.1.3.1\">[</span>{ \u201dcontent\u201d: \u201dThis paper firstly provides \u2026\u201d, \u201dcategory\u201d: \u201dRule Learning\u201d\u2026 }, \u2026<span class=\"ltx_text\" id=\"S4.T1.1.1.3.2\">]</span></span>\n<span class=\"ltx_p\" id=\"S4.T1.1.1.4\"># <span class=\"ltx_text ltx_font_bold\" id=\"S4.T1.1.1.4.1\">Instruction</span></span>\n<span class=\"ltx_p\" id=\"S4.T1.1.1.5\">Please summarize the information above with a short paragraph, find some common points which can reflect the category of this paper.</span>\n<span class=\"ltx_p\" id=\"S4.T1.1.1.6\">Note: ONLY your summary information and NOTHING ELSE!</span>\n<span class=\"ltx_p\" id=\"S4.T1.1.1.7\">Please start with \u201dThe papers cited in this essay\u201d.</span>\n</span>\n<span class=\"ltx_rule\" style=\"width:397.5pt;height:2.0pt;background:black;display:inline-block;\">\u00a0</span></p>\n</figure>",
|
| 173 |
+
"capture": "Table 1: Prompts used to generate neighbor summary."
|
| 174 |
+
},
|
| 175 |
+
"2": {
|
| 176 |
+
"table_html": "<figure class=\"ltx_table ltx_align_floatright\" id=\"S5.T2\">\n<figcaption class=\"ltx_caption ltx_centering\"><span class=\"ltx_tag ltx_tag_table\">Table 2: </span>Information on our equipment</figcaption>\n<div class=\"ltx_inline-block ltx_align_center ltx_transformed_outer\" id=\"S5.T2.1\" style=\"width:166.9pt;height:81.5pt;vertical-align:-0.0pt;\"><span class=\"ltx_transformed_inner\" style=\"transform:translate(-45.6pt,22.3pt) scale(0.646473079994084,0.646473079994084) ;\">\n<table class=\"ltx_tabular ltx_guessed_headers ltx_align_middle\" id=\"S5.T2.1.1\">\n<thead class=\"ltx_thead\">\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.1.1\">\n<th class=\"ltx_td ltx_align_center ltx_th ltx_th_column ltx_border_t\" colspan=\"2\" id=\"S5.T2.1.1.1.1.1\">Devices</th>\n</tr>\n</thead>\n<tbody class=\"ltx_tbody\">\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.2.1\">\n<td class=\"ltx_td ltx_align_center ltx_border_r ltx_border_t\" id=\"S5.T2.1.1.2.1.1\">OS</td>\n<td class=\"ltx_td ltx_align_center ltx_border_t\" id=\"S5.T2.1.1.2.1.2\">Ubuntu 22.04.4 LTS x86_64</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.3.2\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.1.3.2.1\">Language</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.3.2.2\">Python 3.10.14</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.4.3\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.1.4.3.1\">Frameworks</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.4.3.2\">pytorch 2.4.0 + cuda 12.4</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.5.4\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.1.5.4.1\">CPU</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.5.4.2\">Intel Xeon Silver 4310 (48) @ 3.300GHz</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.6.5\">\n<td class=\"ltx_td ltx_align_center ltx_border_r\" id=\"S5.T2.1.1.6.5.1\">GPU</td>\n<td class=\"ltx_td ltx_align_center\" id=\"S5.T2.1.1.6.5.2\">3 * NVIDIA L20 (48G)</td>\n</tr>\n<tr class=\"ltx_tr\" id=\"S5.T2.1.1.7.6\">\n<td class=\"ltx_td ltx_align_center ltx_border_b ltx_border_r\" id=\"S5.T2.1.1.7.6.1\">Memory</td>\n<td class=\"ltx_td ltx_align_center ltx_border_b\" id=\"S5.T2.1.1.7.6.2\">128508MiB</td>\n</tr>\n</tbody>\n</table>\n</span></div>\n</figure>",
|
| 177 |
+
"capture": "Table 2: Information on our equipment"
|
| 178 |
+
},
|
| 179 |
+
"3": {
|
| 180 |
+
"table_html": "<figure class=\"ltx_table\" id=\"S5.T3\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 3: </span>Node classification accuracy for the Cora dataset</figcaption>\n</figure>",
|
| 181 |
+
"capture": "Table 3: Node classification accuracy for the Cora dataset"
|
| 182 |
+
},
|
| 183 |
+
"4": {
|
| 184 |
+
"table_html": "<figure class=\"ltx_table ltx_align_center\" id=\"S5.T4\">\n<figcaption class=\"ltx_caption\"><span class=\"ltx_tag ltx_tag_table\">Table 4: </span>Ablation study on the Cora dataset, showing the effects of different variants base on Summary + VGRL on the accuracy performance</figcaption>\n</figure>",
|
| 185 |
+
"capture": "Table 4: Ablation study on the Cora dataset, showing the effects of different variants base on Summary + VGRL on the accuracy performance"
|
| 186 |
+
}
|
| 187 |
+
},
|
| 188 |
+
"image_paths": {
|
| 189 |
+
"1": {
|
| 190 |
+
"figure_path": "2410.01457v2_figure_1.png",
|
| 191 |
+
"caption": "Figure 1: Comparison of Graph Representation Learning Methods\n(a) Traditional Graph Neural Networks (GNNs) rely on graph structures and initial features for embedding generation and prediction.\n(b) Incorporating a Language Model (LM) enhances GNNs, where a Large Language Model (LLM) provides explanations that refine the embedding process for improved predictions.\n(c) Our proposed Verbalized Graph Representation Learning (VGRL) framework introduces an iterative optimization process involving multiple frozen LLMs (Enhancer, Predictor, Optimizer, and Summary), emphasizing interpretability and parameter tuning through verbalized model adjustments.",
|
| 192 |
+
"url": "http://arxiv.org/html/2410.01457v2/x1.png"
|
| 193 |
+
},
|
| 194 |
+
"2": {
|
| 195 |
+
"figure_path": "2410.01457v2_figure_2.png",
|
| 196 |
+
"caption": "Figure 2: An overview of iterative optimization and text prompt templates for the predictor, optimizer, and summary LLM in the node classification example",
|
| 197 |
+
"url": "http://arxiv.org/html/2410.01457v2/x2.png"
|
| 198 |
+
},
|
| 199 |
+
"4": {
|
| 200 |
+
"figure_path": "2410.01457v2_figure_4.png",
|
| 201 |
+
"caption": "Figure 4: Case study for one-shot wo prior Summary + VGRL: (1) The left figure shows the explanation information and prediction labels output by predictor LLM; (2) The right figure shows the optimization process of optimizer LLM for the predicted content of predictor LLM in the left figure.(3) The top-right figure shows an example of the one-hop neighbors of a predicted sample.",
|
| 202 |
+
"url": "http://arxiv.org/html/2410.01457v2/x3.png"
|
| 203 |
+
}
|
| 204 |
+
},
|
| 205 |
+
"validation": true,
|
| 206 |
+
"references": [
|
| 207 |
+
{
|
| 208 |
+
"1": {
|
| 209 |
+
"title": "Exploring the potential of large language models (llms) in learning on graphs.",
|
| 210 |
+
"author": "Zhikai Chen, Haitao Mao, Hang Li, Wei Jin, Hongzhi Wen, Xiaochi Wei, Shuaiqiang Wang, Dawei Yin, Wenqi Fan, Hui Liu, et al.",
|
| 211 |
+
"venue": "ACM SIGKDD Explorations Newsletter, 25(2):42\u201361, 2024.",
|
| 212 |
+
"url": null
|
| 213 |
+
}
|
| 214 |
+
},
|
| 215 |
+
{
|
| 216 |
+
"2": {
|
| 217 |
+
"title": "Towards self-explainable graph neural network.",
|
| 218 |
+
"author": "Enyan Dai and Suhang Wang.",
|
| 219 |
+
"venue": "In Proceedings of the 30th ACM International Conference on Information & Knowledge Management, pp. 302\u2013311, 2021.",
|
| 220 |
+
"url": null
|
| 221 |
+
}
|
| 222 |
+
},
|
| 223 |
+
{
|
| 224 |
+
"3": {
|
| 225 |
+
"title": "Rlprompt: Optimizing discrete text prompts with reinforcement learning.",
|
| 226 |
+
"author": "Mingkai Deng, Jianyu Wang, Cheng-Ping Hsieh, Yihan Wang, Han Guo, Tianmin Shu, Meng Song, Eric P Xing, and Zhiting Hu.",
|
| 227 |
+
"venue": "arXiv preprint arXiv:2205.12548, 2022.",
|
| 228 |
+
"url": null
|
| 229 |
+
}
|
| 230 |
+
},
|
| 231 |
+
{
|
| 232 |
+
"4": {
|
| 233 |
+
"title": "Harnessing explanations: Llm-to-lm interpreter for enhanced text-attributed graph representation learning.",
|
| 234 |
+
"author": "Xiaoxin He, Xavier Bresson, Thomas Laurent, Adam Perold, Yann LeCun, and Bryan Hooi.",
|
| 235 |
+
"venue": "arXiv preprint arXiv:2305.19523, 2023.",
|
| 236 |
+
"url": null
|
| 237 |
+
}
|
| 238 |
+
},
|
| 239 |
+
{
|
| 240 |
+
"5": {
|
| 241 |
+
"title": "Metagpt: Meta programming for multi-agent collaborative framework.",
|
| 242 |
+
"author": "Sirui Hong, Xiawu Zheng, Jonathan Chen, Yuheng Cheng, Jinlin Wang, Ceyao Zhang, Zili Wang, Steven Ka Shing Yau, Zijuan Lin, Liyang Zhou, et al.",
|
| 243 |
+
"venue": "arXiv preprint arXiv:2308.00352, 2023.",
|
| 244 |
+
"url": null
|
| 245 |
+
}
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"6": {
|
| 249 |
+
"title": "Semi-supervised classification with graph convolutional networks.",
|
| 250 |
+
"author": "Thomas N Kipf and Max Welling.",
|
| 251 |
+
"venue": "arXiv preprint arXiv:1609.02907, 2016.",
|
| 252 |
+
"url": null
|
| 253 |
+
}
|
| 254 |
+
},
|
| 255 |
+
{
|
| 256 |
+
"7": {
|
| 257 |
+
"title": "Camel: Communicative agents for\u201d mind\u201d exploration of large language model society.",
|
| 258 |
+
"author": "Guohao Li, Hasan Hammoud, Hani Itani, Dmitrii Khizbullin, and Bernard Ghanem.",
|
| 259 |
+
"venue": "Advances in Neural Information Processing Systems, 36:51991\u201352008, 2023.",
|
| 260 |
+
"url": null
|
| 261 |
+
}
|
| 262 |
+
},
|
| 263 |
+
{
|
| 264 |
+
"8": {
|
| 265 |
+
"title": "Se-sgformer: A self-explainable signed graph transformer for link sign prediction.",
|
| 266 |
+
"author": "Lu Li, Jiale Liu, Xingyu Ji, Maojun Wang, and Zeyu Zhang.",
|
| 267 |
+
"venue": "arXiv preprint arXiv:2408.08754, 2024a.",
|
| 268 |
+
"url": null
|
| 269 |
+
}
|
| 270 |
+
},
|
| 271 |
+
{
|
| 272 |
+
"9": {
|
| 273 |
+
"title": "Pre-trained language models for interactive decision-making.",
|
| 274 |
+
"author": "Shuang Li, Xavier Puig, Chris Paxton, Yilun Du, Clinton Wang, Linxi Fan, Tao Chen, De-An Huang, Ekin Aky\u00fcrek, Anima Anandkumar, et al.",
|
| 275 |
+
"venue": "Advances in Neural Information Processing Systems, 35:31199\u201331212, 2022.",
|
| 276 |
+
"url": null
|
| 277 |
+
}
|
| 278 |
+
},
|
| 279 |
+
{
|
| 280 |
+
"10": {
|
| 281 |
+
"title": "Guiding large language models via directional stimulus prompting.",
|
| 282 |
+
"author": "Zekun Li, Baolin Peng, Pengcheng He, Michel Galley, Jianfeng Gao, and Xifeng Yan.",
|
| 283 |
+
"venue": "Advances in Neural Information Processing Systems, 36, 2024b.",
|
| 284 |
+
"url": null
|
| 285 |
+
}
|
| 286 |
+
},
|
| 287 |
+
{
|
| 288 |
+
"11": {
|
| 289 |
+
"title": "Code as policies: Language model programs for embodied control.",
|
| 290 |
+
"author": "Jacky Liang, Wenlong Huang, Fei Xia, Peng Xu, Karol Hausman, Brian Ichter, Pete Florence, and Andy Zeng.",
|
| 291 |
+
"venue": "In 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 9493\u20139500. IEEE, 2023.",
|
| 292 |
+
"url": null
|
| 293 |
+
}
|
| 294 |
+
},
|
| 295 |
+
{
|
| 296 |
+
"12": {
|
| 297 |
+
"title": "Are large language models good prompt optimizers?",
|
| 298 |
+
"author": "Ruotian Ma, Xiaolei Wang, Xin Zhou, Jian Li, Nan Du, Tao Gui, Qi Zhang, and Xuanjing Huang.",
|
| 299 |
+
"venue": "arXiv preprint arXiv:2402.02101, 2024.",
|
| 300 |
+
"url": null
|
| 301 |
+
}
|
| 302 |
+
},
|
| 303 |
+
{
|
| 304 |
+
"13": {
|
| 305 |
+
"title": "Train your own gnn teacher: Graph-aware distillation on textual graphs.",
|
| 306 |
+
"author": "Costas Mavromatis, Vassilis N Ioannidis, Shen Wang, Da Zheng, Soji Adeshina, Jun Ma, Han Zhao, Christos Faloutsos, and George Karypis.",
|
| 307 |
+
"venue": "In Joint European Conference on Machine Learning and Knowledge Discovery in Databases, pp. 157\u2013173. Springer, 2023.",
|
| 308 |
+
"url": null
|
| 309 |
+
}
|
| 310 |
+
},
|
| 311 |
+
{
|
| 312 |
+
"14": {
|
| 313 |
+
"title": "Automating the construction of internet portals with machine learning.",
|
| 314 |
+
"author": "Andrew McCallum, Kamal Nigam, Jason Rennie, and Kristie Seymore.",
|
| 315 |
+
"venue": "Information Retrieval Journal, 3:127\u2013163, 2000.",
|
| 316 |
+
"url": null
|
| 317 |
+
}
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"15": {
|
| 321 |
+
"title": "Enhancing student performance prediction on learnersourced questions with sgnn-llm synergy.",
|
| 322 |
+
"author": "Lin Ni, Sijie Wang, Zeyu Zhang, Xiaoxuan Li, Xianda Zheng, Paul Denny, and Jiamou Liu.",
|
| 323 |
+
"venue": "In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pp. 23232\u201323240, 2024.",
|
| 324 |
+
"url": null
|
| 325 |
+
}
|
| 326 |
+
},
|
| 327 |
+
{
|
| 328 |
+
"16": {
|
| 329 |
+
"title": "Automatic prompt optimization with\u201d gradient descent\u201d and beam search.",
|
| 330 |
+
"author": "Reid Pryzant, Dan Iter, Jerry Li, Yin Tat Lee, Chenguang Zhu, and Michael Zeng.",
|
| 331 |
+
"venue": "arXiv preprint arXiv:2305.03495, 2023.",
|
| 332 |
+
"url": null
|
| 333 |
+
}
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"17": {
|
| 337 |
+
"title": "Communicative agents for software development.",
|
| 338 |
+
"author": "Chen Qian, Xin Cong, Cheng Yang, Weize Chen, Yusheng Su, Juyuan Xu, Zhiyuan Liu, and Maosong Sun.",
|
| 339 |
+
"venue": "arXiv preprint arXiv:2307.07924, 6, 2023.",
|
| 340 |
+
"url": null
|
| 341 |
+
}
|
| 342 |
+
},
|
| 343 |
+
{
|
| 344 |
+
"18": {
|
| 345 |
+
"title": "Disentangled representation learning with large language models for text-attributed graphs.",
|
| 346 |
+
"author": "Yijian Qin, Xin Wang, Ziwei Zhang, and Wenwu Zhu.",
|
| 347 |
+
"venue": "arXiv preprint arXiv:2310.18152, 2023.",
|
| 348 |
+
"url": null
|
| 349 |
+
}
|
| 350 |
+
},
|
| 351 |
+
{
|
| 352 |
+
"19": {
|
| 353 |
+
"title": "Llm-planner: Few-shot grounded planning for embodied agents with large language models.",
|
| 354 |
+
"author": "Chan Hee Song, Jiaman Wu, Clayton Washington, Brian M Sadler, Wei-Lun Chao, and Yu Su.",
|
| 355 |
+
"venue": "In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 2998\u20133009, 2023.",
|
| 356 |
+
"url": null
|
| 357 |
+
}
|
| 358 |
+
},
|
| 359 |
+
{
|
| 360 |
+
"20": {
|
| 361 |
+
"title": "Deep language networks: Joint prompt training of stacked llms using variational inference.",
|
| 362 |
+
"author": "Alessandro Sordoni, Xingdi Yuan, Marc-Alexandre C\u00f4t\u00e9, Matheus Pereira, Adam Trischler, Ziang Xiao, Arian Hosseini, Friederike Niedtner, and Nicolas Le Roux.",
|
| 363 |
+
"venue": "arXiv preprint arXiv:2306.12509, 2023.",
|
| 364 |
+
"url": null
|
| 365 |
+
}
|
| 366 |
+
},
|
| 367 |
+
{
|
| 368 |
+
"21": {
|
| 369 |
+
"title": "Graphgpt: Graph instruction tuning for large language models.",
|
| 370 |
+
"author": "Jiabin Tang, Yuhao Yang, Wei Wei, Lei Shi, Lixin Su, Suqi Cheng, Dawei Yin, and Chao Huang.",
|
| 371 |
+
"venue": "In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 491\u2013500, 2024.",
|
| 372 |
+
"url": null
|
| 373 |
+
}
|
| 374 |
+
},
|
| 375 |
+
{
|
| 376 |
+
"22": {
|
| 377 |
+
"title": "Graph attention networks.",
|
| 378 |
+
"author": "Petar Veli\u010dkovi\u0107, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio.",
|
| 379 |
+
"venue": "arXiv preprint arXiv:1710.10903, 2017.",
|
| 380 |
+
"url": null
|
| 381 |
+
}
|
| 382 |
+
},
|
| 383 |
+
{
|
| 384 |
+
"23": {
|
| 385 |
+
"title": "Pgm-explainer: Probabilistic graphical model explanations for graph neural networks.",
|
| 386 |
+
"author": "Minh Vu and My T Thai.",
|
| 387 |
+
"venue": "Advances in neural information processing systems, 33:12225\u201312235, 2020.",
|
| 388 |
+
"url": null
|
| 389 |
+
}
|
| 390 |
+
},
|
| 391 |
+
{
|
| 392 |
+
"24": {
|
| 393 |
+
"title": "Self-consistency improves chain of thought reasoning in language models.",
|
| 394 |
+
"author": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou.",
|
| 395 |
+
"venue": "arXiv preprint arXiv:2203.11171, 2022.",
|
| 396 |
+
"url": null
|
| 397 |
+
}
|
| 398 |
+
},
|
| 399 |
+
{
|
| 400 |
+
"25": {
|
| 401 |
+
"title": "User: Unsupervised structural entropy-based robust graph neural network.",
|
| 402 |
+
"author": "Yifei Wang, Yupan Wang, Zeyu Zhang, Song Yang, Kaiqi Zhao, and Jiamou Liu.",
|
| 403 |
+
"venue": "In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 10235\u201310243, 2023.",
|
| 404 |
+
"url": null
|
| 405 |
+
}
|
| 406 |
+
},
|
| 407 |
+
{
|
| 408 |
+
"26": {
|
| 409 |
+
"title": "Chain-of-thought prompting elicits reasoning in large language models.",
|
| 410 |
+
"author": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al.",
|
| 411 |
+
"venue": "Advances in neural information processing systems, 35:24824\u201324837, 2022.",
|
| 412 |
+
"url": null
|
| 413 |
+
}
|
| 414 |
+
},
|
| 415 |
+
{
|
| 416 |
+
"27": {
|
| 417 |
+
"title": "Hard prompts made easy: Gradient-based discrete optimization for prompt tuning and discovery.",
|
| 418 |
+
"author": "Yuxin Wen, Neel Jain, John Kirchenbauer, Micah Goldblum, Jonas Geiping, and Tom Goldstein.",
|
| 419 |
+
"venue": "Advances in Neural Information Processing Systems, 36, 2024.",
|
| 420 |
+
"url": null
|
| 421 |
+
}
|
| 422 |
+
},
|
| 423 |
+
{
|
| 424 |
+
"28": {
|
| 425 |
+
"title": "System 2 attention (is something you might need too).",
|
| 426 |
+
"author": "Jason Weston and Sainbayar Sukhbaatar.",
|
| 427 |
+
"venue": "arXiv preprint arXiv:2311.11829, 2023.",
|
| 428 |
+
"url": null
|
| 429 |
+
}
|
| 430 |
+
},
|
| 431 |
+
{
|
| 432 |
+
"29": {
|
| 433 |
+
"title": "Autogen: Enabling next-gen llm applications via multi-agent conversation framework.",
|
| 434 |
+
"author": "Qingyun Wu, Gagan Bansal, Jieyu Zhang, Yiran Wu, Shaokun Zhang, Erkang Zhu, Beibin Li, Li Jiang, Xiaoyun Zhang, and Chi Wang.",
|
| 435 |
+
"venue": "arXiv preprint arXiv:2308.08155, 2023.",
|
| 436 |
+
"url": null
|
| 437 |
+
}
|
| 438 |
+
},
|
| 439 |
+
{
|
| 440 |
+
"30": {
|
| 441 |
+
"title": "Verbalized machine learning: Revisiting machine learning with language models.",
|
| 442 |
+
"author": "Tim Z Xiao, Robert Bamler, Bernhard Sch\u00f6lkopf, and Weiyang Liu.",
|
| 443 |
+
"venue": "arXiv preprint arXiv:2406.04344, 2024.",
|
| 444 |
+
"url": null
|
| 445 |
+
}
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"31": {
|
| 449 |
+
"title": "Translating natural language to planning goals with large-language models.",
|
| 450 |
+
"author": "Yaqi Xie, Chen Yu, Tongyao Zhu, Jinbin Bai, Ze Gong, and Harold Soh.",
|
| 451 |
+
"venue": "arXiv preprint arXiv:2302.05128, 2023.",
|
| 452 |
+
"url": null
|
| 453 |
+
}
|
| 454 |
+
},
|
| 455 |
+
{
|
| 456 |
+
"32": {
|
| 457 |
+
"title": "Graphformers: Gnn-nested transformers for representation learning on textual graph.",
|
| 458 |
+
"author": "Junhan Yang, Zheng Liu, Shitao Xiao, Chaozhuo Li, Defu Lian, Sanjay Agrawal, Amit Singh, Guangzhong Sun, and Xing Xie.",
|
| 459 |
+
"venue": "Advances in Neural Information Processing Systems, 34:28798\u201328810, 2021.",
|
| 460 |
+
"url": null
|
| 461 |
+
}
|
| 462 |
+
},
|
| 463 |
+
{
|
| 464 |
+
"33": {
|
| 465 |
+
"title": "Zhongjing: Enhancing the chinese medical capabilities of large language model through expert feedback and real-world multi-turn dialogue.",
|
| 466 |
+
"author": "Songhua Yang, Hanjie Zhao, Senbin Zhu, Guangyu Zhou, Hongfei Xu, Yuxiang Jia, and Hongying Zan.",
|
| 467 |
+
"venue": "In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pp. 19368\u201319376, 2024.",
|
| 468 |
+
"url": null
|
| 469 |
+
}
|
| 470 |
+
},
|
| 471 |
+
{
|
| 472 |
+
"34": {
|
| 473 |
+
"title": "Tree of thoughts: Deliberate problem solving with large language models.",
|
| 474 |
+
"author": "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan.",
|
| 475 |
+
"venue": "Advances in Neural Information Processing Systems, 36, 2024.",
|
| 476 |
+
"url": null
|
| 477 |
+
}
|
| 478 |
+
},
|
| 479 |
+
{
|
| 480 |
+
"35": {
|
| 481 |
+
"title": "Beyond chain-of-thought, effective graph-of-thought reasoning in language models.",
|
| 482 |
+
"author": "Yao Yao, Zuchao Li, and Hai Zhao.",
|
| 483 |
+
"venue": "arXiv preprint arXiv:2305.16582, 2023.",
|
| 484 |
+
"url": null
|
| 485 |
+
}
|
| 486 |
+
},
|
| 487 |
+
{
|
| 488 |
+
"36": {
|
| 489 |
+
"title": "Gnnexplainer: Generating explanations for graph neural networks.",
|
| 490 |
+
"author": "Zhitao Ying, Dylan Bourgeois, Jiaxuan You, Marinka Zitnik, and Jure Leskovec.",
|
| 491 |
+
"venue": "Advances in neural information processing systems, 32, 2019.",
|
| 492 |
+
"url": null
|
| 493 |
+
}
|
| 494 |
+
},
|
| 495 |
+
{
|
| 496 |
+
"37": {
|
| 497 |
+
"title": "Xgnn: Towards model-level explanations of graph neural networks.",
|
| 498 |
+
"author": "Hao Yuan, Jiliang Tang, Xia Hu, and Shuiwang Ji.",
|
| 499 |
+
"venue": "In Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery & data mining, pp. 430\u2013438, 2020.",
|
| 500 |
+
"url": null
|
| 501 |
+
}
|
| 502 |
+
},
|
| 503 |
+
{
|
| 504 |
+
"38": {
|
| 505 |
+
"title": "Protgnn: Towards self-explaining graph neural networks.",
|
| 506 |
+
"author": "Zaixi Zhang, Qi Liu, Hao Wang, Chengqiang Lu, and Cheekong Lee.",
|
| 507 |
+
"venue": "In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pp. 9127\u20139135, 2022a.",
|
| 508 |
+
"url": null
|
| 509 |
+
}
|
| 510 |
+
},
|
| 511 |
+
{
|
| 512 |
+
"39": {
|
| 513 |
+
"title": "Contrastive learning for signed bipartite graphs.",
|
| 514 |
+
"author": "Zeyu Zhang, Jiamou Liu, Kaiqi Zhao, Song Yang, Xianda Zheng, and Yifei Wang.",
|
| 515 |
+
"venue": "In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 1629\u20131638, 2023a.",
|
| 516 |
+
"url": null
|
| 517 |
+
}
|
| 518 |
+
},
|
| 519 |
+
{
|
| 520 |
+
"40": {
|
| 521 |
+
"title": "Rsgnn: A model-agnostic approach for enhancing the robustness of signed graph neural networks.",
|
| 522 |
+
"author": "Zeyu Zhang, Jiamou Liu, Xianda Zheng, Yifei Wang, Pengqian Han, Yupan Wang, Kaiqi Zhao, and Zijian Zhang.",
|
| 523 |
+
"venue": "In Proceedings of the ACM Web Conference 2023, pp. 60\u201370, 2023b.",
|
| 524 |
+
"url": null
|
| 525 |
+
}
|
| 526 |
+
},
|
| 527 |
+
{
|
| 528 |
+
"41": {
|
| 529 |
+
"title": "Automatic chain of thought prompting in large language models.",
|
| 530 |
+
"author": "Zhuosheng Zhang, Aston Zhang, Mu Li, and Alex Smola.",
|
| 531 |
+
"venue": "arXiv preprint arXiv:2210.03493, 2022b.",
|
| 532 |
+
"url": null
|
| 533 |
+
}
|
| 534 |
+
},
|
| 535 |
+
{
|
| 536 |
+
"42": {
|
| 537 |
+
"title": "Large language models are human-level prompt engineers.",
|
| 538 |
+
"author": "Yongchao Zhou, Andrei Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba.",
|
| 539 |
+
"venue": "arXiv preprint arXiv:2211.01910, 2022.",
|
| 540 |
+
"url": null
|
| 541 |
+
}
|
| 542 |
+
},
|
| 543 |
+
{
|
| 544 |
+
"43": {
|
| 545 |
+
"title": "Textgnn: Improving text encoder via graph neural network in sponsored search.",
|
| 546 |
+
"author": "Jason Zhu, Yanling Cui, Yuming Liu, Hao Sun, Xue Li, Markus Pelger, Tianqi Yang, Liangjie Zhang, Ruofei Zhang, and Huasha Zhao.",
|
| 547 |
+
"venue": "In Proceedings of the Web Conference 2021, pp. 2848\u20132857, 2021.",
|
| 548 |
+
"url": null
|
| 549 |
+
}
|
| 550 |
+
}
|
| 551 |
+
],
|
| 552 |
+
"url": "http://arxiv.org/html/2410.01457v2"
|
| 553 |
+
}
|
20241004/2410.02279v2.json
ADDED
|
@@ -0,0 +1,555 @@
|
| 1 |
+
{
|
| 2 |
+
"title": "On Lai\u2019s Upper Confidence Bound in Multi-Armed Bandits",
|
| 3 |
+
"abstract": "In this memorial paper, we honor Tze Leung Lai\u2019s seminal contributions to the topic of multi-armed bandits, with a specific focus on his pioneering work on the upper confidence bound. We establish sharp non-asymptotic regret bounds for an upper confidence bound index with a constant level of exploration for Gaussian rewards. Furthermore, we establish a non-asymptotic regret bound for the upper confidence bound index of Lai, (1987) which employs an exploration function that decreases with the sample size of the corresponding arm. The regret bounds have leading constants that match the Lai-Robbins lower bound. Our results highlight an aspect of Lai\u2019s seminal works that deserves more attention in the machine learning literature.",
|
| 4 |
+
"sections": [
|
| 5 |
+
{
|
| 6 |
+
"section_id": "1",
|
| 7 |
+
"parent_section_id": null,
|
| 8 |
+
"section_name": "1. Introduction",
|
| 9 |
+
"text": "Originating from Thompson\u2019s seminal work (Thompson,, 1933 ###reference_b52###) on clinical trials, the multi-armed bandit problem was formally introduced and popularised by Robbins, (1952 ###reference_b48###), evolving into a cornerstone of sequential decision\u2013making in both statistics and machine learning. The multi-armed bandit problem concerns populations (arms) and the choice of adaptive allocation rules taking values in . An agent selects arm at time if , and subsequently receives a reward from the chosen arm. An allocation rule is adaptive if depends only on the previous allocations and rewards .\nAdaptive allocation rules are often referred to as policies or algorithms in the machine learning literature.\nThe objective of the agent is to maximize the expected cumulative reward up to a time horizon .\nIt follows from the optional stopping theorem that if the identity were known for a population with maximum mean, the agent would be able to maximize the expected reward by sampling exclusively the optimal arm.\nWithout knowing the optimal arm, a balance must be struck between exploring various arms to estimate their mean rewards and exploiting the most promising arm based on current information. This dilemma, known as the exploration\u2013exploitation trade-off, is a common challenge in reinforcement learning, and more generally in sequential design of statistical experiments.\nSignificant research in multi-armed bandits focused on the study of Bayesian optimal policies from 1960 to 1980, as explored in the seminal papers by Bellman, (1956 ###reference_b11###) and Bradt et al., (1956 ###reference_b12###).\nA notable breakthrough was the introduction of the Gittins index in Gittins and Jones, (1979 ###reference_b28###) and Gittins, (1979 ###reference_b27###), providing the optimal Bayesian strategy in the setting of infinite\u2013horizon discounted rewards. At each time point, Gittins\u2019 policy computes an index for each arm that depends solely on the observed samples of that arm, and selects the arm with the highest index. Such policies, referred to as index policies in the literature, are highly attractive as they are typically easy to explain.\nIn the frequentist framework formulated by Robbins, (1952 ###reference_b48###),\nthe regret of an allocation rule, defined as\nis commonly used to measure its performance,\nwhere is the mean of the optimal arm. For , Robbins, (1952 ###reference_b48###) proposed an allocation rule which achieves . Although Robbin\u2019s procedure implies that the average regret converges to zero, an optimal allocation rule with asymptotically the smallest regret remained unknown until Lai and Robbins, (1985 ###reference_b40###)\nestablished an information lower bound for the regret and proposed an asymptotically optimal allocation rule to achieve the lower bound\nin their groundbreaking work."
|
| 10 |
+
},
|
| 11 |
+
{
|
| 12 |
+
"section_id": "1.1",
|
| 13 |
+
"parent_section_id": "1",
|
| 14 |
+
"section_name": "1.1. The Lai\u2013Robbbins lower bound",
|
| 15 |
+
"text": "Lai and Robbins, (1985 ###reference_b40###) established the first frequentist asymptotic lower bound of the regret for bandits with parametric reward distributions. The lower bound was subsequently generalized to multi-armed bandits with multi-parameter and nonparametric rewards (Burnetas and Katehakis,, 1996 ###reference_b16###), controlled Markov chains (Graves and Lai,, 1997 ###reference_b29###) and reinforcement learning (Burnetas and Katehakis,, 1997 ###reference_b17###).\nAssume that each arm has a density function as a member of a parametric family of distributions with unknown parameter . Under mild regularity conditions on ,\nLai and Robbins, (1985 ###reference_b40###) proved that for any \u201cconsistent\u201d allocation rule\nsatisfying for any , the following information lower bound must hold for the regret,\nwhere is the parameter of the optimal arm, is the mean of arm , and is the Kullback-Leibler (KL) divergence between and .\nThis Lai-Robbins lower bound characterizes the overall information complexity of the bandit instance , demonstrating that any consistent\nallocation rule achieving the lower bound must sample each inferior arm at least times asymptotically.\nAnother notable contribution of Lai and Robbins, (1985 ###reference_b40###) is the introduction of the concept of upper confidence bound (UCB), along with an allocation rule that asymptotically attains the lower bound. For each arm , their procedure cyclically compares the UCB of arm with the sample mean of the \u201cleading\u201d arm.\nWhen arm reaches its turn for possible allocation,\nit is sampled if its UCB exceeds the sample mean of the leading arm, and the leading arm is sampled otherwise. Due to the cyclic structure of the procedure, their policy is not an index policy. Later, Lai, (1987 ###reference_b38###) proposed an index policy based on UCB for a predetermined horizon . Agrawal, (1995 ###reference_b1###) and Katehakis and Robbins, (1995 ###reference_b33###) developed and studied UCB indices in the \u201canytime\u201d setting where the agent\u2019s performance is measured continuously without a predetermined horizon,\nrespectively for exponential family rewards and Gaussian rewards. Burnetas and Katehakis, (1996 ###reference_b16###) generalized UCB to multi-parameter and nonparametric reward distributions."
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"section_id": "1.2",
|
| 19 |
+
"parent_section_id": "1",
|
| 20 |
+
"section_name": "1.2. Lai\u2019s UCB",
|
| 21 |
+
"text": "Lai, (1987 ###reference_b38###) introduced the first UCB index policy for multi-armed bandits.\nConsider a -armed bandit problem with reward distributions in a one-parameter exponential family.\nLet be the maximum likelihood estimator of the parameter of arm based on data available at time , be the sample size of arm at time , and be the KL divergence between the reward distributions of arms and .\nAfter sampling each arm once, Lai\u2019s index is defined as\nwhere can be any function satisfying (i) for any , (ii) , and (iii) as for some . The function controls the margin error and is referred to as the exploration function in the machine learning literature (Audibert et al.,, 2009 ###reference_b6###). Lai proved that his UCB index (1 ###reference_###)\nachieves the asymptotic lower bound of Lai and Robbins, (1985 ###reference_b40###), and also approximates the Bayesian optimal policy asymptotically under mild conditions on the prior. Lai\u2019s analysis was based on his work on boundary crossing probabilities (Lai,, 1988 ###reference_b39###). In an accompanying paper, Chang and Lai, (1987 ###reference_b20###) showed that the Gittins index could also be approximated by an index of a similar form to Lai\u2019s index (1 ###reference_###) in the setting of infinite\u2013horizon discounted rewards.\nIn modern machine learning, variants of Lai\u2019s UCB were developed by inverting the KL divergence as in (1 ###reference_###) with various exploration functions for predetermined or unspecified horizon. For bandits with reward distributions in one-parameter exponential family,\nGarivier and Capp\u00e9, (2011 ###reference_b25###); Capp\u00e9 et al., (2013 ###reference_b18###) called the following index kl-UCB,\nwhere . They established a non-asymptotic regret bound whose leading constant achieves the Lai-Robbins lower bound and generalized the result to bounded rewards with finite support.\nWe notice that Lai\u2019s UCB in (1 ###reference_###) uses an exploration function that decreases with the sample size of the corresponding arm. Garivier and Capp\u00e9, (2011 ###reference_b25###) called the index kl-UCB+ when the in (2 ###reference_###) is replaced by , and studied its performance empirically. The idea of tuning the exploration function based on the sample size also appeared in Audibert and Bubeck, (2009 ###reference_b5###) who developed a UCB index called MOSS, which\nreplaces in (1) by , and proved that MOSS\nattains the minimax lower bound established in Auer et al., (1995 ###reference_b8###); Auer et al., 2002b ###reference_b9###.\nUnfortunately, the pioneering paper Lai, (1987 ###reference_b38###) was not cited early on\nin this proliferate literature."
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"section_id": "1.3",
|
| 25 |
+
"parent_section_id": "1",
|
| 26 |
+
"section_name": "1.3. Recent developments",
|
| 27 |
+
"text": "Auer et al., 2002a ###reference_b7### initiated the non-asymptotic analysis of UCB indices in the setting of nonparametric reward distributions. For multi-armed bandits with rewards bounded in , they consider the following index policy,\nwhere denotes the average reward of arm at time and is some constant. They established the following elegant regret bound for the index with .\nThis bound is logarithmic in and only includes constant factors in its second term. However, the leading constant factor 8 of in (4 ###reference_###) is bigger than the optimal constant factor \nfor this index because the maximum variance is for rewards in .\nBubeck, (2010 ###reference_b13###) established a regret bound\nwith a leading constant factor for any .\nAudibert et al., (2009 ###reference_b6###) proposed UCB indices based on empirical variances\nto achieve leading constants that depend on the variances of arms.\nAlthough the UCB indices mentioned above enjoy non-asymptotic regret guarantees for bounded rewards in , they do not satisfy the asymptotic lower bound based on minimum KL divergence\n(Burnetas and Katehakis,, 1996 ###reference_b16###). Honda and Takemura, (2010 ###reference_b31###, 2015 ###reference_b32###) developed asymptotically optimal algorithms based on minimum empirical divergence (MED) for bounded rewards in ,\nbut their algorithm is not index based.\nCapp\u00e9 et al., (2013 ###reference_b18###) studied the use of UCB-type polices to achieve the minimum KL lower bound\nfor rewards in . Moreover, Bubeck et al., (2013 ###reference_b15###) developed robust-UCB methods\nfor bandits with heavy-tailed rewards.\nIn the parametric case, building on the previous works (Garivier and Capp\u00e9,, 2011 ###reference_b25###; Maillard et al.,, 2011 ###reference_b46###), Capp\u00e9 et al., (2013 ###reference_b18###) developed non-asymptotic regret bounds of kl-UCB, as defined in (2 ###reference_###), for bandits with univariate exponential family rewards.\nHonda, (2019 ###reference_b30###) provided\nasymptotic guarantee of kl-UCB+ for Bernoulli rewards.\nKaufmann, (2018 ###reference_b34###) established non-asymptotic regret bounds for variants of Lai\u2019s UCB index and also generalized the lower bound in Lai, (1987 ###reference_b38###) for Bayes risk with product priors.\nMore recently, an active line of research is the development of bi-optimal UCB indices that are both minimax and asymptotically optimal for multi-armed bandits. M\u00e9nard and Garivier, (2017 ###reference_b47###) showed that a variant of kl-UCB called kl-UCB++ is bi-optimal for\nreward distributions in univariate exponential families.\nLattimore, (2018 ###reference_b43###) introduced Ada-UCB for Gaussian rewards to achieve a strong non-asymptotic regret bound. Garivier et al., (2022 ###reference_b26###) developed a bi-optimal UCB index combining MOSS (Audibert and Bubeck,, 2009 ###reference_b5###) and KL-UCB (Capp\u00e9 et al.,, 2013 ###reference_b18###) for rewards bounded in .\nApart from UCB-type policies, Thompson sampling (Thompson,, 1933 ###reference_b52###) has emerged as another prominent algorithm due to its strong empirical performance (Chapelle and Li,, 2011 ###reference_b21###). Non-asymptotic analysis of Thompson sampling was carried out in Agrawal and Goyal, (2012 ###reference_b2###, 2017 ###reference_b3###). 
Additionally, the asymptotic optimality of Thompson sampling\nwas established in Kaufmann et al., 2012b ###reference_b36### and Korda et al., (2013 ###reference_b37###)\nfor reward distributions in univariate exponential families. Other asymptotic optimal policies include BayesUCB (Kaufmann et al., 2012a, ###reference_b35###; Kaufmann,, 2018 ###reference_b34###) in the univariate exponential family case, ISM (Cowan et al.,, 2017 ###reference_b24###)) for Gaussian rewards with unknown means and variances, and algorithms based on sub-sampling (Baransi et al.,, 2014 ###reference_b10###; Chan,, 2020 ###reference_b19###). Readers are referred to Bubeck and Cesa-Bianchi, (2012 ###reference_b14###); Lattimore and Szepesv\u00e1ri, (2020 ###reference_b44###) for detailed references."
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"section_id": "1.4",
|
| 31 |
+
"parent_section_id": "1",
|
| 32 |
+
"section_name": "1.4. Our contributions",
|
| 33 |
+
"text": "In this paper, we establish non-asymptotic regret bounds for two UCB indices with a fixed horizon\nfor Gaussian rewards.\nFirst, we consider the following UCB index with a constant exploration function,\nwhere and is a constant depending on . This can be viewed as the choice of replacing by in (1 ###reference_###).\nFor and , our regret bound can be stated as\nNotice that the regret bound has a leading constant matching the Lai-Robbins lower bound. Additionally, our theory shows that a suitable choice of will lead to a regret bound with negative lower order terms. Similar regret bounds were obtained by Honda and Takemura, (2015 ###reference_b32###); Garivier et al., (2022 ###reference_b26###) for rewards bounded in .\nOur second contribution is a non-asymptotic regret bound for a specific instance of Lai\u2019s UCB index,\nwhich can be also viewed as the kl-UCB+ (Garivier and Capp\u00e9,, 2011 ###reference_b25###) for a fixed horizon.\nWe do not require an additional term in the exploration function\nas in the in Kaufmann, (2018 ###reference_b34###).\nHonda, (2019 ###reference_b30###) proved the asymptotic optimality of kl-UCB+ in the Bernoulli case.\nIn comparison, our regret bounds are fully non-asymptotic with sharp constant factor in the leading term and bounded second order term.\nWe took a different analytical approach compared with existing ones.\nA main issue in our analysis is to bound the probability\nfor a random walk to cross a square-root boundary.\nWe treat this boundary crossing probability as the Type I error of a repeated significance test\n(Woodroofe,, 1979 ###reference_b53###; Siegmund,, 1985 ###reference_b50###, 1986 ###reference_b51###)\nand apply a non-asymptotic version of the nonlinear renewal theory\n(Lai and Siegmund,, 1977 ###reference_b41###, 1979 ###reference_b42###; Woodroofe,, 1982 ###reference_b54###; Zhang,, 1988 ###reference_b55###)\ninstead of directly using a result in Lerche, (2013 ###reference_b45###) as in Lattimore, (2018 ###reference_b43###).\nInterestingly, in addition to multi-armed bandits,\nthe square\u2013root boundary is connected to the repeated significance test in clinical trials (Armitage,, 1960 ###reference_b4###) and optimal stopping\nfor random walks (Chow and Robbins,, 1965 ###reference_b22###; Chow et al.,, 1971 ###reference_b23###) and Brownian motion (Shepp,, 1969 ###reference_b49###)."
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"section_id": "1.5",
|
| 37 |
+
"parent_section_id": "1",
|
| 38 |
+
"section_name": "1.5. Organization",
|
| 39 |
+
"text": "The rest of this paper is organized as follows.\nSection 2 ###reference_### presents the non-asymptotic regret bounds of UCB indices. Section 3 ###reference_### presents the proofs of our regret bounds.\nSection 4 ###reference_### provides some technical lemmas and their proofs."
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"section_id": "2",
|
| 43 |
+
"parent_section_id": null,
|
| 44 |
+
"section_name": "2. Main results",
|
| 45 |
+
"text": "In this section, we present sharp regret bounds of a UCB index with a constant level of exploration under the fixed horizon and a similar non-asymptotic regret bound for Lai\u2019s UCB index."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"section_id": "2.1",
|
| 49 |
+
"parent_section_id": "2",
|
| 50 |
+
"section_name": "2.1. Problem setting",
|
| 51 |
+
"text": "We focus on a -armed bandit problem with a fixed time horizon , ,\nand assume that\nthe rewards sampled from arm are independent and identically distributed\nGaussian random variables with mean and a variance no greater than .\nLet denote the reward received at each time and .\nAn allocation rule , , is adaptive if is measurable for each . We assume\nWe denote the maximal mean among arms by \nand the optimal arm by with an arbitrary tie-breaking rule.\nAll allocation rules considered in this paper are initialized by . Let and . The sample size of arm at time is denoted as . The cumulative regret after the initialization is defined as follows,\nwhere the last equality follows from conditioning.\nThroughout the paper, we use and \nto denote the standard Gaussian density and cumulative distribution functions respectively, and to denote a standard Brownian motion. In addition, for real , and for reals and ."
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"section_id": "2.2",
|
| 55 |
+
"parent_section_id": "2",
|
| 56 |
+
"section_name": "2.2. Regret bounds for UCB with a constant level of exploration",
|
| 57 |
+
"text": "Let be a constant level of exploration depending on and define the following UCB index\nwith an initialization ,\nwhere is a prespecified noise level, is the sample size and is the average rewards of arm at time after is sampled.\nAn arbitrary tie-breaking rule is applied to address multiple maxima in (6 ###reference_###).\nDefine\nWe have the following regret upper bound for the allocation rule (6 ###reference_###).\nSuppose the rewards from arm follow a Gaussian distribution with mean and no greater variance than for all . Then, the regret of the UCB rule (6 ###reference_###) is bounded by\nwhere is defined in (5 ###reference_###) and is defined in (7 ###reference_###).\nIn the numerator of the right-hand side of (8 ###reference_###), the term represents the leading term, and is as by choosing properly. The component within in (7 ###reference_###) corresponds to the boundary crossing probability of Brownian motion, which can be interpreted as the size of a repeated significance test. For detailed studies, see Woodroofe, (1979 ###reference_b53###) and Siegmund, (1985 ###reference_b50###). A non-asymptotic upper bound for this probability is provided in Lemma 10 ###reference_orem10###\nin Section 4 ###reference_###.\nTheorem 1 ###reference_orem1###, combined with numerical evaluations, leads to the following corollary.\nSetting in (6 ###reference_###), we find that\nwhere as and\n for .\nCorollary 3 ###reference_orem3### establishes a sharp non-asymptotic regret bound with optimal leading constant, which implies that the UCB rule achieves the\ninformation lower bound of Lai and Robbins, (1985 ###reference_b40###). In fact, for the choice in the above corollary this optimality is uniform in the sense of\nwhere .\nAccording to Lemma 10 ###reference_orem10### in Section 4 ###reference_###,\n in (7 ###reference_###) for ,\nso that the second term in (7 ###reference_###) is negligible as . Therefore, Theorem 1 ###reference_orem1### suggests the use of\nthe exploration level\nThe UCB rule (6 ###reference_###) with the exploration level in (9 ###reference_###) enjoys the following regret bound,\nfor some depending on only."
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"section_id": "2.3",
|
| 61 |
+
"parent_section_id": "2",
|
| 62 |
+
"section_name": "2.3. Regret bound for Lai\u2019s UCB",
|
| 63 |
+
"text": "In this section, we consider Lai\u2019s UCB index in (1 ###reference_###) with \nfor Gaussian rewards.\nLet . With an initialization , Lai\u2019s UCB rule can be written as\nwhere , and are defined as in (6 ###reference_###), and is a prespecified noise level. Again any tie-breaking rule can be applied in (10 ###reference_###).\nSuppose the rewards from arm follow a Gaussian distribution with mean \nand no greater variance than for all .\nLet .\nThen, the UCB index policy in (10 ###reference_###) satisfies\nwhere is defined in (5 ###reference_###),\n, and is uniformly bounded with\n and as .\nKaufmann, (2018 ###reference_b34###) established non-asymptotic regret bounds for the UCB index rule in (10 ###reference_###) with an exploration function for rewards of univariate exponential families. However, their regret bound does not have a sharp leading constant and has as lower order terms."
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"section_id": "3",
|
| 67 |
+
"parent_section_id": null,
|
| 68 |
+
"section_name": "3. Proofs of regret bounds",
|
| 69 |
+
"text": "We provide here the proofs of the regret upper bounds in\nthe main theorems and corollaries presented in Section 2 ###reference_###.\nThe following notation will be used throughout this section. We define as the -th sample from arm , as the sample average and . For suboptimal arms , we write and ."
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"section_id": "3.1",
|
| 73 |
+
"parent_section_id": "3",
|
| 74 |
+
"section_name": "3.1. Proof of Theorem 1",
|
| 75 |
+
"text": "According to the above notation, the UCB index can be expressed as\nFor the optimal arm , let \nand be its distribution function . We have\nwhere and .\nBecause is a bounded nonnegative non-increasing differentiable function of ,\nBy Lemma 8 ###reference_orem8###, for all , so that\nin view of (7 ###reference_###) and the fact that .\nThe conclusion follows from (12 ###reference_###)."
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"section_id": "3.2",
|
| 79 |
+
"parent_section_id": "3",
|
| 80 |
+
"section_name": "3.2. Proof of Corollary 3",
|
| 81 |
+
"text": "Inserting into the upper bound of Lemma 10 ###reference_orem10### yields for . According to the proof of Lemma 10 ###reference_orem10###, can be bounded by the integrals in (23 ###reference_###). Numerical evaluations of (23 ###reference_###) and for various values of lead to our conclusion."
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"section_id": "3.3",
|
| 85 |
+
"parent_section_id": "3",
|
| 86 |
+
"section_name": "3.3. Proof of Corollary 5",
|
| 87 |
+
"text": "As is defined implicitly in (9 ###reference_###), we first derive an\nexpansion of it.\nLet\nso that .\nAs , is strictly convex in and the solution is uniquely the solution of or equivalently the solution of\nAs ,\nit follows that for all , and and\nas .\nThe conclusion is deduced from Theorem 1 ###reference_orem1###."
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"section_id": "3.4",
|
| 91 |
+
"parent_section_id": "3",
|
| 92 |
+
"section_name": "3.4. Proof of Theorem 6",
|
| 93 |
+
"text": "Let \nand be the distribution function of , . We have\nas in the proof of Theorem 1 ###reference_orem1###, where .\nLet satisfying , , be the solution of\n,\nand .\nBecause and\n for ,\nwhere \nwith .\nAs ,\n.\nDefine .\nBy (19 ###reference_###) and the above inequality,\nfor any , where with\n.\nAs ,\nBy Lemma 11 ###reference_orem11###,\n for all .\nLet \nwith and\n.\nAs ,\nthe above integral is bounded by\nMoreover, because ,\nwe find that\nfor any choice of .\nFor and , by Lemma 11 ###reference_orem11### and\nthe right-hand side above is no greater than 14.8, so that\nwith . For ,\nso that (20 ###reference_###) holds with anyways.\nFor fixed , \nand Lemma 11 ###reference_orem11### provides as .\nBecause when and\n when ,\n when .\nThus, in (20 ###reference_###) when .\nThe conclusion follows directly from (20 ###reference_###)\nas ."
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"section_id": "4",
|
| 97 |
+
"parent_section_id": null,
|
| 98 |
+
"section_name": "4. Technical lemmas",
|
| 99 |
+
"text": "In this section, we provide some inequalities for boundary crossing probabilities.\nAmong them, Lemmas 8 ###reference_orem8### and 10 ###reference_orem10### are\nused in the proof of Theorems 1 ###reference_orem1###,\nand Lemma 11 ###reference_orem11### is used in the proof of\nTheorem 6 ###reference_orem6###.\nOur first lemma deals with the square root boundary crossing for a Brownian motion with drift .\nLet . Then, for all and\nwhere is defined as in (7 ###reference_###).\nAs , the union bound gives\nThe right-hand side equals .\n\u220e\nWe need the following inequalty for an expected stopping rule\nin the proof of Lemma 10 ###reference_orem10###.\nLet be a Brownian motion with drift under . Define\n, \nand . Then,\nBy definition is the solution of .\nAs , .\nAs ,\nIt follows from Wald\u2019s identity that\nwhich is equivalent to\n.\nBecause , the unique solution of the above equation is\n. It follows that\n.\n\u220e\nAs Lemma 8 ###reference_orem8###, the following lemma deals with the driftless case.\nLet be a standard Brownian motion under .\nLet .\nFor all real numbers and ,\nAssume without loss of generality as is a Brownian motion.\nLet under and\nLet be the sigma-field generated by .\nThe likelihood ratio in is\n and\n. Thus, Wald\u2019s likelihood ratio argument provides\nWith an application of Lemma 9 ###reference_orem9### and variable change , we find that\nThe first double integral on the right-hand side above is bounded by\nand , while the second is bounded by\nInserting the above bounds to (23 ###reference_###), we find that\nThe conclusion follows as .\n\u220e\nFinally, our last lemma deals with the boundary of Lai\u2019s UCB, or equivalently\nthe boundary for repeated significance test with slowly changing threshold level\n. Recall that .\nLet be a standard Brownian motion. Then,\nfor all positive and , with \nand .\nWe write the probability in (24 ###reference_###) as\nwith and .\nAs is a standard Brownian motion,\nthe probability depends on only through .\nIn what follows we assume without loss of generality and .\nLet and .\nFor , is the unique solution of \nin . For , .\nThe function is decreasing in and increasing in .\nLet\nThe probability is maximized at .\nBecause ,\n is increasing in for and decreasing in for .\nFor , Theorem 2.18 of Siegmund, (1986 ###reference_b51###) provides\nThus, as for ,\nBecause is also a standard Browning motion,\nLet and define\n and\n.\nFor\nLet , and\n.\nFor ,\nIntegrating the above inequality over , we find that\nLet .\nAs ,\nAs and ,\nFor and , the right-hand side above is no greater than .\nNow consider the case of . Let\nAs and\n, for large we have\n and\nby the dominated convergence theorem.\nThus, as .\n\u220e"
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"section_id": "5",
|
| 103 |
+
"parent_section_id": null,
|
| 104 |
+
"section_name": "5. Conclusion",
|
| 105 |
+
"text": "Our work establishes sharp non-asymptotic regret bounds for UCB indices with a constant level of exploration and a similar non-asymptotic regret bound for Lai\u2019s UCB index under the Gaussian reward assumption. In our analysis, the Gaussian assumption can be relaxed to sub-Gaussian assumptions with somewhat messier nonasymptotic regret bounds. Generalization of our analysis to anytime UCB indices is left for future work. Since UCB is widely used in other settings, such as contextual bandits and reinforcement learning, the analytic approach developed in this paper has potential applications beyond the multi-armed bandit problem."
|
| 106 |
+
}
|
| 107 |
+
],
|
| 108 |
+
"appendix": [],
|
| 109 |
+
"tables": {},
|
| 110 |
+
"image_paths": {},
|
| 111 |
+
"validation": true,
|
| 112 |
+
"references": [
|
| 113 |
+
{
|
| 114 |
+
"1": {
|
| 115 |
+
"title": "Sample mean based index policies by regret for the multi-armed bandit problem.",
|
| 116 |
+
"author": "Agrawal, R. (1995).",
|
| 117 |
+
"venue": "Advances in Applied Probability, 27(4):1054\u20131078.",
|
| 118 |
+
"url": null
|
| 119 |
+
}
|
| 120 |
+
},
|
| 121 |
+
{
|
| 122 |
+
"2": {
|
| 123 |
+
"title": "Analysis of Thompson sampling for the multi-armed bandit problem.",
|
| 124 |
+
"author": "Agrawal, S. and Goyal, N. (2012).",
|
| 125 |
+
"venue": "In Conference on Learning Theory, pages 39.1\u201339.26. JMLR Workshop and Conference Proceedings.",
|
| 126 |
+
"url": null
|
| 127 |
+
}
|
| 128 |
+
},
|
| 129 |
+
{
|
| 130 |
+
"3": {
|
| 131 |
+
"title": "Near-optimal regret bounds for Thompson sampling.",
|
| 132 |
+
"author": "Agrawal, S. and Goyal, N. (2017).",
|
| 133 |
+
"venue": "Journal of the ACM (JACM), 64(5):1\u201324.",
|
| 134 |
+
"url": null
|
| 135 |
+
}
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"4": {
|
| 139 |
+
"title": "Sequential medical trials.",
|
| 140 |
+
"author": "Armitage, P. (1960).",
|
| 141 |
+
"venue": "Sequential Medical Trials.",
|
| 142 |
+
"url": null
|
| 143 |
+
}
|
| 144 |
+
},
|
| 145 |
+
{
|
| 146 |
+
"5": {
|
| 147 |
+
"title": "Minimax policies for adversarial and stochastic bandits.",
|
| 148 |
+
"author": "Audibert, J.-Y. and Bubeck, S. (2009).",
|
| 149 |
+
"venue": "In Conference on Learning Theory, volume 7, pages 1\u2013122.",
|
| 150 |
+
"url": null
|
| 151 |
+
}
|
| 152 |
+
},
|
| 153 |
+
{
|
| 154 |
+
"6": {
|
| 155 |
+
"title": "Exploration\u2013exploitation tradeoff using variance estimates in multi-armed bandits.",
|
| 156 |
+
"author": "Audibert, J.-Y., Munos, R., and Szepesv\u00e1ri, C. (2009).",
|
| 157 |
+
"venue": "Theoretical Computer Science, 410(19):1876\u20131902.",
|
| 158 |
+
"url": null
|
| 159 |
+
}
|
| 160 |
+
},
|
| 161 |
+
{
|
| 162 |
+
"7": {
|
| 163 |
+
"title": "Finite-time analysis of the multiarmed bandit problem.",
|
| 164 |
+
"author": "Auer, P., Cesa-Bianchi, N., and Fischer, P. (2002a).",
|
| 165 |
+
"venue": "Machine Learning, 47(2):235\u2013256.",
|
| 166 |
+
"url": null
|
| 167 |
+
}
|
| 168 |
+
},
|
| 169 |
+
{
|
| 170 |
+
"8": {
|
| 171 |
+
"title": "Gambling in a rigged casino: The adversarial multi-armed bandit problem.",
|
| 172 |
+
"author": "Auer, P., Cesa-Bianchi, N., Freund, Y., and Schapire, R. E. (1995).",
|
| 173 |
+
"venue": "In Proceedings of IEEE 36th Annual Foundations of Computer Science, pages 322\u2013331. IEEE.",
|
| 174 |
+
"url": null
|
| 175 |
+
}
|
| 176 |
+
},
|
| 177 |
+
{
|
| 178 |
+
"9": {
|
| 179 |
+
"title": "The nonstochastic multiarmed bandit problem.",
|
| 180 |
+
"author": "Auer, P., Cesa-Bianchi, N., Freund, Y., and Schapire, R. E. (2002b).",
|
| 181 |
+
"venue": "SIAM Journal on Computing, 32(1):48\u201377.",
|
| 182 |
+
"url": null
|
| 183 |
+
}
|
| 184 |
+
},
|
| 185 |
+
{
|
| 186 |
+
"10": {
|
| 187 |
+
"title": "Sub-sampling for multi-armed bandits.",
|
| 188 |
+
"author": "Baransi, A., Maillard, O.-A., and Mannor, S. (2014).",
|
| 189 |
+
"venue": "In Machine Learning and Knowledge Discovery in Databases: European Conference, ECML PKDD 2014, Nancy, France, September 15-19, 2014. Proceedings, Part I 14, pages 115\u2013131. Springer.",
|
| 190 |
+
"url": null
|
| 191 |
+
}
|
| 192 |
+
},
|
| 193 |
+
{
|
| 194 |
+
"11": {
|
| 195 |
+
"title": "A problem in the sequential design of experiments.",
|
| 196 |
+
"author": "Bellman, R. (1956).",
|
| 197 |
+
"venue": "Sankhy\u0101: The Indian Journal of Statistics (1933-1960), 16(3/4):221\u2013229.",
|
| 198 |
+
"url": null
|
| 199 |
+
}
|
| 200 |
+
},
|
| 201 |
+
{
|
| 202 |
+
"12": {
|
| 203 |
+
"title": "On sequential designs for maximizing the sum of n observations.",
|
| 204 |
+
"author": "Bradt, R. N., Johnson, S., and Karlin, S. (1956).",
|
| 205 |
+
"venue": "The Annals of Mathematical Statistics, 27(4):1060\u20131074.",
|
| 206 |
+
"url": null
|
| 207 |
+
}
|
| 208 |
+
},
|
| 209 |
+
{
|
| 210 |
+
"13": {
|
| 211 |
+
"title": "Bandits Games and Clustering Foundations.",
|
| 212 |
+
"author": "Bubeck, S. (2010).",
|
| 213 |
+
"venue": "PhD thesis, Universit\u00e9 Lille 1, France.",
|
| 214 |
+
"url": null
|
| 215 |
+
}
|
| 216 |
+
},
|
| 217 |
+
{
|
| 218 |
+
"14": {
|
| 219 |
+
"title": "Regret analysis of stochastic and nonstochastic multi-armed bandit problems.",
|
| 220 |
+
"author": "Bubeck, S. and Cesa-Bianchi, N. (2012).",
|
| 221 |
+
"venue": "Foundations and Trends\u00ae in Machine Learning, 5(1):1\u2013122.",
|
| 222 |
+
"url": null
|
| 223 |
+
}
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"15": {
|
| 227 |
+
"title": "Bandits with heavy tail.",
|
| 228 |
+
"author": "Bubeck, S., Cesa-Bianchi, N., and Lugosi, G. (2013).",
|
| 229 |
+
"venue": "IEEE Transactions on Information Theory, 59(11):7711\u20137717.",
|
| 230 |
+
"url": null
|
| 231 |
+
}
|
| 232 |
+
},
|
| 233 |
+
{
|
| 234 |
+
"16": {
|
| 235 |
+
"title": "Optimal adaptive policies for sequential allocation problems.",
|
| 236 |
+
"author": "Burnetas, A. N. and Katehakis, M. N. (1996).",
|
| 237 |
+
"venue": "Advances in Applied Mathematics, 17(2):122\u2013142.",
|
| 238 |
+
"url": null
|
| 239 |
+
}
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"17": {
|
| 243 |
+
"title": "Optimal adaptive policies for markov decision processes.",
|
| 244 |
+
"author": "Burnetas, A. N. and Katehakis, M. N. (1997).",
|
| 245 |
+
"venue": "Mathematics of Operations Research, 22(1):222\u2013255.",
|
| 246 |
+
"url": null
|
| 247 |
+
}
|
| 248 |
+
},
|
| 249 |
+
{
|
| 250 |
+
"18": {
|
| 251 |
+
"title": "Kullback-Leibler upper confidence bounds for optimal sequential allocation.",
|
| 252 |
+
"author": "Capp\u00e9, O., Garivier, A., Maillard, O.-A., Munos, R., and Stoltz, G. (2013).",
|
| 253 |
+
"venue": "The Annals of Statistics, pages 1516\u20131541.",
|
| 254 |
+
"url": null
|
| 255 |
+
}
|
| 256 |
+
},
|
| 257 |
+
{
|
| 258 |
+
"19": {
|
| 259 |
+
"title": "The multi-armed bandit problem.",
|
| 260 |
+
"author": "Chan, H. P. (2020).",
|
| 261 |
+
"venue": "The Annals of Statistics, 48(1):346\u2013373.",
|
| 262 |
+
"url": null
|
| 263 |
+
}
|
| 264 |
+
},
|
| 265 |
+
{
|
| 266 |
+
"20": {
|
| 267 |
+
"title": "Optimal stopping and dynamic allocation.",
|
| 268 |
+
"author": "Chang, F. and Lai, T. L. (1987).",
|
| 269 |
+
"venue": "Advances in Applied Probability, 19(4):829\u2013853.",
|
| 270 |
+
"url": null
|
| 271 |
+
}
|
| 272 |
+
},
|
| 273 |
+
{
|
| 274 |
+
"21": {
|
| 275 |
+
"title": "An empirical evaluation of Thompson sampling.",
|
| 276 |
+
"author": "Chapelle, O. and Li, L. (2011).",
|
| 277 |
+
"venue": "Advances in Neural Information Processing Systems, 24.",
|
| 278 |
+
"url": null
|
| 279 |
+
}
|
| 280 |
+
},
|
| 281 |
+
{
|
| 282 |
+
"22": {
|
| 283 |
+
"title": "On optimal stopping rules for .",
|
| 284 |
+
"author": "Chow, Y.-S. and Robbins, H. (1965).",
|
| 285 |
+
"venue": "Illinois Journal of Mathematics, 9(3):444\u2013454.",
|
| 286 |
+
"url": null
|
| 287 |
+
}
|
| 288 |
+
},
|
| 289 |
+
{
|
| 290 |
+
"23": {
|
| 291 |
+
"title": "Great expectations: the theory of optimal stopping.",
|
| 292 |
+
"author": "Chow, Y.-S., Robbins, H., and Siegmund, D. (1971).",
|
| 293 |
+
"venue": "Houghton Mifflin Co., Boston, MA.",
|
| 294 |
+
"url": null
|
| 295 |
+
}
|
| 296 |
+
},
|
| 297 |
+
{
|
| 298 |
+
"24": {
|
| 299 |
+
"title": "Normal bandits of unknown means and variances.",
|
| 300 |
+
"author": "Cowan, W., Honda, J., and Katehakis, M. N. (2017).",
|
| 301 |
+
"venue": "Journal of Machine Learning Research, 18:154\u20131.",
|
| 302 |
+
"url": null
|
| 303 |
+
}
|
| 304 |
+
},
|
| 305 |
+
{
|
| 306 |
+
"25": {
|
| 307 |
+
"title": "The kl-ucb algorithm for bounded stochastic bandits and beyond.",
|
| 308 |
+
"author": "Garivier, A. and Capp\u00e9, O. (2011).",
|
| 309 |
+
"venue": "In Proceedings of the 24th annual conference on learning theory, pages 359\u2013376. JMLR Workshop and Conference Proceedings.",
|
| 310 |
+
"url": null
|
| 311 |
+
}
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"26": {
|
| 315 |
+
"title": "KL-UCB-switch: Optimal regret bounds for stochastic bandits from both a distribution-dependent and a distribution-free viewpoints.",
|
| 316 |
+
"author": "Garivier, A., Hadiji, H., M\u00e9nard, P., and Stoltz, G. (2022).",
|
| 317 |
+
"venue": "Journal of Machine Learning Research, 23(179):1\u201366.",
|
| 318 |
+
"url": null
|
| 319 |
+
}
|
| 320 |
+
},
|
| 321 |
+
{
|
| 322 |
+
"27": {
|
| 323 |
+
"title": "Bandit processes and dynamic allocation indices.",
|
| 324 |
+
"author": "Gittins, J. C. (1979).",
|
| 325 |
+
"venue": "Journal of the Royal Statistical Society Series B: Statistical Methodology, 41(2):148\u2013164.",
|
| 326 |
+
"url": null
|
| 327 |
+
}
|
| 328 |
+
},
|
| 329 |
+
{
|
| 330 |
+
"28": {
|
| 331 |
+
"title": "A dynamic allocation index for the discounted multiarmed bandit problem.",
|
| 332 |
+
"author": "Gittins, J. C. and Jones, D. M. (1979).",
|
| 333 |
+
"venue": "Biometrika, 66(3):561\u2013565.",
|
| 334 |
+
"url": null
|
| 335 |
+
}
|
| 336 |
+
},
|
| 337 |
+
{
|
| 338 |
+
"29": {
|
| 339 |
+
"title": "Asymptotically efficient adaptive choice of control laws incontrolled markov chains.",
|
| 340 |
+
"author": "Graves, T. L. and Lai, T. L. (1997).",
|
| 341 |
+
"venue": "SIAM journal on control and optimization, 35(3):715\u2013743.",
|
| 342 |
+
"url": null
|
| 343 |
+
}
|
| 344 |
+
},
|
| 345 |
+
{
|
| 346 |
+
"30": {
|
| 347 |
+
"title": "A note on kl-ucb+ policy for the stochastic bandit.",
|
| 348 |
+
"author": "Honda, J. (2019).",
|
| 349 |
+
"venue": "arXiv preprint arXiv:1903.07839.",
|
| 350 |
+
"url": null
|
| 351 |
+
}
|
| 352 |
+
},
|
| 353 |
+
{
|
| 354 |
+
"31": {
|
| 355 |
+
"title": "An asymptotically optimal bandit algorithm for bounded support models.",
|
| 356 |
+
"author": "Honda, J. and Takemura, A. (2010).",
|
| 357 |
+
"venue": "In Conference on Learning Theory, pages 67\u201379.",
|
| 358 |
+
"url": null
|
| 359 |
+
}
|
| 360 |
+
},
|
| 361 |
+
{
|
| 362 |
+
"32": {
|
| 363 |
+
"title": "Non-asymptotic analysis of a new bandit algorithm for semi-bounded rewards.",
|
| 364 |
+
"author": "Honda, J. and Takemura, A. (2015).",
|
| 365 |
+
"venue": "Journal of Machine Learning Research, 16:3721\u20133756.",
|
| 366 |
+
"url": null
|
| 367 |
+
}
|
| 368 |
+
},
|
| 369 |
+
{
|
| 370 |
+
"33": {
|
| 371 |
+
"title": "Sequential choice from several populations.",
|
| 372 |
+
"author": "Katehakis, M. N. and Robbins, H. (1995).",
|
| 373 |
+
"venue": "Proceedings of the National Academy of Sciences of the United States of America, 92(19):8584.",
|
| 374 |
+
"url": null
|
| 375 |
+
}
|
| 376 |
+
},
|
| 377 |
+
{
|
| 378 |
+
"34": {
|
| 379 |
+
"title": "On Bayesian index policies for sequential resource allocation.",
|
| 380 |
+
"author": "Kaufmann, E. (2018).",
|
| 381 |
+
"venue": "The Annals of Statistics, 46(2):842\u2013865.",
|
| 382 |
+
"url": null
|
| 383 |
+
}
|
| 384 |
+
},
|
| 385 |
+
{
|
| 386 |
+
"35": {
|
| 387 |
+
"title": "On bayesian upper confidence bounds for bandit problems.",
|
| 388 |
+
"author": "Kaufmann, E., Capp\u00e9, O., and Garivier, A. (2012a).",
|
| 389 |
+
"venue": "In Artificial intelligence and statistics, pages 592\u2013600. PMLR.",
|
| 390 |
+
"url": null
|
| 391 |
+
}
|
| 392 |
+
},
|
| 393 |
+
{
|
| 394 |
+
"36": {
|
| 395 |
+
"title": "Thompson sampling: An asymptotically optimal finite-time analysis.",
|
| 396 |
+
"author": "Kaufmann, E., Korda, N., and Munos, R. (2012b).",
|
| 397 |
+
"venue": "In Algorithmic Learning Theory, pages 199\u2013213. Springer.",
|
| 398 |
+
"url": null
|
| 399 |
+
}
|
| 400 |
+
},
|
| 401 |
+
{
|
| 402 |
+
"37": {
|
| 403 |
+
"title": "Thompson sampling for 1-dimensional exponential family bandits.",
|
| 404 |
+
"author": "Korda, N., Kaufmann, E., and Munos, R. (2013).",
|
| 405 |
+
"venue": "Advances in Neural Information Processing Systems, 26:1448\u20131456.",
|
| 406 |
+
"url": null
|
| 407 |
+
}
|
| 408 |
+
},
|
| 409 |
+
{
|
| 410 |
+
"38": {
|
| 411 |
+
"title": "Adaptive treatment allocation and the multi-armed bandit problem.",
|
| 412 |
+
"author": "Lai, T. L. (1987).",
|
| 413 |
+
"venue": "The Annals of Statistics, pages 1091\u20131114.",
|
| 414 |
+
"url": null
|
| 415 |
+
}
|
| 416 |
+
},
|
| 417 |
+
{
|
| 418 |
+
"39": {
|
| 419 |
+
"title": "Boundary crossing problems for sample means.",
|
| 420 |
+
"author": "Lai, T. L. (1988).",
|
| 421 |
+
"venue": "The Annals of Probability, pages 375\u2013396.",
|
| 422 |
+
"url": null
|
| 423 |
+
}
|
| 424 |
+
},
|
| 425 |
+
{
|
| 426 |
+
"40": {
|
| 427 |
+
"title": "Asymptotically efficient adaptive allocation rules.",
|
| 428 |
+
"author": "Lai, T. L. and Robbins, H. (1985).",
|
| 429 |
+
"venue": "Advances in Applied Mathematics, 6(1):4\u201322.",
|
| 430 |
+
"url": null
|
| 431 |
+
}
|
| 432 |
+
},
|
| 433 |
+
{
|
| 434 |
+
"41": {
|
| 435 |
+
"title": "A nonlinear renewal theory with applications to sequential analysis i.",
|
| 436 |
+
"author": "Lai, T. L. and Siegmund, D. (1977).",
|
| 437 |
+
"venue": "The Annals of Statistics, pages 946\u2013954.",
|
| 438 |
+
"url": null
|
| 439 |
+
}
|
| 440 |
+
},
|
| 441 |
+
{
|
| 442 |
+
"42": {
|
| 443 |
+
"title": "A nonlinear renewal theory with applications to sequential analysis ii.",
|
| 444 |
+
"author": "Lai, T. L. and Siegmund, D. (1979).",
|
| 445 |
+
"venue": "The Annals of Statistics, pages 60\u201376.",
|
| 446 |
+
"url": null
|
| 447 |
+
}
|
| 448 |
+
},
|
| 449 |
+
{
|
| 450 |
+
"43": {
|
| 451 |
+
"title": "Refining the confidence level for optimistic bandit strategies.",
|
| 452 |
+
"author": "Lattimore, T. (2018).",
|
| 453 |
+
"venue": "The Journal of Machine Learning Research, 19(1):765\u2013796.",
|
| 454 |
+
"url": null
|
| 455 |
+
}
|
| 456 |
+
},
|
| 457 |
+
{
|
| 458 |
+
"44": {
|
| 459 |
+
"title": "Bandit algorithms.",
|
| 460 |
+
"author": "Lattimore, T. and Szepesv\u00e1ri, C. (2020).",
|
| 461 |
+
"venue": "Cambridge University Press.",
|
| 462 |
+
"url": null
|
| 463 |
+
}
|
| 464 |
+
},
|
| 465 |
+
{
|
| 466 |
+
"45": {
|
| 467 |
+
"title": "Boundary Crossing of Brownian Motion: Its Relation to the Law of the Iterated Logarithm and to Sequential Analysis, volume 40.",
|
| 468 |
+
"author": "Lerche, H. R. (2013).",
|
| 469 |
+
"venue": "Springer Science & Business Media.",
|
| 470 |
+
"url": null
|
| 471 |
+
}
|
| 472 |
+
},
|
| 473 |
+
{
|
| 474 |
+
"46": {
|
| 475 |
+
"title": "A finite-time analysis of multi-armed bandits problems with Kullback-Leibler divergences.",
|
| 476 |
+
"author": "Maillard, O.-A., Munos, R., and Stoltz, G. (2011).",
|
| 477 |
+
"venue": "In Proceedings of the 24th annual Conference On Learning Theory, pages 497\u2013514. JMLR Workshop and Conference Proceedings.",
|
| 478 |
+
"url": null
|
| 479 |
+
}
|
| 480 |
+
},
|
| 481 |
+
{
|
| 482 |
+
"47": {
|
| 483 |
+
"title": "A minimax and asymptotically optimal algorithm for stochastic bandits.",
|
| 484 |
+
"author": "M\u00e9nard, P. and Garivier, A. (2017).",
|
| 485 |
+
"venue": "In Algorithmic Learning Theory, pages 223\u2013237. PMLR.",
|
| 486 |
+
"url": null
|
| 487 |
+
}
|
| 488 |
+
},
|
| 489 |
+
{
|
| 490 |
+
"48": {
|
| 491 |
+
"title": "Some aspects of the sequential design of experiments.",
|
| 492 |
+
"author": "Robbins, H. (1952).",
|
| 493 |
+
"venue": "Bulletin of the American Mathematical Society, 58:527\u2013535.",
|
| 494 |
+
"url": null
|
| 495 |
+
}
|
| 496 |
+
},
|
| 497 |
+
{
|
| 498 |
+
"49": {
|
| 499 |
+
"title": "Explicit solutions to some problems of optimal stopping.",
|
| 500 |
+
"author": "Shepp, L. A. (1969).",
|
| 501 |
+
"venue": "The Annals of Mathematical Statistics, 40(3):993.",
|
| 502 |
+
"url": null
|
| 503 |
+
}
|
| 504 |
+
},
|
| 505 |
+
{
|
| 506 |
+
"50": {
|
| 507 |
+
"title": "Sequential Analysis: Tests and Confidence Intervals.",
|
| 508 |
+
"author": "Siegmund, D. (1985).",
|
| 509 |
+
"venue": "Springer Science & Business Media.",
|
| 510 |
+
"url": null
|
| 511 |
+
}
|
| 512 |
+
},
|
| 513 |
+
{
|
| 514 |
+
"51": {
|
| 515 |
+
"title": "Boundary crossing probabilities and statistical applications.",
|
| 516 |
+
"author": "Siegmund, D. (1986).",
|
| 517 |
+
"venue": "The Annals of Statistics, pages 361\u2013404.",
|
| 518 |
+
"url": null
|
| 519 |
+
}
|
| 520 |
+
},
|
| 521 |
+
{
|
| 522 |
+
"52": {
|
| 523 |
+
"title": "On the likelihood that one unknown probability exceeds another in view of the evidence of two samples.",
|
| 524 |
+
"author": "Thompson, W. R. (1933).",
|
| 525 |
+
"venue": "Biometrika, 25(3-4):285\u2013294.",
|
| 526 |
+
"url": null
|
| 527 |
+
}
|
| 528 |
+
},
|
| 529 |
+
{
|
| 530 |
+
"53": {
|
| 531 |
+
"title": "Repeated likelihood ratio tests.",
|
| 532 |
+
"author": "Woodroofe, M. (1979).",
|
| 533 |
+
"venue": "Biometrika, 66(3):453\u2013463.",
|
| 534 |
+
"url": null
|
| 535 |
+
}
|
| 536 |
+
},
|
| 537 |
+
{
|
| 538 |
+
"54": {
|
| 539 |
+
"title": "Nonlinear Renewal Theory in Sequential Analysis.",
|
| 540 |
+
"author": "Woodroofe, M. (1982).",
|
| 541 |
+
"venue": "SIAM.",
|
| 542 |
+
"url": null
|
| 543 |
+
}
|
| 544 |
+
},
|
| 545 |
+
{
|
| 546 |
+
"55": {
|
| 547 |
+
"title": "A nonlinear renewal theory.",
|
| 548 |
+
"author": "Zhang, C.-H. (1988).",
|
| 549 |
+
"venue": "The Annals of Probability, pages 793\u2013824.",
|
| 550 |
+
"url": null
|
| 551 |
+
}
|
| 552 |
+
}
|
| 553 |
+
],
|
| 554 |
+
"url": "http://arxiv.org/html/2410.02279v2"
|
| 555 |
+
}
|
20241004/2410.02458v2.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2410.03054v1.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
20241004/2410.03069v1.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|