Eric03 committed
Commit 905e38d · verified · 1 Parent(s): 0877330

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50)
  1. 2003.08413/record.json +32 -0
  2. 2003.14247/record.json +32 -0
  3. 2006.16668/record.json +32 -0
  4. 2007.14166/record.json +32 -0
  5. 2009.08257/record.json +32 -0
  6. 2009.10259/main_diagram/main_diagram.drawio +0 -0
  7. 2009.10259/paper_text/intro_method.md +168 -0
  8. 2012.01524/main_diagram/main_diagram.drawio +1 -0
  9. 2012.01524/paper_text/intro_method.md +69 -0
  10. 2012.07791/record.json +32 -0
  11. 2012.15355/record.json +32 -0
  12. 2101.04904/record.json +32 -0
  13. 2101.09178/record.json +32 -0
  14. 2104.04466/record.json +32 -0
  15. 2104.04923/record.json +32 -0
  16. 2105.01203/record.json +32 -0
  17. 2105.03491/record.json +32 -0
  18. 2105.12774/record.json +32 -0
  19. 2105.14491/record.json +32 -0
  20. 2106.00794/main_diagram/main_diagram.drawio +1 -0
  21. 2106.00794/main_diagram/main_diagram.pdf +0 -0
  22. 2106.00794/paper_text/intro_method.md +142 -0
  23. 2106.03357/record.json +32 -0
  24. 2106.03632/record.json +32 -0
  25. 2106.07630/main_diagram/main_diagram.drawio +1 -0
  26. 2106.07630/main_diagram/main_diagram.pdf +0 -0
  27. 2106.07630/paper_text/intro_method.md +107 -0
  28. 2106.09563/record.json +32 -0
  29. 2106.11613/main_diagram/main_diagram.drawio +1 -0
  30. 2106.11613/main_diagram/main_diagram.pdf +0 -0
  31. 2106.11613/paper_text/intro_method.md +75 -0
  32. 2107.08981/main_diagram/main_diagram.drawio +0 -0
  33. 2107.08981/paper_text/intro_method.md +60 -0
  34. 2107.10140/record.json +32 -0
  35. 2107.11298/main_diagram/main_diagram.drawio +0 -0
  36. 2107.11298/paper_text/intro_method.md +74 -0
  37. 2108.02479/record.json +32 -0
  38. 2108.06583/record.json +32 -0
  39. 2108.13499/main_diagram/main_diagram.drawio +1 -0
  40. 2108.13499/main_diagram/main_diagram.pdf +0 -0
  41. 2108.13499/paper_text/intro_method.md +159 -0
  42. 2109.09133/record.json +32 -0
  43. 2109.14651/main_diagram/main_diagram.drawio +1 -0
  44. 2109.14651/paper_text/intro_method.md +16 -0
  45. 2111.00162/record.json +32 -0
  46. 2111.06636/record.json +32 -0
  47. 2112.00735/record.json +32 -0
  48. 2112.02306/record.json +32 -0
  49. 2112.07194/record.json +32 -0
  50. 2112.07954/record.json +32 -0
2003.08413/record.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "arxiv_id": "2003.08413",
+   "month": "2020_03",
+   "year": 2021,
+   "conference": "AAAI",
+   "title": "Oral-3D: Reconstructing the 3D Structure of Oral Cavity from Panoramic X-ray",
+   "arxiv_url": "https://arxiv.org/abs/2003.08413",
+   "source": {
+     "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_03/main_diagram_database/2003.08413",
+     "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_03/tex_files_extracted/2003.08413",
+     "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_03/main_diagram_database/2003.08413/paper_text/paper.md",
+     "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_03/main_diagram_database/2003.08413/metadata.json",
+     "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_03/main_diagram_database/2003.08413/paper_text/paper.md",
+     "intro_method_from_kind": "markdown"
+   },
+   "files": {
+     "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.08413/main_diagram/main_diagram.drawio",
+     "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.08413/main_diagram/main_diagram.png",
+     "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.08413/main_diagram/main_diagram.pdf",
+     "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.08413/paper_text/intro_method.md",
+     "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.08413/paper.pdf",
+     "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.08413/latex_source"
+   },
+   "status": {
+     "copy_drawio": "exists",
+     "copy_png": "exists",
+     "diagram_pdf": "pdf_exists",
+     "intro_method": "exists",
+     "paper_pdf": "exists",
+     "latex": "exists"
+   }
+ }
2003.14247/record.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "arxiv_id": "2003.14247",
+   "month": "2020_03",
+   "year": 2020,
+   "conference": "CVPR",
+   "title": "DPGN: Distribution Propagation Graph Network for Few-Shot Learning",
+   "arxiv_url": "https://arxiv.org/abs/2003.14247",
+   "source": {
+     "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_03/main_diagram_database/2003.14247",
+     "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_03/tex_files_extracted/2003.14247",
+     "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_03/main_diagram_database/2003.14247/paper_text/paper.md",
+     "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_03/main_diagram_database/2003.14247/metadata.json",
+     "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_03/main_diagram_database/2003.14247/paper_text/paper.md",
+     "intro_method_from_kind": "markdown"
+   },
+   "files": {
+     "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.14247/main_diagram/main_diagram.drawio",
+     "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.14247/main_diagram/main_diagram.png",
+     "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.14247/main_diagram/main_diagram.pdf",
+     "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.14247/paper_text/intro_method.md",
+     "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.14247/paper.pdf",
+     "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.14247/latex_source"
+   },
+   "status": {
+     "copy_drawio": "exists",
+     "copy_png": "exists",
+     "diagram_pdf": "pdf_exists",
+     "intro_method": "exists",
+     "paper_pdf": "exists",
+     "latex": "exists"
+   }
+ }
2006.16668/record.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "arxiv_id": "2006.16668",
+   "month": "2020_06",
+   "year": 2021,
+   "conference": "ICLR",
+   "title": "GShard: Scaling Giant Models with Conditional Computation and Automatic Sharding",
+   "arxiv_url": "https://arxiv.org/abs/2006.16668",
+   "source": {
+     "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_06/main_diagram_database/2006.16668",
+     "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_06/tex_files_extracted/2006.16668",
+     "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_06/main_diagram_database/2006.16668/paper_text/paper.md",
+     "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_06/main_diagram_database/2006.16668/metadata.json",
+     "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_06/main_diagram_database/2006.16668/paper_text/paper.md",
+     "intro_method_from_kind": "markdown"
+   },
+   "files": {
+     "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.16668/main_diagram/main_diagram.drawio",
+     "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.16668/main_diagram/main_diagram.png",
+     "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.16668/main_diagram/main_diagram.pdf",
+     "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.16668/paper_text/intro_method.md",
+     "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.16668/paper.pdf",
+     "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.16668/latex_source"
+   },
+   "status": {
+     "copy_drawio": "exists",
+     "copy_png": "exists",
+     "diagram_pdf": "pdf_exists",
+     "intro_method": "exists",
+     "paper_pdf": "exists",
+     "latex": "exists"
+   }
+ }
2007.14166/record.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "arxiv_id": "2007.14166",
+   "month": "2020_07",
+   "year": 2024,
+   "conference": "IJCAI",
+   "title": "Towards a Framework for Learning of Algorithms: The Case of Learned Comparison Sorts",
+   "arxiv_url": "https://arxiv.org/abs/2007.14166",
+   "source": {
+     "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_07/main_diagram_database/2007.14166",
+     "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_07/tex_files_extracted/2007.14166",
+     "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_07/main_diagram_database/2007.14166/paper_text/paper.md",
+     "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_07/main_diagram_database/2007.14166/metadata.json",
+     "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_07/main_diagram_database/2007.14166/paper_text/paper.md",
+     "intro_method_from_kind": "markdown"
+   },
+   "files": {
+     "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2007.14166/main_diagram/main_diagram.drawio",
+     "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2007.14166/main_diagram/main_diagram.png",
+     "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2007.14166/main_diagram/main_diagram.pdf",
+     "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2007.14166/paper_text/intro_method.md",
+     "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2007.14166/paper.pdf",
+     "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2007.14166/latex_source"
+   },
+   "status": {
+     "copy_drawio": "exists",
+     "copy_png": "exists",
+     "diagram_pdf": "pdf_exists",
+     "intro_method": "exists",
+     "paper_pdf": "exists",
+     "latex": "exists"
+   }
+ }
2009.08257/record.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "arxiv_id": "2009.08257",
+   "month": "2020_09",
+   "year": 2020,
+   "conference": "EMNLP",
+   "title": "Compositional and Lexical Semantics in RoBERTa, BERT and DistilBERT: A Case Study on CoQA",
+   "arxiv_url": "https://arxiv.org/abs/2009.08257",
+   "source": {
+     "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_09/main_diagram_database/2009.08257",
+     "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_09/tex_files_extracted/2009.08257",
+     "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_09/main_diagram_database/2009.08257/paper_text/paper.md",
+     "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_09/main_diagram_database/2009.08257/metadata.json",
+     "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_09/main_diagram_database/2009.08257/paper_text/paper.md",
+     "intro_method_from_kind": "markdown"
+   },
+   "files": {
+     "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2009.08257/main_diagram/main_diagram.drawio",
+     "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2009.08257/main_diagram/main_diagram.png",
+     "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2009.08257/main_diagram/main_diagram.pdf",
+     "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2009.08257/paper_text/intro_method.md",
+     "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2009.08257/paper.pdf",
+     "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2009.08257/latex_source"
+   },
+   "status": {
+     "copy_drawio": "exists",
+     "copy_png": "exists",
+     "diagram_pdf": "pdf_exists",
+     "intro_method": "exists",
+     "paper_pdf": "exists",
+     "latex": "exists"
+   }
+ }
2009.10259/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2009.10259/paper_text/intro_method.md ADDED
@@ -0,0 +1,168 @@
+ # Introduction
+
+ The de-facto supervised neural network training paradigm requires a large dataset with annotations. Collecting a large number of data points is time-consuming, difficult, and sometimes even infeasible due to the nature of the task; medical diagnosis is a typical example. In addition, annotating datasets is also costly, especially in domains where experts are difficult to recruit.
+
+ ![](_page_0_Picture_12.jpeg) ![](_page_0_Picture_13.jpeg) ![](_page_0_Picture_14.jpeg) ![](_page_0_Picture_15.jpeg)
+
+ Figure 1: An example task that would benefit from learning with natural language explanation. The top-left corner shows an example image of a ring-billed gull. In the other three images (A), (B), (C), which one is not a ring-billed gull but a California gull? Given the natural language explanation *"Ring-billed gull has a bill with a black ring near the tip while California gull has a red spot near the tip of lower mandible"*, it would be easier to find that (A) is the correct choice.
+
+ In a traditional annotation process, the human-machine communication bandwidth is narrow: each label provides $\log C$ bits per sample for a $C$-class classification problem. However, humans do not rely solely on such low-bandwidth communication to learn. They instead learn through natural language communication, which is grounded in abstract concepts and knowledge. Psychologists and philosophers have long posited natural language explanations as central, organizing elements of human learning and reasoning (Chin-Parker and Cantelon, 2017; Lombrozo, 2006; Smith, 2003). Following this intuition, we explore methods that incorporate natural language explanations into learning paradigms to improve the data efficiency of learning algorithms.
+
+ Let us take a bird species classification task as an example to illustrate the advantage of learning with natural language explanation. Figure 1 shows several bird images. Based on visual dissimilarity, many people mistakenly think Image C is not a ring-billed gull, as it has a differently colored coat compared to the example. However, ring-billed gulls change their coat color from light yellow to grey after the first winter, so color is not the deciding factor for distinguishing a California gull from a ring-billed gull. If we receive abstract knowledge from human experts in natural language, such as *"Ring-billed gull has a bill with a black ring near the tip while California gull has a red spot near the tip of lower mandible"*, and incorporate it in the model, then the model will discover that Image A is a California gull instead of a ring-billed gull based on its bill.
+
+ <sup>1</sup>Co-supervised project.
+
+ Previous work has shown that incorporating natural language explanations into the classification training loop is effective in various settings (Andreas et al., 2018; Mu et al., 2020). However, it neglects the fact that there is usually a limited time budget for interacting with domain experts (e.g., medical experts, biologists) (Liang et al., 2019, 2020), and high-quality natural language explanations are by nature expensive. Therefore, we focus on eliciting fewer but more informative explanations to reduce expert involvement.
+
+ We propose Active Learning with Contrastive Explanations (ALICE), an expert-in-the-loop training framework that utilizes contrastive natural language explanations to improve data efficiency in learning. Although we focus on image classification in this paper, our expert-in-the-loop training framework can be generalized to other classification tasks. ALICE first uses active learning to select the most informative query pairs and elicit contrastive natural language explanations from experts. It then extracts knowledge from these explanations using a semantic parser. Finally, it incorporates the extracted knowledge by dynamically updating the learning model's structure. Our experiments on bird species classification and social relationship classification show that our method, which incorporates natural language explanations, has better data efficiency than methods that increase training sample volume.
+
+ # Method
+
+ Existing research in social science and cognitive science (Miller, 2019; Mittelstadt et al., 2019) suggests that contrastive explanations are more effective for human learning than descriptive explanations. Therefore, we choose contrastive natural language explanations to benefit our learners. A contrastive explanation answers a question of the form "Why P rather than Q?", in which P is the target event and Q is a counterfactual contrast case that did not occur (Lipton, 1990). In the example in Figure 1, if we ask the expert to differentiate the ring-billed gull from the California gull, the expert would output the following natural language explanation: "Ring-billed gull has a bill with a black ring near the tip while California gull has a red spot near the tip of lower mandible". Our explanations are class-based and are not specifically associated with any particular images.
+
+ **Problem Setup** We are interested in a $C$-class classification problem defined over an input space $X$ and a label space $Y = \{1, ..., C\}$. Initially, the training set $D_{train} = \{(x_i, y_i)\}_1^{N_{train}}$ is small, since our setting is restricted to be low-resource. We also assume that there is a limited budget for asking domain experts to provide explanations during training. Specifically, we consider $k$ rounds of interactions with domain experts, and each round has a query budget $b$. For each query, we specify two classes $y^p$, $y^q$ for domain experts to compare. Domain experts return a contrastive natural language explanation $e$. Each explanation $e$ guides us to focus on the most discriminating semantic segments for differentiating between $y^p$ and $y^q$. In this paper, a *semantic segment* refers to a semantic part of an object (e.g., "bill" in bird species classification) or a semantic object (e.g., "soccer" in social relationship classification).
+
+ To keep our framework general, we start from a standard image classification neural architecture. We formulate our initial model as $M(\phi, g_{pool}, f) = f(g_{pool}(\phi(x)))$: here $\phi$ is an image encoder that maps each input image $x$ to an activation map $\phi(x) \in \mathbb{R}^{H \times W \times d}$, $g_{pool}$ is a global pooling layer with $g_{pool}(\phi(x)) \in \mathbb{R}^{d_{pool}}$, and $f$ is a fully connected layer that performs flat $C$-way classification. This formulation covers most off-the-shelf pre-trained image classifiers.
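+
+ To make the decomposition concrete, here is a minimal PyTorch sketch of $M(\phi, g_{pool}, f)$. The `FlatClassifier` name is ours, and ResNet-18 stands in for the Inception v3 backbone actually used in the paper:
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torchvision.models as models
+
+ # Sketch of the initial model M(phi, g_pool, f): an off-the-shelf backbone
+ # split into an encoder phi, a global pooling layer g_pool, and a flat
+ # C-way linear classifier f. ResNet-18 is illustrative only.
+ class FlatClassifier(nn.Module):
+     def __init__(self, num_classes: int):
+         super().__init__()
+         backbone = models.resnet18(weights=None)
+         # phi: everything up to (not including) pooling -> d x H x W activation map
+         self.phi = nn.Sequential(*list(backbone.children())[:-2])
+         self.g_pool = nn.AdaptiveAvgPool2d(1)  # g_pool: global average pooling
+         self.f = nn.Linear(backbone.fc.in_features, num_classes)  # f: flat C-way head
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         feat_map = self.phi(x)                     # phi(x), channels-first (B, d, H, W)
+         pooled = self.g_pool(feat_map).flatten(1)  # g_pool(phi(x)) in R^{d_pool}
+         return self.f(pooled)                      # logits over C classes
+
+ logits = FlatClassifier(num_classes=25)(torch.randn(2, 3, 224, 224))
+ print(logits.shape)  # torch.Size([2, 25])
+ ```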
+
+ **Overview** ALICE incorporates contrastive natural language explanations by dynamically updating the learning model's structure. The high-level idea is to allocate a number of local classifiers, guided by the explanations, to assist the original model. Specifically, for each explanation $e$ that provides knowledge to distinguish two classes $y^p$, $y^q$, we allocate a local classifier dedicated to the binary classification between $y^p$ and $y^q$. We incorporate the knowledge extracted from explanation $e$ into the local classifier so that it learns to focus on the discriminating semantic segments pointed out by the domain experts. We first discuss the case where all local classifiers perform binary classification and then discuss how to extend them to support general $m$-ary classification.
+
+ **Progressive Architecture Update** The initial flat $C$-way classification architecture can be viewed as a composition of an image encoder $\phi$ and a global classifier $f \circ g_{pool}$. We discuss how the local classifiers are progressively added to assist the global classifier. As shown in Figure 2 (C), we first merge $b$ class pairs into $b$ super-classes in the global classifier. For example, in the first round, the global classifier changes from $C$-way to $(C - 2b + b)$-way. We then allocate $b$ new local classifiers, each performing binary classification for one class pair. Each local classifier is only called when the global classifier predicts its super-class as the most confident. We leave more complex conditional execution schemes to future work; we note that such schemes have potential for reducing computation runtime (Chen et al., 2020; Mailthody et al., 2019). During training, we fine-tune the image encoder $\phi$ and reset the global classifier after each round, since it is only a linear layer. A minimal routing sketch is given below.
+
+ ![](_page_4_Figure_5.jpeg)
+
+ Figure 3: Local classifiers with shared attention mechanism.
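+
+ A minimal sketch of the routing just described (our illustration under assumed names, not the authors' code): the global head predicts over super-classes, and a merged super-class defers to its dedicated local classifier.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ # Hierarchical routing sketch: `merged` maps a super-class index to the
+ # fine-grained classes it absorbed. Label bookkeeping is simplified.
+ class HierarchicalHead(nn.Module):
+     def __init__(self, d_pool: int, num_super: int, merged: dict[int, list[int]]):
+         super().__init__()
+         self.global_head = nn.Linear(d_pool, num_super)
+         # one local head per super-class formed by merging a class pair
+         self.local_heads = nn.ModuleDict(
+             {str(s): nn.Linear(d_pool, len(cls)) for s, cls in merged.items()}
+         )
+         self.merged = merged
+
+     def forward(self, pooled: torch.Tensor) -> list[int]:
+         super_pred = self.global_head(pooled).argmax(dim=1)
+         labels = []
+         for i, s in enumerate(super_pred.tolist()):
+             if str(s) in self.local_heads:  # merged super-class: call local classifier
+                 j = self.local_heads[str(s)](pooled[i : i + 1]).argmax(dim=1).item()
+                 labels.append(self.merged[s][j])
+             else:                           # untouched class: global prediction is final
+                 labels.append(s)
+         return labels
+
+ # e.g. C = 25 classes, one merged pair {3, 7} -> 24 super-classes
+ head = HierarchicalHead(d_pool=512, num_super=24, merged={0: [3, 7]})
+ print(head(torch.randn(2, 512)))
+ ```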
+
+ **Knowledge Grounded Training** The global classifier is trained on $D_{train}$, with labels adjusted according to the class-pair merging. For a local classifier corresponding to the class pair $y^p$, $y^q$, the training data consists of two parts: the training data points of classes $y^p$, $y^q$ in $D_{train}$, and the resized image patches of classes $y^p$, $y^q$ obtained in semantic explanation grounding (§4.3). We use the resized image patches as additional training data to emphasize these patches' importance. Take the local classifier distinguishing the ring-billed gull and the California gull as an example (Figure 2 (B, C)): this local classifier is trained on the training images of both species, as well as the bill patches of each of those training images. During testing, we only feed the whole image into the model.
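+
+ As a sketch of how a local classifier's training set could be assembled (assumed helper names, not the authors' code):
+
+ ```python
+ from torch.utils.data import ConcatDataset, Dataset
+
+ # A local classifier trains on the full images of its two classes plus the
+ # resized patches of the discriminating segments named in the explanation,
+ # e.g. ring-billed gull + California gull images and their "bill" patches.
+ def local_train_set(full_images: Dataset, segment_patches: Dataset) -> Dataset:
+     return ConcatDataset([full_images, segment_patches])
+ ```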
+
+ **Supporting m-ary local classifiers** So far we have assumed that each local classifier is a binary classifier, with the implicit assumption that the $b$ class pairs have no overlap. We can support overlapping class pairs as follows. If some class pairs overlap (e.g., class pairs (P, Q), (P, T), (T, U)), we allocate only one local classifier for them (e.g., a 4-ary local classifier for classes (P, Q, T, U)). We also merge all the relevant classes in the global classifier into a single super-class (e.g., super-class {P, Q, T, U}). The local classifier is trained on the union of the overlapping class pairs' training data, including patches. Grouping overlapping pairs amounts to finding connected components over classes, as sketched below.
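+
+ A minimal sketch of that grouping (our illustration): treat each queried class pair as an edge and take connected components, so pairs (P, Q), (P, T), (T, U) collapse into one group {P, Q, T, U} served by a single 4-ary local classifier.
+
+ ```python
+ from collections import defaultdict
+
+ def merge_overlapping_pairs(pairs: list[tuple[str, str]]) -> list[set[str]]:
+     adj = defaultdict(set)
+     for p, q in pairs:
+         adj[p].add(q)
+         adj[q].add(p)
+     seen, groups = set(), []
+     for node in adj:
+         if node in seen:
+             continue
+         stack, comp = [node], set()
+         while stack:  # depth-first search over the pair graph
+             cur = stack.pop()
+             if cur in comp:
+                 continue
+             comp.add(cur)
+             stack.extend(adj[cur] - comp)
+         seen |= comp
+         groups.append(comp)
+     return groups
+
+ print(merge_overlapping_pairs([("P", "Q"), ("P", "T"), ("T", "U")]))
+ # [{'P', 'Q', 'T', 'U'}]
+ ```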
+
+ **Local Classifier Design** Our framework is agnostic to the design of the local classifiers; any design can be plugged into ALICE. We provide a default design as follows. Ideally, each local classifier should learn which semantic segments to focus on and how to detect them. Since different local classifiers might need to detect the same semantic segments (e.g., bill), the knowledge of detecting semantic segments can be shared among all local classifiers. Therefore, we introduce a shared attention mechanism, parameterized by $M$ learnable latent attention queries $q_1, q_2, ..., q_M \in \mathbb{R}^d$ that represent $M$ different latent semantic segments. To keep our design general, we do not bind each latent attention query to a concrete semantic segment (e.g., we do not bind $q_1$ to "bill"); these queries are trained in a weakly-supervised manner. Following Lin et al. (2015) and Hu and Qi (2019), we view the activation map $\phi(x) \in \mathbb{R}^{H \times W \times d}$ of each image $x$ as $H \times W$ attention keys $k_1, ..., k_{H \times W} \in \mathbb{R}^d$. We compute the attention by:
+
+ $$Q = \begin{bmatrix} q_1^T \\ \dots \\ q_M^T \end{bmatrix}, \quad K = V = \begin{bmatrix} k_1^T \\ \dots \\ k_{H \times W}^T \end{bmatrix}$$
+
+ $$A = \operatorname{Attention}(Q, K, V) = \operatorname{softmax}\left(\frac{QK^T}{\sqrt{d}}\right)V$$
+
+ where $Q \in \mathbb{R}^{M \times d}$ and $K = V \in \mathbb{R}^{(H \times W) \times d}$. Each row of the attention output matrix $A \in \mathbb{R}^{M \times d}$ is the attention output for one attention query $q_i$, i.e., a descriptor of the $i$-th latent semantic segment. After the shared attention mechanism, each local classifier applies a private fully-connected layer on $\operatorname{flatten}(A)$ to make predictions. Each local classifier can ignore irrelevant semantic segments by simply setting the corresponding weights of its fully-connected layer to zero.
+
+ <span id="page-4-0"></span><sup>1</sup>torchvision.transforms.RandomResizedCrop
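+
+ A minimal PyTorch sketch of this shared attention module under the shapes above (class and variable names are ours):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ # M learnable latent queries attend over the H*W spatial positions of the
+ # activation map; each local classifier then applies its own linear head
+ # to the flattened attention output A in R^{M x d}.
+ class SharedLatentAttention(nn.Module):
+     def __init__(self, d: int, num_queries: int):
+         super().__init__()
+         self.queries = nn.Parameter(torch.randn(num_queries, d))  # q_1..q_M
+         self.scale = d ** 0.5
+
+     def forward(self, feat_map: torch.Tensor) -> torch.Tensor:
+         # feat_map: (B, d, H, W) -> keys/values K = V: (B, H*W, d)
+         kv = feat_map.flatten(2).transpose(1, 2)
+         attn = torch.softmax(self.queries @ kv.transpose(1, 2) / self.scale, dim=-1)
+         return attn @ kv  # A: (B, M, d), one descriptor per latent segment
+
+ shared = SharedLatentAttention(d=512, num_queries=8)
+ A = shared(torch.randn(2, 512, 7, 7))
+ local_head = nn.Linear(8 * 512, 2)   # private head of one binary local classifier
+ print(local_head(A.flatten(1)).shape)  # torch.Size([2, 2])
+ ```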
+
+ **Implementation** Our image encoder $\phi$ can be any off-the-shelf visual backbone; we use Inception v3 (Szegedy et al., 2016). We implement our semantic parser on top of the Python-based SippyCup (Liang and Potts, 2015), following Hancock et al. (2018). Our framework can support applications in other languages by substituting a semantic parser for the corresponding language. We provide more details in the Appendix.
+
+ **Dataset** We use the CUB-200-2011 dataset (Wah et al., 2011), which contains 11,788 images of 200 species of North American birds. We randomly sample 25 bird species due to a limited expert query budget. Following Vedantam et al. (2017), we make sure that each sampled species has one or more confusing species from the same *subfamilia*, so that the species are challenging to classify.
+
+ <span id="page-5-1"></span>![](_page_5_Figure_6.jpeg)
+
+ while Least Tern has a yellow bill, orange legs and feet.
+
+ Figure 4: Saliency map visualization. Guided by expert explanations, ALICE learns to focus on the discriminating semantic segments and makes the correct prediction.
+
+ In addition, each image in the CUB dataset is annotated with the locations of 15 semantic segments (e.g., "bill", "eye"). We use these location annotations to crop training image patches based on the explanations. We do not use any location annotation during testing. More details are provided in the Appendix, including the list of 25 sampled species. We experiment with a low-resource setting with only 15 images per bird species.
+
+ We employ an amateur bird watcher as the domain expert, since we do not expect general MTurk workers to have enough domain expertise. To further ensure annotation quality, our domain expert consults a professional birding field guide<sup>2</sup> before writing each explanation. We ask the expert, "How would you differentiate bird species P and bird species Q?". In total, we collect 67 contrastive natural language explanations (avg. length 18.45 words). We collect the explanations in an on-demand manner because our class-based active learning is empirically insensitive to the change of random seeds and hyper-parameters. Our semantic parser identifies 2.36 semantic segments per explanation on average. In each experiment, we conduct $k = 4$ rounds of expert queries, with a query budget $b = 3$ for each round.
+
+ <span id="page-5-0"></span><sup>2</sup>https://identify.whatbird.com/
+
+ <span id="page-6-1"></span>![](_page_6_Figure_0.jpeg)
+
+ Figure 5: Comparing the performance gain of adding contrastive natural language explanations and adding training data points on bird species' prediction accuracy. Empirically, adding 1 explanation leads to a similar performance gain as adding 30 labeled training data points.
+
+ **Discussion on the CUB Description Dataset** The CUB description dataset collects descriptions of visual appearance for each image rather than explanations of why the bird in the image belongs to a certain class (Reed et al., 2016; Hendricks et al., 2016). For example, an image of a ring-billed gull has the description: *"This is a white bird with a grey wing and orange eyes and beak."* However, this description also fits a California gull perfectly (Figure 1). So the crowd-sourced descriptions in the CUB description dataset are not ideal for supporting classification. We instead collected expert explanations, e.g., *"Ring-billed gull has a bill with a black ring near the tip while California gull has a red spot near the tip of lower mandible."*, to improve classification data efficiency. In addition, we also conducted experiments incorporating CUB descriptions (5 sentences per image), but we did not find improved performance in our setting.
+
+ **Model Ablations and Metrics** We compare ALICE to several of its ablations (Table 2) and evaluate performance on the test set. We report classification accuracy on species as well as subfamilia. For subfamilia accuracy, a prediction is counted as correct as long as the predicted species' subfamilia is the same as the labeled species' subfamilia. (1) *Base (Inception v3)* fine-tunes the pre-trained Inception v3 to perform flat 25-way classification. (2) *ALICE w/o Grounding* copies the final neural architecture from ALICE but does not have access to the discriminating semantic segments (§4.3). (3) *ALICE w/o Hierarchy* has the same neural architecture as (1) but has access to the discriminating semantic segments. (4) *ALICE w/ Random Grounding* uses randomly sampled semantic segments. (5) *ALICE w/ Random Pairs* replaces class-based active learning with randomly selected class pairs; the randomly selected class pairs are used to query experts and change the learning model's neural architecture. (6-8) *RandomSampling + x% extra data* augments (1) with x% extra training data points. (9-12) *ALICE (i-th round)* shows ALICE's performance after the $i$-th round of expert queries.
+
+ <span id="page-6-0"></span>
+
+ | No.  | Model                            | Species (%) | Subfamilia (%) |
+ |------|----------------------------------|-------------|----------------|
+ | (1)  | Base (Inception v3)              | 59.51       | 86.50          |
+ | (2)  | ALICE w/o Grounding              | 66.47       | 87.95          |
+ | (3)  | ALICE w/o Hierarchy              | 59.22       | 86.94          |
+ | (4)  | ALICE w/ Random Grounding        | 64.44       | 87.52          |
+ | (5)  | ALICE w/ Random Pairs            | 42.67       | 75.33          |
+ | (6)  | RandomSampling + 33% extra data  | 66.76       | 88.39          |
+ | (7)  | RandomSampling + 66% extra data  | 71.26       | 91.00          |
+ | (8)  | RandomSampling + 100% extra data | 75.91       | 91.58          |
+ | (9)  | ALICE (1st round)                | 65.46       | 86.07          |
+ | (10) | ALICE (2nd round)                | 70.83       | 89.84          |
+ | (11) | ALICE (3rd round)                | 74.46       | 91.00          |
+ | (12) | ALICE (4th round)                | 76.05       | 91.87          |
+
+ Table 2: Test accuracy comparison among variants of ALICE on the bird species classification task.
+
+ **Results** Our first takeaway is that incorporating contrastive natural language explanations is more data-efficient than adding extra training data points. Figure 5 visualizes the performance gain of adding explanations versus adding data points ((6-12) in Table 2). As shown in Figure 5, adding 1 explanation leads to about the same performance gain as adding 30 labeled data points. For example, adding 12 explanations (ALICE (4th round), 76.05%) achieves a performance gain comparable to adding 375 training images (RandomSampling + 100% extra data, 75.91%). We note that writing one explanation typically takes an expert less time than labeling 15-30 examples. As an estimate, Zhou et al. (2020), Hancock et al. (2018), and Zaidan and Eisner (2008) performed user studies and found that collecting natural language explanations is only twice as costly as collecting labels for their tasks. Our experiment shows that adding 1 explanation leads to a similar performance gain as adding 30 labeled training data points, yielding a 6× speedup.
+
+ Our second takeaway is that both the grounding of explanations' semantics and the hierarchical neural architecture improve classification performance considerably. Removing the grounded training image patches degrades ALICE's performance (ALICE w/o Grounding, 66.47%). Substituting the discriminating semantic segments' image patches with other semantic segments' patches leads to worse performance (ALICE w/ Random Grounding, 64.44%). The hierarchical neural architecture is also important. As shown in Table 2, a baseline model augmented with hierarchical classification (ALICE w/o Grounding, 66.47%) outperforms flat C-way classification (Base (Inception v3), 59.51%). Similarly, removing the hierarchical neural architecture from ALICE drops performance considerably (ALICE w/o Hierarchy, 59.22% vs. ALICE (4th round), 76.05%). ALICE morphs the neural architecture based on class-based active learning (§4.2). If we replace class-based active learning with a random selection of class pairs, ALICE learns a bad model structure that leads to reduced performance (ALICE w/ Random Pairs, 42.67%).
+
+ <span id="page-7-0"></span>
+
+ | No. | Method          | +33% data | +66% data |
+ |-----|-----------------|-----------|-----------|
+ | (1) | RandomSampling  | 66.76     | 71.26     |
+ | (2) | CoreSet         | 68.06     | 73.09     |
+ | (3) | LeastConfidence | 67.34     | 71.94     |
+ | (4) | MarginSampling  | 66.04     | 70.36     |
+ | (5) | EntropySampling | 66.91     | 72.52     |
+ | (6) | BALDdropout     | 66.33     | 71.65     |
+
+ Table 3: Species accuracy (%) of instance-based active learning baselines on the bird species classification task. ALICE (4th round, Acc 76.05%) in Table 2 (12) outperforms all instance-based active learning baselines with 66% extra training data (Acc 70.36%-73.09%).
130
+ Additional Experiments Table [3](#page-7-0) shows our experiments with several common instance-based active learning baselines. We show the test accuracy of adding 33% extra training data (i.e., 125 extra data points) and adding 66% extra training data (i.e., 250 extra data points) using the instancebased active learning baselines. In this case, we observe that ALICE with 12 explanations (Accuracy 76.05%, Table [2](#page-6-0) (12)) outperforms all instancebased active learning baselines with 250 extra data points(Accuracy 70.36%-73.09%, Table [3\)](#page-7-0). We delay the combination of instance-based active learning and our class-based active learning as future work. To testify whether ALICE could work robustly with smaller amount of training data, we present an experiment on CUB starting with as few as 5 images per species. ALICE with 12 expla-
131
+
132
+ <span id="page-7-1"></span>
133
+
134
+ | No. | Model | Accuracy (%) | |
135
+ |------------|--------------------------------------------------------------------|----------------|----------------|
136
+ | | | relation | domain |
137
+ | (1) | Base(Inception v3) | 33.67 | 45.39 |
138
+ | (2)<br>(3) | ALICE w/ Random Ground<br>ALICE w/ Random Pairs | 27.20<br>22.94 | 42.52<br>35.29 |
139
+ | (4)<br>(5) | RandomSampling + 20% extra data<br>RandomSampling + 40% extra data | 34.91<br>36.28 | 46.51<br>46.63 |
140
+ | (6)<br>(7) | st round)<br>ALICE (1<br>nd round)<br>ALICE (2 | 35.29<br>36.41 | 47.13<br>47.38 |
141
+
142
+ Table 4: Test accuracy comparison among variants of ALICE on the social relationship classification task.
143
+
144
+ nations (k = 4, b = 3) improves the accuracy of the base model from 49.76% to 62.80%, outperforming the base model with 15 images per class (Accuracy 59.51%, Table [2\)](#page-6-0).
+
+ **Visualization** Figure 4 shows how the explanations help the learning model. We visualize the saliency maps (Simonyan et al., 2014) corresponding to the correct class on four example images. As shown in Figure 4, the base model does not know which semantic segments to focus on and makes wrong predictions. In contrast, ALICE's local classifiers obtain knowledge from the expert explanations and successfully learn to focus on the discriminating semantic segments to make the correct predictions.
+
+ **Dataset** We also evaluate ALICE on the People in Photo Album Relation dataset (Zhang et al., 2015; Sun et al., 2017). An example is shown in Figure 6. The dataset was originally collected from Flickr photo albums and involves 5 social domains and 16 social relations. We focus on the images containing exactly two people, since handling more than two people requires a task-specific neural architecture. The details of dataset pre-processing are included in the Appendix. After pre-processing, we obtain 1,679 training images and 802 testing images. We experiment with a low-resource setting using 15% of the remaining training images (i.e., 264 images). We obtain explanations by converting the knowledge graph collected by Wang et al. (2018) into a parsed format. The semantic segments here are contextual objects like soccer; the knowledge graph contains heuristics to distinguish social relations by the occurrence of contextual objects (e.g., "soccer" for sports vs. colleagues). We use a Faster R-CNN-based object detector (Ren et al., 2017) trained on the COCO dataset (Lin et al., 2014) to localize the semantic segments (contextual objects) during training. The object detector is not used during testing. We set the number of rounds of expert queries to $k = 2$ and the query budget to $b = 4$.
+
+ <span id="page-8-2"></span>![](_page_8_Picture_0.jpeg) ![](_page_8_Figure_1.jpeg)
+
+ **Explanation: Sports team members appear with balls, while colleagues appear with laptops, books and ties.**
+
+ Figure 6: Examples of social relationship classification. Explanations are reconstructed from Wang et al. (2018).
+
+ **Results** We compare ALICE to several of its ablations (Table 4) and evaluate performance on the testing set. We report classification accuracy on social relationships as well as social domains. We observe benefits of incorporating explanations similar to those in the bird species classification task. As shown in Table 4, the base model with 40% extra training data (i.e., 105 images) still slightly underperforms ALICE with 8 explanations (RandomSampling + 40% extra data, 36.28% vs. ALICE (2nd round), 36.41%). As shown in Figure 7, adding 1 explanation leads to a similar performance gain as adding 13 labeled training data points. Our ablation experiment also confirms the importance of class-based active learning: if we replace it with a random selection of class pairs, ALICE learns a bad model structure that leads to reduced performance (ALICE w/ Random Pairs, 22.94%). The drop in domain accuracy is also significant. We suspect this is because the bad model structure confuses the global classifier: if the global classifier calls a wrong local classifier, that local classifier is forced to make a prediction on out-of-distribution data. In addition, our ablation experiment also verifies the importance of having knowledge beyond the localization model. Substituting the discriminating semantic segments' image patches with other segments' patches leads to worse performance (ALICE w/ Random Grounding, 27.20%). One reason is that there are many objects in each image; under our low-resource setting, learning on the image patches of random semantic segments may make the model latch on to sample-specific artifacts in the training images, which leads to poor generalization.
+
+ <span id="page-8-3"></span>![](_page_8_Figure_6.jpeg) ![](_page_8_Figure_7.jpeg)
+
+ Figure 7: Comparing the performance gain of adding contrastive natural language explanations and adding training data points on social relationship classification.
2012.01524/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
+ <mxfile host="app.diagrams.net" modified="2020-09-10T11:35:37.626Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36" etag="vVQKRc_xI0MO3L9_Qc7A" version="13.6.9" type="google"><diagram id="EDjnFcT19GF4L3TFRMHb" name="Page-1">7V1bd9s2tv41Xqd9MBbul8dYdjqzJunpNGnTmZcuSqJsJZKoI8mOk19/AIqkSBC8SSRD2VJbVwIhUMT+sG/Ye+OKjJbPv2y89cP7YOovrjCcPl+R2yuMCcdc/8+0fNu3KAj3Dfeb+XTfhA4NH+bf/agx7vY4n/rbTMddECx283W2cRKsVv5kl2nzNpvga7bbLFhk77r27v1cw4eJt8i3fppPdw/71uTnmfZ/+PP7h/jO8YWlF/eNRtg+eNPg674p7EPurshoEwS7/bvl88hfmLmLp2U/0NuCq8nv2virXZ0vfH/89o/3G3r3XU/KeE6+bN5vPl1TTPfjPHmLx+iJo5+7+xZPgb+avjEzqT+tgpVuvJl62wffjIv0h4fdchG9Ne2/ebudv1mFLRgS3brdbYIvyezpB7/xn+e7v/R7GL3/T+r97XP6w7f4w2q3+fZX+sN/0h8OXwo/xd/Kz1FMjOBxM4ke7/fRzX8X35e/3C1vJ8HsD+Uvv7y/jqHmbe79XckEIkT2Pf1pBkcRDX7xg6Wvf5Hu8PWAHhZh5CEFnLht4y+83fwpiz4vAvF9Mlxyh9+CuX42DKMFd80jGMBovTEJs2PsHz36Whov1kgJxKOBELcG2s9NbiD9JvXch6YQjo2giX8kNFFzaKILNEuhSSxokmOheY2FNRLpG5tkKNhER2ATXbBZhc2j2WYOm6x3bLKhiPSa2IQXbJZik1qSWByNTcKtkXrGpqjBNjfB42qaIPHrw3znf1h7IWW/ahMji87ZfLEYBYtgE36XTD1fziYJPlNX+ET641kZfp78zc5/LqV3fJUxwCCWTCAa/pVZYRTTPQUPohz4wEwBVoyGzOQ3nekExPWmGlZPdW5KYfhyMgOLLLPwZTOZFiiBkYhnMNZSqcpNPuYoP/mIdjX1oobeeq+nfl1/AhLz1hvHI8DSibmmEqj0S2RmiUKUmyUOBSA8P1FEos5QSmANWVUwV+Vzf/IMYqqyi9zCmWMGCUfg0F//JQ7Yoe7WvHDZ8nyxMwsyMAyd3+/CdZduS800/7/HIL5wvQ09MW90Bz3tz4eL8SjvPnx8H4+kf63rBuON3ZLreApDspjMjRzdvH3rZFQCKnjrZFQ1YXXAaX0hAfNSIGlLI4LCruDAcvNrNJoP0cdgs3sI7oOVt7g7tN5kKZCabaddrKFc6JoJr332d7tvkVPPe9wFuulw33dBsC6UBeV0SCt77oePOFmlthcvmkplr7YWdxrR+GUND2YNX+P8GlYUYNXnKhYXQAwGEPoFJM9jQgBE+8SEfM2cndbl7GJYnF1Vq7opt8xk4W2380mWVE0ncyAeDMIBF2nN2FKlj3RnlA9rr76OXRuxT+YVkhdpYxPBlN3D26Fvxbh9Exi9WgKHjipyWGikHfqWD9s3eYsV7/HRShZ3KVkAgAodSzePC9UprarsyjxzkT8/rUhFTd5ifm+c+BONPl+33xjFZz7xFm+iC8v5dBrqCi4lLas/dKdjIa1NZbwo2VXPRU79oi7fale6l6zSx6thQZywuDKhGNT/G+/f1NWuXzwcNJOgKgUHOSw4yI7hcCVursToWf93JW7PERrT+caf7OaB+ZqWDIZKXVpo0oJLBi0sjxbcp0tOuZTENtGCzhEh3aGB4qwoYXRQzINAnlchTzDkE3P9YL3HW+p1zfXiXb7WddeTlVKzxVW00q9j2laoj3WHrdBttdbvfUt1W5sO26N/vB3Kecq39Zv9r2tVXU6s7bah+/KBS2hWQmELuOw44FYMS2rhuDV4OOOKs5IuDvWOZRlyibXEAjtNMj44JGPOrKuSlvq55+ttkUxLodnbrveh6LP5s0F7K7ETHUjJGBuI4HhTOh0QIOPGtDA8tHYQs1KtHvULmnLl+wKgZFskG1amAUXycFJOOHUYAuUM0v2RcMqb9hcIxVdRDkI0ByHhUM5FV8o5qhNHe/TepivqtrvZvSYcpKwggYQ92TI32YRT14IVCuAWVmzw/vO/1eYP/zf4r5X3NBLT0T+RnvLKBVu1ACnNLkA4SWB8aNTQn82M19peqD0sQ2tpmVe4wV2T+IUBbsXMGaoMsXWDQ9j3u7QQys/o+di9qC87wQraJJYOX98yyA7Eam7YtGYMoGpjoFKwYpdgfbPbaaoY3yKG77xv/ubcHV9tREDHkjTt5GSOwN7O3FoI5SMPHfEm0WpN59i1tpBjZcbkqkweN0+JjM0I3Nyif/sWQllKmerskXhaO/d/USvtgPNaq7qxq8rOtUPlzik7x4T04o7CLXjWnSymnVFGupNhORj+qdUBDTfze0v3gE/81Wz000T/+bl6p7m92L0TLIw7dqPuREssUVrxCRInKms6GJu4VJ4OvRu4XOmJ5MuFUbauR7GsAtwVo6TQYnykF8ZX7od/YaiqjA5NtJ8fhr/GsCmXl/n+qKE8ltn+WPUCyw7CqYqca/tR5nFDOoJ9Xinwjoq42vj6V0WZZAavEY11b6ZFr4lKN2sk8i6ilNWw8Gc7h82wM+vnZqsl63x1/zFcTNe0HWnIWNY8IDIvCl2hg91ZByUpLnoGVk4kRF4cg4LN/fgnGOpP+v7w8E4/6c8tqG8lDty7iuCKpHn/GC8RTRQzAGkqr9S2PWP/VFrRcoXYdAYv7EyYsWjQdfotkSqbfpuNOxEqb6I7cwOJkp1Nk8vX2pZ3O9wz8PnEmYo/FWrcZGP+QNFqmFZPaHe4q1MSpi238Ww2wwWzy8ec8W5mN77KAc+8so4IlGcB3PqGyFOmoEsHdKpT8exV0AnmN30GRag6dWxeBqGolhbFhCLQsZs+JEJV5SyfqtonqnmSOHHIQi1rcm2wN89fHepeQVvoIwSQzMtK35J5ZaXoK2n4dae6tJFqVydruO4+X09ZdnEVm0PaleWqrJ93JSpG6jjXKglObDmpnVAXF7nzJg+GSsFmaoxFNvrp69/zvW8cBjPjKnrw9d9pMHlc+uGN5ltD9uXYn5r1h+HjVhtS0Xfvom/qqQmiJn8/XFVWVxucpz5HaWxUFuK/PicRFkSTojkZtdwVamDXIWuRWRTvPTf3NhhJF71FjHTsbvhamc3xChwOWEAAs04G4RBISXHafnwM0qWangmo8AVUmDMbVDIWST8QVG36RXsG1fwCKgenUiJvuKFene24JF158KBqkvr6GgCW51oIOlIYscvx2SHCijOgB46w6nTZV4AqlkfVEBQsV7GkMwHVRcFygUr+eFCVZN8PHlQXBcsFKsUdWnuvKfxYucpCn
QmoLgpWFcC0gpWv89yzgqWanTXyQtzdhKCyEIrjvd9ENRy4a2d4/AO6zDNsYXVgK05Rxnll6SwynMcDsc+NanFl9JCh2QpfsWcurwqlWE928khnk+dyN714tkIVKQ1gg7BeoYzcwIyRvPCwB+uYlZCYhD8y9k1ZxQAQzBdMli4BSjo7ZaLOaV2nxLpNmS+n9MoRPCLx2KQb1N6+P1CwUaybez4hwIITiRRUjIqYN3cwvX1GVB11xM/Jcx1fteMecAbqAkDJKBaIMC4kUXnVkQpgjgnRshNS5j6YpqBLB2TrM75qyGTjBAguJaVh8ixjjmSxQdGtjuB+DXRDAgOKMOSQaHMZCpZ3BgyKbt1Vjm0QJvcKY+KOQB62Q2dtI5EDLBGFAhKGBJOOXZiCETI6rQCSU2L8oBJSiFRX0CM1apfv9cIcA/CR1m6EiwEoLojn1m06UyaJyJtOTuWnsxKuCRPqSpk8asIryH66MtnhfPaaN8HMP05xFr4ilmsV6CGkm0l/iVol6dMYOEdynpm2SXrNwjhDep6ZFkoGkazxGrXQ5sh7aVpojROwMtUiqtzP1dUkHNVnDLMoOfKs4XHlqVIZpxxYXl6horKWReLhHIiXXVhqO7Xj++t61YVdfMo++rzAo960CAe3tmhwRVENwdx2SVF/afdnFf3t30N7KNpBYiV/AAu07cVWvYRisT6QJSTtakL2NurQlpAN2fg+dZcElb1AvM1wndaLj8jiaJ0rNlo+XmJ0FLX3WHM6T79RhSQ+COMcEbWd3y+9C6hsULH4rNa0Jo16RRUvzjCbzp+cpDckuI4m0dD+8+N2N5+ZxwxnCdZKlm0MMOcoIc3CnFnuLQ3ZVuPtev8rwmTbfGuw1rpDvn2fhKtHAlf7XNuPf8cJuF/nO5PW65nJrHmXB23U+SvHhe3O2/lbxwWTH1xv8KrVE1It23oGhPRXk2BqyiqP7Cf2UqWXv4ZLwiRNe6tpTM3rr/OtyaueJKVPn8LSp2G3jbly76/8jZ74KWhlOk970qOJUjRQml1+Mo/ra8CaqdS66Bc/mbUwwXz7uNR/f/p0/fHNrz+XwG7rQvVhWsfeNhwucGG8YIkVsPdWcVtnitabYB1sDKAc67D0R6bbwsq6+jm/nMx5EtgWzHfYZaOF3DpYTfflAUxBAPdd4TLYOob6EXOv53kcSuXk0eFPHyPcZX9fWN8gXqOOhQzKf+lLK24gc3FtkACHooCcCQi20dWiplAcVnAGAubjgwtaV1ENDg3Wib/dM7VxaIzq/26CT4abxpLJTKkpx6H//PnmbrvXnkM59LJlyiRYrh9DrSXiMQtvty9fsg291Xt96XtUBNxmS1FJk5pM8nf/Wq8dzyBWr7u91M91+rgJ+W5OU/gxfG4WLBbB1wNyNr4G2Xa3eZxESsvS91avnYUlBkymQIvLfmbd8a/TT1A+7dCf27rk7uPQn+a7k0laY7y/nA+37vVwH8JrbPN0HYSOkoCVGOqObVnEKMDpF8lPU3cVWJNQps4qsMqJ7y5pOJaMsiZblwlJGwUSCVpZo7C7eqxE1Ih5a7F8ZFvVbpvMdQJ2QDMvy0WaRz7F1dvCRX06oJTLg/oqKeWoyDowUvVb5Hi4pCIob/kNjFTFhe8uVVn7jv1pjj9MgUy/smfYuIqyFnyjl5qsxFms+Rx2wb6/uO2vNhGoHOpxEpCAgKP6VL+nPRBRw9g4NWQlDkyhV8cHpjjORhvpV5kNWB2yElsQAwlZQdDO1rC9BnVjVhDklriFwj6YraWwFQTt0CxeHrdy8I/EX6BVX1D2tMjyL9iHYVfeAdGmd2j6DEg2vAO245eiQ2i7De9xlgbPCAuna0jMjHMo40I6+rjIvVAxfD4M2LlJwizYKJjqvmzkr7fzhfFBlgqeAUuY5k4qaJVGwNyV9dCr5JDFkWB9QiWFBwOUpYGNt3vQdLkSN79eiVujF43Qz68KLcTiHUQ4jiynql+81Ehs6tytSa2ZSarJDset6ayN3aJbc4ym05lTpUJQEOU30IkTkp6RW9NZJborD0yN7P/7jTed+5mcLOF73G9iHTegwwtxeTrLMl+oeG7uUGft49dHRg4gPGdXaUm14YurtG9XaXP8nZmrtNUyxK27SksKxt6+ElfpERxQAIQyOneWA4q8IOvZT6pwDUnVqNpj1hsKpEg7RBHQeKxwiYaffvM3c/2Ihrm4/aRJkmBT6g3EKWqfvh5XQ2zsEtVmB4KHl3VKX27crktNOutenzmghlWqFBFknQPK5JHYoVgClCp6WjFux9ihsNL1Vh3yB11yChUrSacNTPMDw3wTaabdHZpEreGrBW+pGtjI6ZJPyS4q1eKwOcbhP05LJXy15CbkyHaG0ZyYdRWzorArMUuhKz5mMLpdyTb4szkMYONPLgcBhMgiUJu0B20ue6wlIw6Loit1Lnj/+d9q84f/G/zXynsaienon+i6his6nF5/c/fk72c5XNQP3jSUxmb+k41u2NA21MuYktDKd/gsFt7YX/ymyRhG2Tssx3dWh3Gw2wXLQvrmLNDgcbeYr/TvCx3nsZB/8NbmuZfPmhutH8DT0xQk57m2Aolrc0Z3um64ZWVKyZzJQFwAh3HZynmnTmQUh+GcdLCucwPrg7dch4lkt4ejc03OyXOUc1LGP17+YbjXCGbZhrYQ8+EyxJUs1tlpuHrhFuLjDJLF7g5HMpvTnE3qk/sA531+cTp7bPeg1Z97kzD+7sPH91cvPDcsSt00Py/OeU9y3MMl+pCcgL3P2J7swZPps036HDKMC8W/Kz8tkx07hJyw5MRu/WB3Wt/Ri1DcRs+401ZjOLq/fmVZYUJm/fXSeWq3S5FuIynMKcVqbKv0XCgOQImv0sXiDDMvLRenP9i+iJR34+DM+E/yDF3UkEvOyKmMJhTDKiHHrZA2eWwsIbUC0bg9UFsl5CxPnyTlIXXcigoUsLy/ndEmWi4J51yIw6vYCCDimYVI5Cnr8LIMKyo5Cs3qUn7n7MaG5BxoNTe5ag1fe4kibMxtyYTxVDKR1Zt5zQPZGi9YqzCeqqr5aEU3y4pAYWHFasmq8a3+oqKAnkClDKGSQYmK32MfWxYzrEIq2onIVf1Rs/6EZU1uLnpggPFvHDIHlLyicm05CxQXHlixhWcyXVgRD1RSAqHgqTxQc7kyHkgYkN2wQWHpR0n0bd2CovEZHXX7iwq+afOpKr7AbL5TUYuX2nytZT7ihLQkObZh0P0h+hhsdg/BfbDyFneHViuAJsVE3PWrDzZFcu2QoBRe/Ozvdt8+7K1HY0gaf2py53dB6EQvtSorF3m8dNtb47UX70kWJ2phS/KkKiRv6roXBlmFhCCYO/0yOTC2h0okbpoWOzv7oenorGlKhQRZkjoOX+qKoL++/VXdLuHn+8mff3y+F98e/vpffJ1fox/DqmGVDrhhxDs2r0trachK5QnQ1Wn3TgLg1hdUQZzsm7B3cRDsuYa8HuGcFTZjTezQH4WCYrY6Ph4CN4UUf7mkzdqzIo5c/FF0Ld47PoGuo9dHV8seUcyRu9gnXYtPfDqB
rrevnq4IKdIbYd3hZs6jgFveV44k+8ybZAd68BdPviFHRj8+cjf8ZhGY2qHwdu7db7xlaPatN/7WlBgPays7KsSajdxJtHs79Tfzp6gw6b4W7D4mIF2lfBlMHxe+KSQf3u1Qh3z1uBz7mzBMIPXVYLMvL6s/fUu6avBNHhdRzXL4ZhVeMN6+/c95E28mxxvNN1GDKTa6r9Tq7axLU3/tr/axDCtrmNm+Em5YWX1f7jV8RFCoBbm3rd0LssLSaGHBaANMcaCXCVdo/xdbC4iFPi/GOFWIK5IPj5EUAsQopTD6m19diHGgpKCUMCgJ5PEBlO1bHzVcwV0ngF8j++Qph02GhIsLIcgQwB3NTdXR1m2GnaWizfaloDMcIQxkHesVGUd2vKwwtHJkNsi8RQJQIQ7/ZkBFJQRQKaF7mf8layrjb+8ocO330c1/F9+Xv9wtbyfB7A/lL7+8d1j+OcI0Cm8vqfLkiHW/4+afLulBIAFammPIEZYKSpFXGRXFgDGp+SXUC5wjR1WHcIk78jxaJ0exH2Dd7lr/GJ4k8knL4lA50JSajx8jeb4/DuZrjVWum9eFC7rdtIgkSbssFbs4gbszgFGi0UOJlgycIUllNnsMQQUk5pIzLhCnSORrSGOkANWIgpJLqXlCPEIGf5gBFn5fIW02iNjH3zr+ij0Q6+OxdhefNaR7vPc00p4jkN1ZEHvtWOLCnBgrIOOYakxZ6f5UAJhCknRASVIAmWSCKQYpj5NjM0iiWleBTAmIqMIsKRHcOpJcmRSdcLKU1pIGWqKy+MtxPZXlAkDjFwdYIwtJonGIuVWYknIgSMSBOHJUn8DaLNHQIlqmQqy7uABICCDmy7qHkgyKjvBX7HPrQpI6wJccqHYBXYWKpgjQ0k+rxho26lD3KalRJoEWowYqlGtRyvK+oTqw0/YuxRHPI6gNW9YJu2K/UcuwC22giO9lhOqbC+paQB2TBGiFjmj1ThImj+V1CnCSiFrSQoyFE3TFToJurIb9+YOjxHOYwd/o748XBLaAQEm0MogxlIox3VnmfXi12J4AVCZmA+8KgcVHDeUQ6PQM9YKFaPejQ+3J6EeUKoYQliI+97w+PWuo7wwCjlLae1fqU3Hhro7UJ4cPYvf39MJJTgdeDVFWx27EQIiD+tSCA8K98+bAnUXTqkDwOIYzykGJojjjKwWR3AWJK4cw7bLq8+lCOw2JnI77LPPIpMM+y+ydoUR2IwWk4hqTpoASl9RCpJ7mBJDYtgHrRnZLDVvKNOYRlVrXsm8CMYCJt0RBlb1Jx6V4atTqrcTxkZg8Bv/DwfHAiowJZLy7GqXGW0FpFmLGdctjhmiwfByOiV4OerEIbdxSbP6X9fsxbQCrg8uE9ltTqk5V5FcFZFoTyGpQQDbHRgChCBQIckIospil2es/WAnIwljtemiMKkD1KFpdEAzHdVuSvDgCoDpYw5bruWMg18i4fV1AVjWBLIcFZEI4IJxrka8RRHG24p4wvFIUmbu1YVx6D2mCa2RqD6VXGNcJlnldOOY1cSwGhWOsjA5MKdMSn3NELecP5EDrzonX+0gNufwmChIgkDy4JPoFcgt1Tl8WkB1ZvGWAHwiQGZZGqmszCykpMc3uGWr0AY4OGvKRikX5TbgkQCsbyavfir2oDZ/FkaffnTeQh6UicyZNII42wSDnStleNMqARlYSgnNktelyVx03Ke8i56rrC8itOi2OA3I28bq8/EL3QBY1gTwsFZlrqS+ElFBxrQtjqwqZogiQkxmyOUrRWHpCcan0PbIMWSvNCEAaOvdC5aJfHNc5A+hVaRa4Jo6H5UQmmlcSIhRTmhViaJlhWGDNkAu119rOt9KbEIk1jg+2Xs9AVnncNqxt4axC4ahWUVkDo6ISzkmn97YEffccyprYr6tVNwN106Ix5XAUUvNuYgdvHgdtbk4zZMkejJQWgjuo1RLn613w3AOe67o7XgqeFUKA8YPLhHePZ9Qg7OW0KIl9bWV4t5oEU3/zPyZvMVd/eChREjM58ScFURK+x8fhMRW5KImx1BaWO6H41CiJxLjC2r7b6wrGfZvPBMICAUR1P0yVRIJIVyqQ1AYcTG0rq+IVc1IkBHb5x04Fl/OgClNgxyoiDamjzdWPONrACW2ue8gBYbvtw1laSztUEnCCzJY0Y0zzv6wayyGAIkE+dwSfEUEBpVBzT4Q4w8xx+C6CHJgcpWQJtRB85oZ+mx41q1xbbY/aj3JElDKDMwufqNp1JhCQVE7TsSe4URNnzSHTzFjfSqjsNsePdarh3hJRDvHYf0Z520Phmj6fwqnv5ppTJoiaurimUhxJ1inXNNX1GMVKMAqFpHYRZoqAoHGyU3K1qcLQfhKAG2itRuq8BO+tw1IqXaJDYZoUESA1YiCBmCOFs6gUUAINtNhsV1b4Qf0YB0wAZVRgKTlE2gprcpfODzIszqM69nQYd4VJU1/SVWESFXJPe0kdXWHybJVNYgJgBNGqpiJhAksGOUYTRTCVaZe3s5jpYcoxMMKISWNxrCV3lw4OFizOdOgHafiCtGIBjQTWbEhobmWkrKJZVqjxATQDi6wazTCHDTVn6FYGajWBc2wxMHKB2rFQYwQBKdhhu2nYSKss0Nwx0ugFaUeKT0ZMJPZBeuaNjkEBrbjMRz9AYxeglQBNAkW5oFo2EiGwpeFrPY2l9s/z5WQGBbTectw/hY8XFq1874cnKA7EizKbTaZYumE2m3ksdbxN2r8i1Bh2s69SL+2ZQqBYrnJas+xTLZqROJSMoawzmPW2o2cO1sYwyn5uXkqxP9DNcNFm3mw2Ve7NvCkfc9YpbysHnUIUaHMgSbUfOOgqTdCOhSi/CNFj7YKBSFH9cRMYmh68cOa89vfB1Dc9/h8=</diagram></mxfile>
2012.01524/paper_text/intro_method.md ADDED
@@ -0,0 +1,69 @@
+ # Introduction
+
+ Topic models (Steyvers and Griffiths, 2007) are widely used to extract abstract topics that occur commonly across documents in a corpus. Each topic is interpreted as a group of semantically coherent words that represent a common concept. In addition to providing insights into unstructured texts, topic models have been used in several tasks of practical importance, such as learning text representations for document classification (Nan et al., 2019), keyphrase extraction (Wang et al., 2019b), understanding reviews for e-commerce recommendations (Jin et al., 2018), and semantic similarity detection between texts (Peinelt et al., 2020).
+
+ Early works on topic discovery include statistical methods such as Latent Semantic Analysis (Deerwester et al., 1990) and Latent Dirichlet Allocation (LDA) (Blei et al., 2003), which represents each topic as a probability distribution over the word vocabulary (known as the topic-word distribution) and performs approximate inference over document-topic and topic-word distributions through Variational Bayes. This was followed by a Markov Chain Monte Carlo (MCMC) (Andrieu et al., 2003) based inference algorithm, collapsed Gibbs sampling (Griffiths and Steyvers, 2004). These methods require an expensive iterative inference step that has to be performed for each document. This was circumvented by the introduction of deep neural networks and Variational Autoencoders (VAE) (Kingma and Welling, 2013), where variational inference can be performed in a single forward pass.
+
+ Neural variational inference topic models (Miao et al., 2017; Ding et al., 2018; Srivastava and Sutton, 2017) commonly convert a document to a Bag-of-Words (BoW) vector determined by the frequency count of each vocabulary token in the document. The BoW input is processed through an MLP, followed by variational inference which samples a latent document-topic vector. A decoder network then reconstructs the original BoW from the latent document-topic vector through the topic-word distribution (TWD). VAE-based neural topic models can be categorised by the prior enforced on the latent document-topic distribution. Methods such as NVDM (Miao et al., 2016), NTM-R (Ding et al., 2018), and NVDM-GSM (Miao et al., 2017) use a Gaussian prior. NVLDA and ProdLDA (Srivastava and Sutton, 2017) use an approximation to the Dirichlet prior, which enables the model to capture the fact that a document stems from a sparse set of topics.
+
+ <sup>*</sup>Equal contribution.
+
+ <sup>†</sup>Work done during summer internship at Adobe.
+
+ However, improving document encoding in topic models, in order to better capture document distribution and semantics, has not been explored much. In this work, we build upon the VAE-based topic model and propose a novel framework, TAN-NTM: *Topic Attention Networks for Neural Topic Modeling*, which processes the sequence of tokens in an input document through an LSTM (Hochreiter and Schmidhuber, 1997) whose contextual outputs are attended over using the topic-word distribution (TWD). We hypothesise that the TWD (being learned by the model) can be factored into the attention mechanism (Bahdanau et al., 2014) to enable the model to attend to the tokens that convey topic-related information and cues. We perform separate attention for each topic using its corresponding word probability distribution and obtain topic-wise context vectors. The learned word embeddings and the TWD are used to devise a mechanism that determines topic weights representing the proportion of each topic in the document. The topic weights are used to aggregate the topic-wise context vectors. The composed context vector is then used to perform variational inference, followed by BoW decoding. We perform extensive ablations to compare TAN-NTM variants and different ways of composing the topic-wise context vectors.
+
+ For evaluation, we compute the commonly used NPMI coherence (Aletras and Stevenson, 2013), which measures the extent to which the most probable words in a topic are semantically related to each other. We compare our TAN-NTM model with several state-of-the-art topic models (statistical (Blei et al., 2003; Griffiths and Steyvers, 2004), neural VAE (Srivastava and Sutton, 2017; Wu et al., 2020), and a non-variational-inference-based neural model (Nan et al., 2019)), outperforming them on three benchmark datasets of varying scale and complexity: 20Newsgroups (20NG) (Lang, 1995), Yelp Review Polarity, and AGNews (Zhang et al., 2015). We verify that our model learns better document feature representations and latent document-topic vectors by achieving a higher document classification accuracy than the baseline topic models. Further, topic models have previously been used to improve supervised keyphrase generation (Wang et al., 2019b). We show that TAN-NTM can be adapted for topic-assisted keyphrase generation, achieving SOTA performance on the StackExchange and Weibo datasets. Our contributions can be summarised as:
+
+ - We propose a document encoding framework for topic modeling which leverages the topic-word distribution to perform attention effectively in a topic-aware manner.
+ - Our proposed model achieves better NPMI coherence (a ∼9-15% improvement over the scores of the existing best topic models) on various benchmark datasets.
+ - We show that the topic-guided attention results in better latent document-topic features, achieving a higher document classification accuracy than the baseline topic models.
+ - We show that our topic model encoder can be adapted to improve topic-guided supervised keyphrase generation, achieving improved performance on this task.
24
+ # Method
25
+
26
+ In this section, we describe the details of our framework where we leverage the topic-word distribution to perform topic guided attention over tokens in a document. Given a collection $\mathcal{C}$ with |C| documents $\{\mathbf{x_1}, \mathbf{x_2}, ..., \mathbf{x_{|C|}}\}$ , we process each document $\mathbf{x}$ into BoW vector $\mathbf{x}_{bow} \in \mathbb{R}^{|V|}$ and as a token sequence $\mathbf{x}_{seq}$ , where V represents the vocabulary. As shown in step A in figure 1, each word $w_j \in \mathbf{x}_{seq}$ is embedded as $\mathbf{e}_j \in \mathbb{R}^E$ through an embedding layer $\mathbf{E} \in \mathbb{R}^{|V| \times E}$ (E = Embedding Dimension) initialised with GloVe (Pennington et al., 2014). The embedded sequence $\{e_j\}_{j=1}^{|\mathbf{x}|}$ ,
27
+
28
+ where $|\mathbf{x}|$ is the number of tokens in $\mathbf{x}$ , is processed through a sequence encoder LSTM (Hochreiter and Schmidhuber, 1997) to obtain the corresponding hidden states $\mathbf{h}_j \in \mathbb{R}^H$ and cell states $\mathbf{s}_j \in \mathbb{R}^H$ (step B in figure 1):
29
+
30
+ $$\mathbf{h}_{j}, \mathbf{s}_{j} = f_{LSTM}(\mathbf{e}_{j}, (\mathbf{h}_{j-1}, \mathbf{s}_{j-1}))$$
31
+
32
+ where $H$ is the LSTM's hidden size. We construct a memory bank $\mathbf{M} = \langle \mathbf{h}_1, \mathbf{h}_2, ..., \mathbf{h}_{|\mathbf{x}|} \rangle$ which is then used to perform topic-guided attention (step C in figure 1). The output vector of the attention module is used to derive the distribution parameters $\mathbf{z}_{\mu}$ & $\mathbf{z}_{\log \sigma^2}$ (as in VAE) through two linear layers. Using the re-parameterisation trick, we sample the latent document-topic vector $\mathbf{z}$, which is then given as input to the BoW decoder linear layer $\mathbf{D}$ that outputs the reconstructed BoW $\mathbf{x}_{rec}$ (step D in figure 1). The objective function is the same as in the VAE setting, involving a reconstruction loss between $\mathbf{x}_{rec}$ & $\mathbf{x}_{bow}$ and a KL divergence between the posterior and the prior (a Laplace approximation to the Dirichlet prior, as in ProdLDA). We now discuss the details of our Topic Attention Network.
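+ The end-to-end flow of steps A–D can be sketched as follows; this is a minimal PyTorch sketch with illustrative names (`TANNTMSketch`, `to_mu`, etc.), not the authors' released code, and it elides the KL term and the attention module (sketched at the end of this section).
+
+ ```python
+ # Minimal sketch of steps A-D; illustrative names, KL term omitted.
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class TANNTMSketch(nn.Module):
+     def __init__(self, vocab, emb_dim, hidden, n_topics):
+         super().__init__()
+         self.emb = nn.Embedding(vocab, emb_dim)            # E (GloVe-initialised)
+         self.lstm = nn.LSTM(emb_dim, hidden, batch_first=True)
+         self.to_mu = nn.Linear(hidden, n_topics)
+         self.to_logvar = nn.Linear(hidden, n_topics)
+         self.dec = nn.Linear(n_topics, vocab, bias=False)  # dec.weight.t() plays the role of D
+
+     def forward(self, x_seq, x_bow):
+         e = self.emb(x_seq)                    # step A: (B, n, E)
+         M, _ = self.lstm(e)                    # step B: memory bank of h_j, (B, n, H)
+         # Step C in the paper replaces this mean-pooling with the
+         # topic-guided attention sketched at the end of this section.
+         c = M.mean(dim=1)
+         mu, logvar = self.to_mu(c), self.to_logvar(c)
+         z = mu + torch.exp(0.5 * logvar) * torch.randn_like(mu)  # reparameterisation
+         x_rec = F.log_softmax(self.dec(z), dim=-1)               # step D
+         recon = -(x_bow * x_rec).sum(-1).mean()
+         # The full objective adds KL(posterior || Laplace-approx. Dirichlet prior).
+         return recon
+ ```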
33
+
34
+ We intend the model to attend to document words such that the resulting attention is distributed according to the semantics of the topics relevant to the document. We hypothesize that this can enable the model to encode better document features while capturing the underlying latent document-topic representations. The topic-word distribution $\mathbf{T_w}$ represents the affinity of each topic towards words in the vocabulary (and is used to interpret the semantics of each topic). Therefore, we factor $\mathbf{T_w} \in \mathbb{R}^{K \times |V|}$ into the attention mechanism, where $K$ denotes the number of topics. The topic-aware attention encoder and the topic-word distribution influence each other during training, which results in convergence to better topics, as discussed in detail in the Experiments section.
35
+
36
+ Specifically, we perform attention over the document's token sequence for each topic, using the embedded representations of the topics $\mathbf{T_E} \in \mathbb{R}^{K \times E}$:
37
+
38
+ $$\mathbf{T_E} = \mathbf{T_w}\,\mathbf{E}, \quad \text{[topic embeddings]}$$
40
+
41
+ $\mathbf{T_w} = \operatorname{softmax}(\mathbf{D}), \quad \text{[topic-word distribution]}$
42
+
43
+ where $\mathbf{D} \in \mathbb{R}^{K \times |V|}$ is the decoder layer (the softmax being applied along the vocabulary dimension) which is used to reconstruct $\mathbf{x}_{bow}$ from the sampled latent
44
+
45
+ document-topic representation $\mathbf{z}$ as the final step D in Figure 1. The topic embeddings are then used to determine the attention alignment matrix $\mathbf{A} \in \mathbb{R}^{|\mathbf{x}| \times K}$ between each topic $k \in \{1, 2, ..., K\}$ and words in the document such that:
46
+
47
+ $$\mathbf{A}_{jk} = \frac{\exp(score((\mathbf{T}_{\mathbf{E}})_k, \mathbf{h}_j))}{\sum_{j'=1}^{|\mathbf{x}|} \exp(score((\mathbf{T}_{\mathbf{E}})_k, \mathbf{h}_{j'}))},$$
48
+
49
+ $$score((\mathbf{T_E})_k, \mathbf{h}_j) = \mathbf{v_A}^{\top} \tanh(\mathbf{W_A}[(\mathbf{T_E})_k; \mathbf{h}_j])$$
50
+
51
+ where $\mathbf{v_A} \in \mathbb{R}^P$, $\mathbf{W_A} \in \mathbb{R}^{P \times (E+H)}$, $(\mathbf{T_E})_k \in \mathbb{R}^E$ is the embedded representation of the $k^{th}$ topic, and $[\cdot;\cdot]$ denotes concatenation. We then compute the topic-wise context vectors as:
52
+
53
+ $$\mathbf{C_T} = \sum_{j=1}^{|\mathbf{x}|} \mathbf{A}_j \otimes \mathbf{h}_j, \quad \text{[topic-wise context matrix]}$$
54
+
55
+ where $\otimes$ denotes the outer product. Note that $\mathbf{A}_j \in \mathbb{R}^K$ (the $j^{th}$ row of matrix $\mathbf{A}$) is a $K$-dimensional vector and $\mathbf{h}_j$ is an $H$-dimensional vector, so $\mathbf{A}_j \otimes \mathbf{h}_j$ for each $j$ yields a matrix of order $K \times H$; hence $\mathbf{C}_{\mathbf{T}} \in \mathbb{R}^{K \times H}$. The final aggregated context vector $\mathbf{c}$ is computed as a weighted average over the rows of $\mathbf{C}_{\mathbf{T}}$ (each row being a topic-specific context vector), with the document-topic proportion vector $\mathbf{t}_{\mathbf{d}}$ as weights:
56
+
57
+ $$\mathbf{c} = \sum_{k=1}^K (\mathbf{t_d})_k (\mathbf{C_T})_k$$
58
+
59
+ where $(\mathbf{t_d})_k$ is a scalar, $(\mathbf{C_T})_k \in \mathbb{R}^H$ denotes the $k^{th}$ row of matrix $\mathbf{C_T}$, and $\mathbf{t_d}$ is the document-topic distribution, which signifies the topic proportions in a document. To compute it, we first normalize the document BoW vector $\mathbf{x}_{bow}$ and embed it using the embedding matrix $\mathbf{E}$, followed by multiplication with the topic embeddings $\mathbf{T_E} \in \mathbb{R}^{K \times E}$:
60
+
61
+ $$\mathbf{x}_{norm} = \frac{\mathbf{x}_{bow}}{\sum_{i=1}^{|V|} (\mathbf{x}_{bow})_i}, \quad \text{[normalized BoW]}$$
62
+
63
+ $\mathbf{x}_{emb} = \mathbf{x}_{norm}^{\top} \mathbf{E}, \quad \text{[document embedding]}$
64
+
65
+ $\mathbf{t_d} = \operatorname{softmax}(\mathbf{T_E}\, \mathbf{x}_{emb}), \quad \text{[document-topic dist.]}$
66
+
67
+ where $\mathbf{x_{norm}} \in \mathbb{R}^{|V|}$, $\mathbf{x_{emb}} \in \mathbb{R}^{E}$ & $\mathbf{t_d} \in \mathbb{R}^{K}$. The context vector $\mathbf{c}$ is the output of our topic-guided attention module, which is then used for sampling the latent document-topic vector followed by the BoW decoding, as done in traditional VAE-based topic models.
68
+
69
+ We call this framework Weighted-TAN (W-TAN), where the context vector $\mathbf{c}$ is a weighted sum of the topic-wise context vectors. We also propose another variant, Top-TAN (T-TAN), where we use the context vector of the topic with the largest proportion in $\mathbf{t_d}$ as $\mathbf{c}$: we first find the index $m$ of the most probable topic in $\mathbf{t_d}$, and $\mathbf{c}$ is then the row corresponding to index $m$ in matrix $\mathbf{C_T}$. We observe experimentally that doing so yields a model that generates more coherent topics.
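+ To make the computation above concrete, the following is a minimal, batch-free PyTorch sketch of the full attention module; tensor names mirror the symbols in the equations, and the function signature is illustrative rather than taken from a released implementation.
+
+ ```python
+ # Sketch of topic-guided attention and both aggregation variants
+ # (single document, no batching; names mirror the equations above).
+ import torch
+ import torch.nn.functional as F
+
+ def topic_guided_context(M, x_bow, E, D, W_A, v_A, top_only=False):
+     # M: (n, H) LSTM states; x_bow: (|V|,); E: (|V|, E); D: (K, |V|);
+     # W_A: (P, E+H); v_A: (P,)
+     T_w = F.softmax(D, dim=1)                    # topic-word distribution
+     T_E = T_w @ E                                # topic embeddings, (K, E)
+     K, n = T_E.shape[0], M.shape[0]
+     pairs = torch.cat([T_E[:, None, :].expand(K, n, T_E.shape[1]),
+                        M[None, :, :].expand(K, n, M.shape[1])], dim=-1)
+     scores = torch.tanh(pairs @ W_A.t()) @ v_A   # (K, n) alignment scores
+     A = F.softmax(scores, dim=1).t()             # (n, K): A_jk, softmax over tokens
+     C_T = torch.einsum('jk,jh->kh', A, M)        # sum_j A_j (outer) h_j, (K, H)
+     x_emb = (x_bow / x_bow.sum()) @ E            # normalized BoW -> doc embedding
+     t_d = F.softmax(T_E @ x_emb, dim=0)          # document-topic dist., (K,)
+     if top_only:                                 # T-TAN: most probable topic only
+         return C_T[t_d.argmax()]
+     return t_d @ C_T                             # W-TAN: weighted sum, (H,)
+ ```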
2012.07791/record.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ "arxiv_id": "2012.07791",
3
+ "month": "2020_12",
4
+ "year": 2021,
5
+ "conference": "CVPR",
6
+ "title": "img2pose: Face Alignment and Detection via 6DoF, Face Pose Estimation",
7
+ "arxiv_url": "https://arxiv.org/abs/2012.07791",
8
+ "source": {
9
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_12/main_diagram_database/2012.07791",
10
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_12/tex_files_extracted/2012.07791",
11
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_12/main_diagram_database/2012.07791/paper_text/paper.md",
12
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_12/main_diagram_database/2012.07791/metadata.json",
13
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_12/main_diagram_database/2012.07791/paper_text/paper.md",
14
+ "intro_method_from_kind": "markdown"
15
+ },
16
+ "files": {
17
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.07791/main_diagram/main_diagram.drawio",
18
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.07791/main_diagram/main_diagram.png",
19
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.07791/main_diagram/main_diagram.pdf",
20
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.07791/paper_text/intro_method.md",
21
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.07791/paper.pdf",
22
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.07791/latex_source"
23
+ },
24
+ "status": {
25
+ "copy_drawio": "exists",
26
+ "copy_png": "exists",
27
+ "diagram_pdf": "pdf_exists",
28
+ "intro_method": "exists",
29
+ "paper_pdf": "exists",
30
+ "latex": "exists"
31
+ }
32
+ }
2012.15355/record.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ "arxiv_id": "2012.15355",
3
+ "month": "2020_12",
4
+ "year": 2021,
5
+ "conference": "ACL",
6
+ "title": "Optimizing Deeper Transformers on Small Datasets",
7
+ "arxiv_url": "https://arxiv.org/abs/2012.15355",
8
+ "source": {
9
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_12/main_diagram_database/2012.15355",
10
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_12/tex_files_extracted/2012.15355",
11
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_12/main_diagram_database/2012.15355/paper_text/paper.md",
12
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_12/main_diagram_database/2012.15355/metadata.json",
13
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_12/main_diagram_database/2012.15355/paper_text/paper.md",
14
+ "intro_method_from_kind": "markdown"
15
+ },
16
+ "files": {
17
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.15355/main_diagram/main_diagram.drawio",
18
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.15355/main_diagram/main_diagram.png",
19
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.15355/main_diagram/main_diagram.pdf",
20
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.15355/paper_text/intro_method.md",
21
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.15355/paper.pdf",
22
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.15355/latex_source"
23
+ },
24
+ "status": {
25
+ "copy_drawio": "exists",
26
+ "copy_png": "exists",
27
+ "diagram_pdf": "pdf_exists",
28
+ "intro_method": "exists",
29
+ "paper_pdf": "exists",
30
+ "latex": "exists"
31
+ }
32
+ }
2101.04904/record.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ "arxiv_id": "2101.04904",
3
+ "month": "2021_01",
4
+ "year": 2021,
5
+ "conference": "ICLR",
6
+ "title": "EEC: Learning to Encode and Regenerate Images for Continual Learning",
7
+ "arxiv_url": "https://arxiv.org/abs/2101.04904",
8
+ "source": {
9
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_01/main_diagram_database/2101.04904",
10
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_01/tex_files_extracted/2101.04904",
11
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_01/main_diagram_database/2101.04904/paper_text/paper.md",
12
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_01/main_diagram_database/2101.04904/metadata.json",
13
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_01/main_diagram_database/2101.04904/paper_text/paper.md",
14
+ "intro_method_from_kind": "markdown"
15
+ },
16
+ "files": {
17
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2101.04904/main_diagram/main_diagram.drawio",
18
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2101.04904/main_diagram/main_diagram.png",
19
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2101.04904/main_diagram/main_diagram.pdf",
20
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2101.04904/paper_text/intro_method.md",
21
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2101.04904/paper.pdf",
22
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2101.04904/latex_source"
23
+ },
24
+ "status": {
25
+ "copy_drawio": "ok",
26
+ "copy_png": "ok",
27
+ "diagram_pdf": "ok",
28
+ "intro_method": "ok",
29
+ "paper_pdf": "ok",
30
+ "latex": "ok"
31
+ }
32
+ }
2101.09178/record.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ "arxiv_id": "2101.09178",
3
+ "month": "2021_01",
4
+ "year": 2021,
5
+ "conference": "AAAI",
6
+ "title": "Estimating α-Rank by Maximizing Information Gain",
7
+ "arxiv_url": "https://arxiv.org/abs/2101.09178",
8
+ "source": {
9
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_01/main_diagram_database/2101.09178",
10
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_01/tex_files_extracted/2101.09178",
11
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_01/main_diagram_database/2101.09178/paper_text/paper.md",
12
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_01/main_diagram_database/2101.09178/metadata.json",
13
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_01/main_diagram_database/2101.09178/paper_text/paper.md",
14
+ "intro_method_from_kind": "markdown"
15
+ },
16
+ "files": {
17
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2101.09178/main_diagram/main_diagram.drawio",
18
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2101.09178/main_diagram/main_diagram.png",
19
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2101.09178/main_diagram/main_diagram.pdf",
20
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2101.09178/paper_text/intro_method.md",
21
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2101.09178/paper.pdf",
22
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2101.09178/latex_source"
23
+ },
24
+ "status": {
25
+ "copy_drawio": "exists",
26
+ "copy_png": "exists",
27
+ "diagram_pdf": "pdf_exists",
28
+ "intro_method": "exists",
29
+ "paper_pdf": "exists",
30
+ "latex": "exists"
31
+ }
32
+ }
2104.04466/record.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ "arxiv_id": "2104.04466",
3
+ "month": "2021_04",
4
+ "year": 2021,
5
+ "conference": "EMNLP",
6
+ "title": "Knowledge-Aware Graph-Enhanced GPT-2 for Dialogue State Tracking",
7
+ "arxiv_url": "https://arxiv.org/abs/2104.04466",
8
+ "source": {
9
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.04466",
10
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/tex_files_extracted/2104.04466",
11
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.04466/paper_text/paper.md",
12
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.04466/metadata.json",
13
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.04466/paper_text/paper.md",
14
+ "intro_method_from_kind": "markdown"
15
+ },
16
+ "files": {
17
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.04466/main_diagram/main_diagram.drawio",
18
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.04466/main_diagram/main_diagram.png",
19
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.04466/main_diagram/main_diagram.pdf",
20
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.04466/paper_text/intro_method.md",
21
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.04466/paper.pdf",
22
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.04466/latex_source"
23
+ },
24
+ "status": {
25
+ "copy_drawio": "exists",
26
+ "copy_png": "exists",
27
+ "diagram_pdf": "pdf_exists",
28
+ "intro_method": "exists",
29
+ "paper_pdf": "exists",
30
+ "latex": "exists"
31
+ }
32
+ }
2104.04923/record.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ "arxiv_id": "2104.04923",
3
+ "month": "2021_04",
4
+ "year": 2021,
5
+ "conference": "NAACL",
6
+ "title": "Non-Autoregressive Semantic Parsing for Compositional Task-Oriented Dialog",
7
+ "arxiv_url": "https://arxiv.org/abs/2104.04923",
8
+ "source": {
9
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.04923",
10
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/tex_files_extracted/2104.04923",
11
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.04923/paper_text/paper.md",
12
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.04923/metadata.json",
13
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.04923/paper_text/paper.md",
14
+ "intro_method_from_kind": "markdown"
15
+ },
16
+ "files": {
17
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.04923/main_diagram/main_diagram.drawio",
18
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.04923/main_diagram/main_diagram.png",
19
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.04923/main_diagram/main_diagram.pdf",
20
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.04923/paper_text/intro_method.md",
21
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.04923/paper.pdf",
22
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.04923/latex_source"
23
+ },
24
+ "status": {
25
+ "copy_drawio": "exists",
26
+ "copy_png": "exists",
27
+ "diagram_pdf": "pdf_exists",
28
+ "intro_method": "exists",
29
+ "paper_pdf": "exists",
30
+ "latex": "exists"
31
+ }
32
+ }
2105.01203/record.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ "arxiv_id": "2105.01203",
3
+ "month": "2021_05",
4
+ "year": 2024,
5
+ "conference": "ECCV",
6
+ "title": "Physical-Based Event Camera Simulator",
7
+ "arxiv_url": "https://arxiv.org/abs/2105.01203",
8
+ "source": {
9
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.01203",
10
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/tex_files_extracted/2105.01203",
11
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.01203/paper_text/paper.md",
12
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.01203/metadata.json",
13
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.01203/paper_text/paper.md",
14
+ "intro_method_from_kind": "markdown"
15
+ },
16
+ "files": {
17
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.01203/main_diagram/main_diagram.drawio",
18
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.01203/main_diagram/main_diagram.png",
19
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.01203/main_diagram/main_diagram.pdf",
20
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.01203/paper_text/intro_method.md",
21
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.01203/paper.pdf",
22
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.01203/latex_source"
23
+ },
24
+ "status": {
25
+ "copy_drawio": "exists",
26
+ "copy_png": "exists",
27
+ "diagram_pdf": "pdf_exists",
28
+ "intro_method": "exists",
29
+ "paper_pdf": "exists",
30
+ "latex": "exists"
31
+ }
32
+ }
2105.03491/record.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ "arxiv_id": "2105.03491",
3
+ "month": "2021_05",
4
+ "year": 2021,
5
+ "conference": "ICML",
6
+ "title": "Uniform Convergence, Adversarial Spheres and a Simple Remedy",
7
+ "arxiv_url": "https://arxiv.org/abs/2105.03491",
8
+ "source": {
9
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.03491",
10
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/tex_files_extracted/2105.03491",
11
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.03491/paper_text/paper.md",
12
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.03491/metadata.json",
13
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.03491/paper_text/paper.md",
14
+ "intro_method_from_kind": "markdown"
15
+ },
16
+ "files": {
17
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.03491/main_diagram/main_diagram.drawio",
18
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.03491/main_diagram/main_diagram.png",
19
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.03491/main_diagram/main_diagram.pdf",
20
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.03491/paper_text/intro_method.md",
21
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.03491/paper.pdf",
22
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.03491/latex_source"
23
+ },
24
+ "status": {
25
+ "copy_drawio": "exists",
26
+ "copy_png": "exists",
27
+ "diagram_pdf": "pdf_exists",
28
+ "intro_method": "exists",
29
+ "paper_pdf": "exists",
30
+ "latex": "exists"
31
+ }
32
+ }
2105.12774/record.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ "arxiv_id": "2105.12774",
3
+ "month": "2021_05",
4
+ "year": 2021,
5
+ "conference": "AAAI",
6
+ "title": "Dynamic to Static Lidar Scan Reconstruction Using Adversarially Trained Auto Encoder",
7
+ "arxiv_url": "https://arxiv.org/abs/2105.12774",
8
+ "source": {
9
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.12774",
10
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/tex_files_extracted/2105.12774",
11
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.12774/paper_text/paper.md",
12
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.12774/metadata.json",
13
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.12774/paper_text/paper.md",
14
+ "intro_method_from_kind": "markdown"
15
+ },
16
+ "files": {
17
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.12774/main_diagram/main_diagram.drawio",
18
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.12774/main_diagram/main_diagram.png",
19
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.12774/main_diagram/main_diagram.pdf",
20
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.12774/paper_text/intro_method.md",
21
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.12774/paper.pdf",
22
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.12774/latex_source"
23
+ },
24
+ "status": {
25
+ "copy_drawio": "exists",
26
+ "copy_png": "exists",
27
+ "diagram_pdf": "pdf_exists",
28
+ "intro_method": "exists",
29
+ "paper_pdf": "exists",
30
+ "latex": "exists"
31
+ }
32
+ }
2105.14491/record.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ "arxiv_id": "2105.14491",
3
+ "month": "2021_05",
4
+ "year": 2022,
5
+ "conference": "ICLR",
6
+ "title": "How Attentive are Graph Attention Networks? ",
7
+ "arxiv_url": "https://arxiv.org/abs/2105.14491",
8
+ "source": {
9
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.14491",
10
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/tex_files_extracted/2105.14491",
11
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.14491/paper_text/paper.md",
12
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.14491/metadata.json",
13
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_05/main_diagram_database/2105.14491/paper_text/paper.md",
14
+ "intro_method_from_kind": "markdown"
15
+ },
16
+ "files": {
17
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.14491/main_diagram/main_diagram.drawio",
18
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.14491/main_diagram/main_diagram.png",
19
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.14491/main_diagram/main_diagram.pdf",
20
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.14491/paper_text/intro_method.md",
21
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.14491/paper.pdf",
22
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2105.14491/latex_source"
23
+ },
24
+ "status": {
25
+ "copy_drawio": "exists",
26
+ "copy_png": "exists",
27
+ "diagram_pdf": "pdf_exists",
28
+ "intro_method": "exists",
29
+ "paper_pdf": "exists",
30
+ "latex": "exists"
31
+ }
32
+ }
2106.00794/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
1
+ <mxfile host="app.diagrams.net" modified="2021-02-02T04:13:44.652Z" agent="5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36" version="14.2.7" etag="T_BtYUhQaDWu6sFK-W3X" type="google"><diagram id="m5aC8IQ4UEtQDcMbW3Mh">7Vzbdps4FP0aP5YFkrg9Jo49zaxOmzaZSeeRGMVmFRsP4MTp14/EzUKSbYKFQ1P8kMABjkB76yBtHTGC4+X2j9hbL/6KfByOgO5vR/BqBICLLPKXGl5yg2Wg3DCPAz83GTvDbfATF0a9sG4CHye1E9MoCtNgXTfOotUKz9KazYvj6Ll+2mMU1ktde3MsGG5nXiha7wM/XeRWx9R39o84mC/Kkg29OLL0ypMLQ7Lw/OiZMcHJCI7jKErzreV2jENad2W95NdN9xytbizGq7TJBSC/4MkLN8WzFfeVvpQPi1f+Ba0zsjcLvSQJZiN4uUiXITEYZJOUFL98JzsfdE3XK8u/xKJrpmmWhiuKvl7tvbB7NzgOljjFcWH0vWSB/aKAJI2jH1VFg8oyjsIozu4RTqeOrtMLH6NVOvWWQUjdf8ThE06DmUcO5I+FfQHYXU0ZVf0T3uKI3E/8Qk553iFcArxgwC1tMQ69NHiqu/cKos0rd1UJN1FACgZ60SZsXdfKyi9aBbBdzYX67mfVnSbRJp7hwg+LMe8a6YJfx3Z3v7rb1IvnOBXckg2mWnamjFBycsGBXL0hV40B0LE0HTEMMFsyC7qa7fCuu6ITGujUDzohJMYqR29HIeSIwck1maBndkUnc6BTb+kEHaQ5+ukBimcX9Wt3FqCs44yKo83Kr+B9XgQpvl17M3r0mXSV6/QKvQcc3kRJkAbRinKQ4EV5cvmEY4pt+Ik7IY2oBy8M5tLTL4oDD1GaRstDlPEekijcpPginhW9b8owr9qDupScxfOT4vD2tVzbFw9gAT3Dxar3zJLRMffzrobpAQBtAcBv3sqPluSh/REt1ApJaZcPcQ1U678N7a5ntfkhySrogpxgoPU2q5PyONma0//PUfwDx9mtJgSQJS6I15one2GkBwrEyAirIayPQRgy8eXSvLKnphB5VtEKq0Hc4gAve6sM4NX4qQa4fjrgjgD4fUwa02pOA+bWW65DMsw8GRumNh+z30HM6tU/ubAuLUuGZR01ObRdYQZgHTRXxMyVQGYrgMxtBBltq2SbVkz2t0XTFdpt5XL1kNB/eLsOvZVHY69akkyz37sjiWHZZ2NJqRQxNLnbpFEceCEXyDlsK7rUoP66CX72KkbvI0ydF7ZjI2vaLcTCSACJL+zOUDYElMeku07f1ZjasrZ5LtyaNLW+4GYZQLMMDjegicg5XSHXQHh8fV/50pv9mGeXcVVGaiq9CmI8KzrKYRq36V8vA9+nd3e8i12d+er3fzMe1fmCJq4xHnfIF1e3S3Lsf+Oj6hyWLdBUQRdRSpxs1+SJ3qSl97EXDa1Gkbiz9qxSnKu0kUJIOSKj7AVu3wBVkEJyCWDEDgl6o44Y/PiordJW9YmqkTXnSJ0QYqjU1t6UDWWdvUM6gLoj2CEdGghjvwYd3HfLBpNjg9EdG0SV7WxsEEHfyw+8DdLvZXFkmymA7O38053S/R4G5SD0NKBAIKrvsKXcLuuE8IxUSCRRvWtPJIUzOBVziEdQZ49tHOQP3eG9nR7AekM10yajTrdOD8vRTMhM9LTMcaADWhOKri3GNeqMiKImeUJEI6xBHA8RasNDxT0h0DM2OaA+Im397nN1qCGLSYeou7V3OQ7qqVN2wrqijoPsgTpHqUPzYeyW7zzKHtMS3HWWtgBEzVMpYyB03oYxvWEH5EZGQG8dWBzNZV5uDu+WVDbzfgKdUQao7iiZdc7ovz1lDFdzwS5pqS6tG8DWDJvJabLb88k2udhlAKQxJXeYMCXN58wn0iiUowYTq0A6sfqPFwZ+oeDm/sit5C7L2TmOrVTPr/NTqquyImxhaq7cy+TiuqB8eM7BZ+YbnnGSZpenXmFxD04HHU7lOkkodrhcUGBKEmwsIJPygb6/XTQViiVZnMnSy07JKjG7UaDDHkLeIMlO9aQcNJpNmStBRibavq55G0DWvP/8+/bueno9vri7/vL512nh+tEWzjXfPPweIkk2k1Q8YznlVM4sKcqfo28iToIxHCCQyCg9K2eRTOttwSJZ4tzlxe3k0/Xnye/CIZYaDTmlgEAIaGWQLgmkuwKBZJ0pJfyRqcM53MnaW53EH2VEHH/7cn+1l4WVOb/hd0rOo1PloANyOpBTly0xtHXGTJnc3DdmTr7fTL7dDdR8C2rW5WdoOeejpkoBeljLcoIQwM3HImAqWWjnuprLjvFdrhRLYxfLdLegU1Srv25Im38k4EgTsPoRSRpndikIA26964QMU5JVKV3PoCIQQJXi8BAITgkEBtD4+SlT45tm0/ZPZ05trutD1/AemDlV2OoVyMev0HoP889uNRXad7YozOoibwqDeVNwq7NdR2OF6O7EYqhy8f8vkeXTGzLx81d00Sti+wctpx+g9Tq/CskkisbDOr0mvRHk2Brk8hnk6zwkcr+KvHAoisoDck2Q4z/aUi2jOAdoooY7gNYGNOkKjK5AE4XTYYnMsSUy0lbV0RIZKOqHw5LUsyxJlaHc1ZJU2ECKGz4C8sqPgEAoaqmdfQQEiSKXgOCQVt6j8Y4krRzaRldp5Znrs6SVI1HMG1JldrhwqTIINAvyKmRWpEARG2RWFW1flFmh46iUWaHjnklmRaJiNoy6mvUWBJEDItm0S1dDLyTKU8PQ62hqoxwiNd1ysrv7hnLeFncfooaT/wE=</diagram></mxfile>
2106.00794/main_diagram/main_diagram.pdf ADDED
Binary file (38.4 kB).
 
2106.00794/paper_text/intro_method.md ADDED
@@ -0,0 +1,142 @@
1
+ # Introduction
2
+
3
+ <figure id="fig:protocols" data-latex-placement="th">
4
+ <img src="pipeline-2.png" />
5
+ <figcaption>The initial pool of crowdworkers are randomly assigned to one of four protocols and the datasets are collected in parallel.</figcaption>
6
+ </figure>
7
+
8
+ Crowdsourcing is a scalable method for constructing examples for many natural language processing tasks. Platforms like Amazon's Mechanical Turk give researchers access to a large, diverse pool of people to employ [@howe2006rise; @snow-etal-2008-cheap; @callison-burch-2009-fast]. Given the ease of data collection with crowdsourcing, it has been frequently used for collecting datasets for natural language understanding (NLU) tasks like question answering [@mihaylov-etal-2018-suit], reading comprehension [@rajpurkar-etal-2016-squad; @huang-etal-2019-cosmos], natural language inference [@dagan2005pascal; @bowman-etal-2015-large; @williams-etal-2018-broad; @nie-etal-2020-adversarial], and commonsense reasoning [@talmor-etal-2019-commonsenseqa].
9
+
10
+ There has been substantial research devoted to studying crowdsourcing methods, especially in the human-computer interaction literature [@kittur-etal-2008-crowdsourcing; @kittur-2011-crowdforge; @bernstein-2012-analytic]. However, most prior research investigates methods for collecting accurate *annotations* for existing data, for example labeling objects in images or labeling the sentiment of sentences [@hsueh-etal-2009-data; @liu-2019-interactive; @sun-etal-2020-improving]. There are some small-scale studies that use writing tasks, like writing product reviews, to compare crowdsourcing methodologies [@dow-2012-shepherd]. However, we are unaware of any prior work that directly evaluates the effects of crowdsourcing protocol design choices on the quality of the resulting data for NLU tasks.
11
+
12
+ Decisions around methodology and task design used to collect datasets dictate the quality of the data collected. As models become stronger and are able to solve existing NLU datasets, we have an increasing need for difficult, high-quality datasets that are still reliably solvable by humans. As a result, our thresholds for what makes a dataset acceptable become stricter: The data needs to be challenging, have high human-agreement, and avoid serious annotation artifacts [@gururangan-etal-2018-annotation]. To make collecting such large-scale datasets feasible, making well-informed crowdsourcing design decisions becomes crucial.
13
+
14
+ Existing NLP datasets have been crowdsourced with varying methods. The prevailing standard is to experiment with task design during pilots that are run before the main data collection [@vaughan2018making]. This piloting process is essential to designing good crowdsourcing tasks with clear instructions, but the findings from these pilots are rarely discussed in published corpus papers, and the pilots are usually not large enough or systematic enough to yield definitive conclusions. In this paper, we use a randomized trial to directly compare crowdsourcing methodologies to establish general best practices for NLU data collection.
15
+
16
+ We compare the efficacy of three types of crowdsourcing interventions that have been used in previous work. We use multiple-choice question answering in English as a testbed for our study and collect four small datasets in parallel, including a baseline dataset with no interventions. We choose QA as our testbed over the similarly popular testbed task of natural language inference (NLI) because our focus on very high human-agreement examples calls for minimizing label ambiguity. In multiple-choice QA, the correct label is the answer choice that is *most likely to be correct*, even if there is some ambiguity in whether that choice is genuinely true. In NLI, however, if more than one label is plausible, then resolving the disagreement by ranking labels may not be possible [@pavlick-kwiatkowski-2019-inherent]. In the trial, crowdworkers are randomly assigned to one of four protocols: [baseline]{.smallcaps}, [justification]{.smallcaps}, [crowd]{.smallcaps}, or [expert]{.smallcaps}.[^3] In [baseline]{.smallcaps}, crowdworkers are simply asked to write question-answering examples. In [justification]{.smallcaps}, they are tasked with also writing explanations for their examples, prompting self-assessment. For the [expert]{.smallcaps} and [crowd]{.smallcaps} protocols, we train workers using an iterative process of collecting data, sending feedback, and qualifying high-performing workers to subsequent rounds. We use expert-curated evaluations in [expert]{.smallcaps}, and crowdsourced evaluations in [crowd]{.smallcaps}, for generating feedback and assigning qualifications. We use a standard of high pay and strict qualifications for all protocols. We also validate the data to discard ambiguous and unanswerable examples. The experimental pipeline is sketched in Figure [1](#fig:protocols){reference-type="ref" reference="fig:protocols"}.
17
+
18
+ To quantify the dataset difficulty, we collect additional label annotations to establish human performance on each dataset and compare these to model performance. We also evaluate the difficulty of the datasets for typical machine learning models using IRT [@Baker1993ItemRT; @lalor-etal-2016-building].
19
+
20
+ We find that the [expert]{.smallcaps} protocol dataset is the most challenging. The human--model gap with RoBERTa$_{\textsc{large}}$ [@liu2019roberta] on the unanimous agreement portion of [expert]{.smallcaps} is 13.9 percentage points, compared to 7.0 on the [baseline]{.smallcaps} protocol. The gap with UnifiedQA [@khashabi-etal-2020-unifiedqa] is 6.7 on [expert]{.smallcaps}, compared to 2.9 on [baseline]{.smallcaps}. However, the [crowd]{.smallcaps} evaluation data is far less challenging than [expert]{.smallcaps}, suggesting that expert evaluations are more reliable than crowdsourced evaluations for sending feedback and assigning qualifications.
21
+
22
+ We also find that the [justification]{.smallcaps} intervention is ineffective as a stand-alone method for increasing NLU data quality. A substantial proportion of the explanations submitted are duplicates, reused for multiple examples, or give trivial reasoning that is not specific to the example.
23
+
24
+ Lastly, to evaluate the datasets for serious annotation artifacts we test the guessability of answers by omitting the questions from the model input. This partial-input baseline achieves the lowest accuracy on [expert]{.smallcaps}, showing that the interventions used to successfully boost example difficulty may also reduce annotation artifacts.
25
+
26
+ # Method
27
+
28
+ While crowdsourcing makes it easy to collect large datasets quickly, there are some clear pitfalls: Crowdworkers are generally less knowledgeable than field experts about the requirements the data needs to meet, crowdwork can be monotonous, resulting in repetitive and noisy data, and crowdsourcing platforms can create a \"market for lemons\" where fast work is incentivized over careful, creative work because of poor-quality requesters [@akerlog-1978-market; @chandler-2013-risks].
29
+
30
+ @daniel2018quality give a broad overview of the variables at play when trying to crowdsource high-quality data, discussing many strategies available to requesters. Motivated by the use of self-assessment in teaching [@boud-1995], @dow-2012-shepherd study the effectiveness of self-assessment and external assessment when collecting data for product reviews. They find that both strategies are effective for improving the quality of submitted work. However, @gadiraju2017using find that crowdworker self-assessment can be unreliable since poor-performing workers overestimate their ability. @drapeau-2016-microtalk test a justify-reconsider strategy: Crowdworkers justify their annotations in a relation extraction task, are then shown a justification written by a different crowdworker or an expert, and are asked to reconsider their annotation. They find that this method significantly boosts the accuracy of annotations.
31
+
32
+ Another commonly used strategy when crowdsourcing NLP datasets is to only qualify workers who pass an initial quiz or perform well in preliminary crowdsourcing batches [@Wang2013PerspectivesOC; @cotterell-callison-burch-2014-multi; @ning-etal-2020-torque; @shapira2020evaluating; @roit-etal-2020-controlled]. In addition to using careful qualifications, @roit-etal-2020-controlled send workers feedback detailing errors they made in their QA-SRL annotation. Writing such feedback is labor-intensive and can become untenable as the number of workers grows. @dow-2011-managing design a framework of promoting crowdworkers into "shepherding roles\" to crowdsource such feedback. We compare expert and crowdsourced feedback in our [expert]{.smallcaps} and [crowd]{.smallcaps} protocols.
33
+
34
+ We run our study on Amazon Mechanical Turk.[^4] At launch, crowdworkers are randomly assigned to one of four data collection protocols, illustrated in Figure [1](#fig:protocols){reference-type="ref" reference="fig:protocols"}.[^5] To be included in the initial pool, workers need to have an approval rating of 98% or higher, have at least 1,000 approved tasks, and be located in the US, the UK, or Canada.
35
+
36
+ This task is used for collecting question-answer pairs in the crowdsourcing pipeline for all four protocols. Crowdworkers assigned to the [baseline]{.smallcaps} protocol are presented with only this task.
37
+
38
+ In this writing task, we provide a context passage drawn from the Open American National Corpus [@ide-suderman-2006-integrating].[^6] Inspired by @hu-etal-2020-ocnli, we ask workers to write two questions per passage with four answer choices each. We direct workers to ensure that the questions are answerable given the passage and that there is only one correct answer for each question. We instruct them to limit word overlap between their answer choices and the passage and to write distracting answer choices that will seem plausibly correct to someone who hasn't carefully read the passage. To clarify these criteria, we provide examples of good and bad questions.
39
+
40
+ Workers assigned to the [justification]{.smallcaps} protocol are given the writing task described above (Section [3.1](#sec:baseline){reference-type="ref" reference="sec:baseline"}) and are also tasked with writing a 1--3 sentence explanation for each question. They are asked to explain the reasoning needed to select the correct answer choice, mentioning what they think makes the question they wrote challenging.
41
+
42
+ Workers assigned to the [crowd]{.smallcaps} and [expert]{.smallcaps} protocols are directed to a tutorial upon assignment. The tutorial consists of two quizzes and writing tasks. The quizzes have four steps. In each step workers are shown a passage, two question candidates and are asked to select which candidate (i) is less ambiguous, (ii) is more difficult, (iii) is more creative, or (iv) has better distracting answer choices. These concepts are informally described in the writing task instructions, but the tutorial makes the rubric explicit, giving crowdworkers a clearer understanding of our desiderata. We give workers immediate feedback on their performance during the first quiz and not the second so that we can use it for evaluation. Lastly, for the tutorial writing tasks, we provide two passages and ask workers to write two questions (with answer choices) for each passage. These questions are graded by three experts[^7] using a rubric with the same metrics described in the quiz, shown in Figure [2](#fig:rubric){reference-type="ref" reference="fig:rubric"}. We give the qualification to continue onto the writing tasks to the top 60% of crowdworkers who complete the tutorial. We only qualify the workers who wrote answerable, unambiguous questions, and we qualify enough workers to ensure that we would have a large pool of people in our final writing round.
43
+
44
+ <figure id="fig:rubric" data-latex-placement="t">
45
+
46
+ <figcaption>The grading rubric used to evaluate examples submitted during the intermediate writing rounds in the <span class="smallcaps">expert</span> and <span class="smallcaps">crowd</span> protocols.</figcaption>
47
+ </figure>
48
+
49
+ After passing the tutorial, workers go through three small rounds of writing tasks. At the end of each round, we send them feedback and qualify a smaller pool of workers for the next round. We only collect 400--500 examples in these intermediate rounds. At the end of each round, we evaluate the submitted work using the same rubric defined in the tutorial. In the [expert]{.smallcaps} protocol, three experts grade worker submissions, evaluating at least four questions per worker. The evaluation annotations are averaged and workers are **qualified** for the next round based on their performance. The qualifying workers are sent a message with **feedback** on their performance and a bonus for qualifying. Appendix [7](#app:feedback){reference-type="ref" reference="app:feedback"} gives details on the feedback sent.
50
+
51
+ Evaluating the examples in each round is labor-intensive and challenging to scale (avg. 30 expert-min. per worker). In the [crowd]{.smallcaps} protocol we experiment with crowdsourcing these evaluations. After the first intermediate writing round in [crowd]{.smallcaps}, experts evaluate the submitted work. The evaluations are used to qualify workers for the second writing round *and* to promote the top 20% of workers into a feedback role. After intermediate writing rounds 2 and 3, the promoted workers are tasked with evaluating all the examples (no one evaluates their own work). We collect five evaluations per example and use the averaged scores to send feedback and qualify workers for the subsequent round.
52
+
53
+ For both [crowd]{.smallcaps} and [expert]{.smallcaps} protocols, the top 80% of workers are requalified at the end of each round. Of the 150 workers who complete the tutorial, 20% qualify for the final writing round. Our qualification rate is partly dictated by a desire to have a large enough pool of people in the final writing task to ensure that no dataset is skewed by only a few people [@geva-etal-2019-modeling].
54
+
55
+ We aim to ensure that our pay rate is at least US \$15/hr for all tasks. The total cost per question, excluding platform fees, is \$1.75 for the [baseline]{.smallcaps} protocol and \$2 for [justification]{.smallcaps}. If we discard all the data collected in the intermediate writing rounds, the cost is \$3.76 per question for [expert]{.smallcaps},[^8] and \$5 for [crowd]{.smallcaps}.
56
+
57
+ The average pay given during training to workers that qualify for the final writing task in [expert]{.smallcaps} is about \$120/worker (with an estimated 6--7 hours spent in training). In [crowd]{.smallcaps}, there is an additional cost of \$85/worker for collecting crowdsourced evaluations. The cost per example, after training, is \$1.75 per question for both protocols, and total training cost does not scale linearly with dataset size, as one may not need twice as many writers for double the dataset size. More details on our payment and incentive structure can be found in Appendix [8](#app:payment){reference-type="ref" reference="app:payment"}.
58
+
59
+ ::: table*
60
+ Dataset *N* Human RoBERTa $\Delta$ UniQA $\Delta$
61
+ ----------------------------------------------------------------------------- -------- ------- ------------ ---------- ------- ----------
62
+ [baseline]{.smallcaps}  *1492* \- 88.8 (0.2) \- 93.6 \-
63
+ [justification]{.smallcaps}  *1437* \- 86.5 (0.6) \- 91.4 \-
64
+ [crowd]{.smallcaps}  *1544* \- 81.8 (0.7) \- 88.1 \-
65
+ [expert]{.smallcaps}  *1500* \- 81.3 (0.6) \- 87.7 \-
66
+ *Results on the 10-way annotated subset*
67
+ [baseline]{.smallcaps}  *482* 95.9 87.2 (0.8) 8.7 92.5 3.3
68
+ [justification]{.smallcaps}  *471* 95.5 86.7 (1.0) 8.9 90.9 **4.7**
69
+ [crowd]{.smallcaps}  *472* 94.8 83.5 (1.0) 11.3 90.5 **4.3**
70
+ [expert]{.smallcaps}  *464* 92.8 80.6 (1.1) **12.2** 89.8 3.0
71
+ *High agreement ($>$80%) portion of 10-way annotated data*
72
+ [baseline]{.smallcaps}  *436* 97.7 89.3 (0.8) 8.4 94.0 3.7
73
+ [justification]{.smallcaps}  *419* 97.8 89.5 (0.6) 8.3 93.1 **4.8**
74
+ [crowd]{.smallcaps}  *410* 96.8 86.2 (0.9) 10.6 93.6 3.2
75
+ [expert]{.smallcaps}  *383* 98.2 84.7 (1.3) **13.5** 92.9 **5.3**
76
+ *Unanimous agreement portion of 10-way annotated data*
77
+ [baseline]{.smallcaps}  *340* 99.1 92.1 (0.7) 7.0 96.2 2.9
78
+ [justification]{.smallcaps}  *307* 98.7 93.2 (0.3) 5.5 95.8 2.9
79
+ [crowd]{.smallcaps}  *277* 98.6 88.9 (0.9) 9.7 97.1 1.4
80
+ [expert]{.smallcaps}  *271* 99.3 85.4 (1.1) **13.9** 92.5 **6.7**
81
+ :::
82
+
83
+ We collect label annotations by asking crowdworkers to pick the correct answer choice for a question, given the context passage. In addition to the answer choices written by the writer, we add an *Invalid question / No answer* option. We validate the data from each protocol. For [crowd]{.smallcaps} and [expert]{.smallcaps}, we only validate the data from the final large writing rounds. Data from all four protocols is shuffled and we run a single validation task, collecting either two or ten annotations per example.
84
+
85
+ We use the same minimum qualifications as the writing task (Section [3](#sec:protocols){reference-type="ref" reference="sec:protocols"}), and require that workers first pass a qualification task. The qualification task consists of 5 multiple-choice QA examples that have been annotated by experts.[^9] People who answer at least 3 out of 5 questions correctly receive the qualification to work on the validation tasks. Of the 200 crowdworkers who complete the qualification task, 60% qualify for the main validation task. Following @ho2015incentivizing, to incentivize higher-quality annotations, we include expert-labeled examples in the validation task, constituting 10% of all examples. If a worker's annotation accuracy on these labeled examples falls below 50%, we remove their qualification (7 workers are disqualified through this process); conversely, workers who label these examples correctly receive a bonus.
86
+
87
+ @pavlick-kwiatkowski-2019-inherent show that annotation disagreement may not be noise, but could be a signal of true ambiguity. @nie-etal-2020-learn recommend using high-human-agreement data for model evaluation to avoid such ambiguity. To have enough annotations to filter the data for high human agreement and to estimate human performance, we collect ten annotations for 500 randomly sampled examples per protocol.
88
+
89
+ We pay \$2.50 for the qualification task and \$0.75 per pair of questions for the main validation task. For every 3 out of 4 expert-labeled examples a worker annotates correctly, we send a \$0.50 bonus.
90
+
91
+ We collect around 1,500 question-answer pairs from each protocol design: 1,558 for [baseline]{.smallcaps}, 1,534 for [justification]{.smallcaps}, 1,600 for [crowd]{.smallcaps}, and 1,580 for [expert]{.smallcaps}. We use the validation annotations to determine the gold-labels and to filter out examples: If there is no majority agreement on the answer choice, or if the majority selects *invalid question*, the example is discarded ($\sim5\%$ of examples). For the 2-way annotated data, we take a majority vote over the two annotations plus the original writer's label. For the 10-way annotated data, we sample four annotations and take a majority vote over those four plus the writer's vote, reserving the remainder to compute an independent estimate of human performance.
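+ Read literally, the gold-labeling rule for the 10-way annotated data can be sketched as follows (hypothetical field names; we read "majority" as a strict majority of the five votes):
+
+ ```python
+ # Sketch of the gold-labeling rule described above (hypothetical names;
+ # "majority" is read here as a strict majority of the 5 votes).
+ import random
+ from collections import Counter
+
+ def gold_label(writer_label, annotations, n_sample=4):
+     votes = random.sample(annotations, n_sample) + [writer_label]
+     label, count = Counter(votes).most_common(1)[0]
+     if 2 * count <= len(votes) or label == "invalid":
+         return None  # no majority, or judged unanswerable: discard example
+     return label
+ ```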
92
+
93
+ For the 10-way annotated subsets of the data, we take a majority vote over the six annotations that are *not* used when determining the gold answer, and compare the result to the gold answer to estimate human performance. Table [\[tbl:human-model-gap\]](#tbl:human-model-gap){reference-type="ref" reference="tbl:human-model-gap"} shows the result for each dataset. The [expert]{.smallcaps} and [crowd]{.smallcaps} datasets have lower human performance numbers than [baseline]{.smallcaps} and [justification]{.smallcaps}. This is also mirrored in the inter-annotator agreement for validation, where Krippendorff's $\alpha$ [@krippendorff1980content] is 0.67 and 0.71 for [expert]{.smallcaps} and [crowd]{.smallcaps}, compared to 0.81 and 0.77 for [baseline]{.smallcaps} and [justification]{.smallcaps} (Table [2](#tbl:agreement){reference-type="ref" reference="tbl:agreement"} in Appendix [9](#app:agreement){reference-type="ref" reference="app:agreement"}). The lower agreement may reflect the fact that while these examples are still clearly human-solvable, they are more challenging than those in [baseline]{.smallcaps} and [justification]{.smallcaps}. As a result, annotators are prone to higher error rates, motivating us to look at the higher-agreement portions of the data to determine true dataset difficulty. And while the agreement rate is lower for [expert]{.smallcaps} and [crowd]{.smallcaps}, more than 80% of the data still has high human-agreement on the gold-label, where at least 4 out of 5 annotators agree on the label. The remaining low-agreement examples may have more ambiguous questions, and we follow @nie-etal-2020-learn's ([-@nie-etal-2020-learn]) recommendation and focus our analysis on the high-agreement portions of the dataset.
94
+
95
+ We test two pretrained models that perform well on other comparable QA datasets: RoBERTa$_{\textsc{large}}$ [@liu2019roberta] and UnifiedQA-v2 [@khashabi-etal-2020-unifiedqa]. We fine-tune RoBERTa$_{\textsc{large}}$ on RACE [@lai-etal-2017-race], a large-scale multiple-choice QA dataset that is commonly used for training [@sun-etal-2019-improving]. We fine-tune 6 RoBERTa$_{\textsc{large}}$ models and report the average performance across runs. The UnifiedQA-v2 model is a single T5-based model that has been trained on 15 QA datasets.[^10] We also fine-tune RoBERTa$_{\textsc{large}}$ on CosmosQA and QuAIL, finding that zero-shot model performance is best with RACE fine-tuning but that the trends in model accuracy across our four datasets are consistent (Appendix [10](#app:zero-shot){reference-type="ref" reference="app:zero-shot"}).
96
+
97
+ As shown in Table [\[tbl:human-model-gap\]](#tbl:human-model-gap){reference-type="ref" reference="tbl:human-model-gap"}, model accuracy on the full datasets is lowest for [expert]{.smallcaps}, followed by [crowd]{.smallcaps}, [justification]{.smallcaps}, and then [baseline]{.smallcaps}. However, model accuracy alone does not tell us how much headroom is left in the datasets. Instead, we look at the difference between the estimated human performance and model performance.
98
+
99
+ The trends in the human--model gap on the 10-way annotated sample are inconsistent across models. For a more conclusive analysis, we focus on the higher-agreement portions of the data where label ambiguity is minimal.
100
+
101
+ On the high agreement section of the datasets, both models' performance is weakest on [expert]{.smallcaps}. RoBERTa$_{\textsc{large}}$ shows the second largest human--model gap on [crowd]{.smallcaps}; however, for UnifiedQA, [justification]{.smallcaps} is the next hardest dataset. This discrepancy between the two types of iterative feedback protocols is even more apparent in the unanimous agreement portion of the data. On the unanimous agreement examples, both models show the lowest performance on [expert]{.smallcaps}, but UnifiedQA achieves near-perfect performance on [crowd]{.smallcaps}. This suggests that while the [crowd]{.smallcaps} protocol used nearly the same crowdsourcing pipeline as [expert]{.smallcaps}, the evaluations done by experts are a much more reliable metric for selecting workers to qualify and for generating feedback, at the cost of greater difficulty with scaling to larger worker pools. This is confirmed by inter-annotator agreement: Expert agreement on the rubric-based evaluations has a Krippendorff's $\alpha$ of 0.65, while agreement between crowdworker evaluations is 0.33.
102
+
103
+ Model performance on the unanimous agreement examples of [justification]{.smallcaps} is comparable to, or better than, performance on [baseline]{.smallcaps}. To estimate the quality of justifications, we manually annotate a random sample of 100 justifications. About 48% (95% CI: \[38%, 58%\]) are duplicates or near-duplicates of other justifications, and of this group, nearly all are trivial (e.g. *Good and deep knowledge is needed to answer this question*) and over half are in non-fluent English (e.g. *To read the complete passage to understand the question to answer.*). On the other hand, non-duplicate justifications are generally of much higher quality, mentioning distractors, giving specific reasoning, and rewording phrases from the passage (e.g. *Only #1 is discussed in that last paragraph. The rest of the parts are from the book, not the essay. Also the answer is paraphrased from "zero-sum\" to "one's gain is another's loss\"*). While we find that [justification]{.smallcaps} does not work as a stand-alone strategy, we cannot conclude that self-justification would be equally ineffective if combined with more aggressive screening to exclude crowdworkers who author trivial or duplicate justifications. @gadiraju2017using also recommend using the accuracy of a worker's self-assessments to screen workers.
104
+
105
+ Since the datasets from some protocols are clearly more challenging than others, a natural question arises: are these datasets also better for training models? To test cross-protocol transfer, we fine-tune RoBERTa$_{\textsc{large}}$ on one dataset and evaluate on the other three. We find that model accuracy is not substantively better from fine-tuning on any one dataset (Table [4](#tbl:cross-validation){reference-type="ref" reference="tbl:cross-validation"}, Appendix [11](#app:cross-protocol){reference-type="ref" reference="app:cross-protocol"}). The benefit of [expert]{.smallcaps} being a more challenging evaluation dataset does not clearly translate to training. However, these datasets may be too small to offer clear and distinguishable value in this setting.
106
+
107
+ ::: {#tbl:partial-input-performance-race}
108
+ Partial input P + A Q + A A
109
+ ------------------------------ ------------ ------------ ------------
110
+ [baseline]{.smallcaps}  69.9 (4.7) 41.9 (2.9) 34.9 (2.4)
111
+ [justification]{.smallcaps}  57.9 (1.3) 38.3 (2.2) 33.9 (6.3)
112
+ [crowd]{.smallcaps}  57.7 (3.1) 43.9 (2.0) 35.2 (1.9)
113
+ [expert]{.smallcaps}  52.0 (1.5) 42.8 (1.8) 35.7 (1.4)
114
+
115
+ : Accuracy (std.) of partial input baselines. *P* is passage, *Q* is question, and *A* is answer choices.
116
+ :::
117
+
118
+ To test for undesirable artifacts, we evaluate partial input baselines [@kaushik-lipton-2018-much; @poliak-etal-2018-hypothesis]. We take a RoBERTa$_{\textsc{large}}$ model, pretrained on RACE, and fine-tune it using five-fold cross-validation, providing only part of the example input. We evaluate three baselines: providing the model with the passage and answer choices only, the question and answer choices only, and the answer choices alone. Results are shown in Table [1](#tbl:partial-input-performance-race){reference-type="ref" reference="tbl:partial-input-performance-race"}. The passage+answer baseline has significantly lower performance on the [expert]{.smallcaps} dataset in comparison to the others. This indicates that the iterative feedback and qualification method using expert assessments not only increases overall example difficulty but may also lower the prevalence of simple artifacts that can reveal the answer. Performance of the question+answer and answer-only baselines is comparably low on all four datasets.
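+ As a sketch, the three ablated inputs can be constructed as follows (hypothetical example format; P is passage, Q is question, A is answer choices):
+
+ ```python
+ # Sketch of constructing partial inputs (hypothetical example format).
+ def partial_input(example, mode):
+     p, q = example["passage"], example["question"]
+     a = example["answers"]                  # list of four answer strings
+     if mode == "P+A":
+         return [(p, ans) for ans in a]      # passage + answer choices only
+     if mode == "Q+A":
+         return [(q, ans) for ans in a]      # question + answer choices only
+     return [("", ans) for ans in a]         # answer choices alone
+ ```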
119
+
120
+ <figure id="fig:answer-length" data-latex-placement="t!">
121
+ <img src="lengths_correct_vs_incorrect" />
122
+ <figcaption>Distribution of answer lengths. The distributions for different datasets and for the correct and incorrect answer options are plotted separately.</figcaption>
123
+ </figure>
124
+
125
+ We observe that the difficulty of the datasets is correlated with average answer length (Figure [3](#fig:answer-length){reference-type="ref" reference="fig:answer-length"}). The hardest dataset, [expert]{.smallcaps}, also has the longest answer options, with an average of 9.1 words, compared to 3.7 for [baseline]{.smallcaps}, 4.1 for [justification]{.smallcaps}, and 6.9 for [crowd]{.smallcaps}. This reflects the tendency of the 1- and 2-word answers common in the [baseline]{.smallcaps} and [justification]{.smallcaps} datasets to be extracted directly from the passage, while sentence-length answers, more common in [expert]{.smallcaps} and [crowd]{.smallcaps}, tend to be more abstractive. Figure [3](#fig:answer-length){reference-type="ref" reference="fig:answer-length"} also shows that incorrect answer options tend to be shorter than correct ones. This pattern holds across all datasets, suggesting a weak surface cue that models could exploit. Using an answer-length-based heuristic alone, accuracy is similar to the answer-only model baseline: 34.2% for [baseline]{.smallcaps}, 31.7% for [justification]{.smallcaps}, 31.5% for [crowd]{.smallcaps}, and 34.3% for [expert]{.smallcaps}.
126
+
127
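+ A minimal sketch of such a heuristic (our reading; we assume it simply picks the longest option, with ties broken by first occurrence):
+
+ ```python
+ # Length-only baseline: choose the answer option with the most words.
+ def length_heuristic(choices: list) -> int:
+     lengths = [len(c.split()) for c in choices]
+     return lengths.index(max(lengths))
+ ```
+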
+ We find that questions in the [expert]{.smallcaps} and [crowd]{.smallcaps} protocols have similar distributions of wh-words, with many *why* questions and few *who* or *when* questions compared to the [baseline]{.smallcaps} and [justification]{.smallcaps} protocols, suggesting that this additional feedback prompts workers to write more complex questions.
+
+ We also observe that many questions in the datasets are formulaic and include no passage-specific content, for instance *Which of the following is true?*, *What is the main point of the passage?*, and *Which of the following is not mentioned in the passage?*. We manually annotate 200 questions from each protocol to identify questions of this kind. We find no clear association between a dataset's difficulty and the frequency of such questions: 15% of questions in [expert]{.smallcaps} are generic, compared to 4% for [crowd]{.smallcaps}, 10% for [justification]{.smallcaps}, and 3% for [baseline]{.smallcaps}. We might expect higher-quality examples, which require reading a passage closely, to ask questions that are specific rather than generic. Instead, our results suggest that difficulty may be due more to the subtlety of the answer options, and the presence of distracting options, than to the complexity or originality of the questions.
+
+ We elicit two questions per passage in all four protocols, hypothesizing that the second question may be more difficult in aggregate. However, we find only a slight drop in model accuracy from the first to the second question on the [crowd]{.smallcaps} and [expert]{.smallcaps} datasets (1.0 and 0.7 percentage points). Model accuracy on [baseline]{.smallcaps} remains stable, while it increases by 2.7 percentage points on [justification]{.smallcaps}. A task design with minimal constraints, like ours, does not prompt workers to write an easier question followed by a more difficult one, or vice versa.
+
+ Individual examples within any dataset can have different levels of difficulty. To better understand the distribution of difficult examples in each protocol, we turn to Item Response Theory [IRT; @Baker1993ItemRT], which has been used to estimate individual example difficulty based on model responses [@lalor-etal-2019-learning; @MARTINEZPLUMED201918]. Specifically, we use the three-parameter logistic (3PL) IRT model, where an example is characterized by discrimination, difficulty, and guessing parameters. Discrimination defines how effective an example is at distinguishing between weak and strong models, difficulty defines the minimum ability of a model needed to obtain high performance, and the guessing parameter defines the probability of a correct answer by random guessing. Following @Vania21IRT, we use 90 Transformer-based models fine-tuned on RACE, with varying ability levels, and use their predictions on our four datasets as responses. For comparison, we also use model predictions on QuAIL and CosmosQA. Refer to Appendix [12](#app:irt){reference-type="ref" reference="app:irt"} for more details.
+
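+ For reference, in the standard 3PL formulation (notation ours), the probability that a model with ability $\theta_j$ answers example $i$ correctly is $$p_{ij} = c_i + \frac{1 - c_i}{1 + e^{-a_i(\theta_j - b_i)}},$$ where $a_i$, $b_i$, and $c_i$ are the discrimination, difficulty, and guessing parameters of example $i$.
+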
+ <figure id="fig:irt-difficulty" data-latex-placement="t!">
+ <img src="difficulty_all.png" />
+ <figcaption>Distribution of examples according to their difficulty parameters. <span class="smallcaps">crowd</span>/<span class="smallcaps">expert</span>-<span class="math inline">{1, 2, 3}</span> are the three intermediate rounds of data that are not included in the final datasets.</figcaption>
+ </figure>
+
+ Figure [4](#fig:irt-difficulty){reference-type="ref" reference="fig:irt-difficulty"} shows the distribution of example difficulty for each protocol. Also plotted are the difficulty parameters for the intermediate rounds of data collected in the iterative feedback protocols.[^11] We see that [expert]{.smallcaps} examples have the highest median and 75th percentile difficulty scores, while [baseline]{.smallcaps} scores the lowest. We also note that the greatest gain in difficulty for [crowd]{.smallcaps} examples happens between rounds 1 and 2, the only feedback and qualification stage that is conducted by experts. This offers further evidence that expert assessments are more reliable, and that crowdsourcing such assessments poses a significant challenge.
+
+ While the examples in [expert]{.smallcaps} have higher difficulty scores than those from the other protocols, the scores are significantly lower than those for CosmosQA and QuAIL (all four datasets show discrimination scores similar to CosmosQA and QuAIL). The data collection methods used for both CosmosQA and QuAIL differ substantially from the methods we tested. @rogers-etal-2020-getting constrain the task design for QuAIL and require workers to write questions of specific types, like those targeting temporal reasoning. Similarly, in CosmosQA, workers are encouraged to write questions that require causal or deductive commonsense reasoning. In contrast, we avoid dictating question type in our instructions. The IRT results here suggest that using prior knowledge to slightly constrain the task design can be effective for boosting example difficulty. Beyond task design, CosmosQA and QuAIL also use qualitatively different sources for passages: both datasets use blogs and personal stories, and QuAIL additionally uses texts from published fiction and news. Exploring the effect of source text genre on crowdsourced data quality is left to future work.
2106.03357/record.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "arxiv_id": "2106.03357",
+ "month": "2021_06",
+ "year": 2021,
+ "conference": "NEURIPS",
+ "title": "Evaluating State-of-the-Art Classification Models Against Bayes Optimality",
+ "arxiv_url": "https://arxiv.org/abs/2106.03357",
+ "source": {
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.03357",
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/tex_files_extracted/2106.03357",
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.03357/paper_text/paper.md",
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.03357/metadata.json",
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.03357/paper_text/paper.md",
+ "intro_method_from_kind": "markdown"
+ },
+ "files": {
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.03357/main_diagram/main_diagram.drawio",
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.03357/main_diagram/main_diagram.png",
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.03357/main_diagram/main_diagram.pdf",
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.03357/paper_text/intro_method.md",
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.03357/paper.pdf",
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.03357/latex_source"
+ },
+ "status": {
+ "copy_drawio": "exists",
+ "copy_png": "exists",
+ "diagram_pdf": "pdf_exists",
+ "intro_method": "exists",
+ "paper_pdf": "exists",
+ "latex": "exists"
+ }
+ }
2106.03632/record.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "arxiv_id": "2106.03632",
+ "month": "2021_06",
+ "year": 2021,
+ "conference": "NEURIPS",
+ "title": "Quantifying and Improving Transferability in Domain Generalization",
+ "arxiv_url": "https://arxiv.org/abs/2106.03632",
+ "source": {
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.03632",
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/tex_files_extracted/2106.03632",
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.03632/paper_text/paper.md",
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.03632/metadata.json",
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.03632/paper_text/paper.md",
+ "intro_method_from_kind": "markdown"
+ },
+ "files": {
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.03632/main_diagram/main_diagram.drawio",
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.03632/main_diagram/main_diagram.png",
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.03632/main_diagram/main_diagram.pdf",
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.03632/paper_text/intro_method.md",
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.03632/paper.pdf",
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.03632/latex_source"
+ },
+ "status": {
+ "copy_drawio": "exists",
+ "copy_png": "exists",
+ "diagram_pdf": "pdf_exists",
+ "intro_method": "exists",
+ "paper_pdf": "exists",
+ "latex": "exists"
+ }
+ }
2106.07630/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
+ <mxfile host="app.diagrams.net" modified="2021-10-05T16:44:03.677Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36" version="15.4.0" etag="Oj1MdU2tS7w6YJYYX2Q4" type="google"><diagram id="St6RI45leGJ7OTFQGGGN">7Vttc5s4EP41nmk/1CPA4uVj7cTXmbvrZNqbXntfOrJRsKYY+UBunP76W4F4ERKJ6+DW7dV5Q4tY0D67j1aLMvEW28NvOdlt/uQxTScuig8T72rius7MxfBHSu4riR+FlSDJWaw6tYK37AtVQqSkexbTQusoOE8F2+nCNc8yuhaajOQ5v9O73fJUv+uOJNQQvF2T1JT+zWKxqaQhRq38FWXJpr6zg9SZLak7KxXFhsT8rhKVfbzribfIORfV0fawoKk0Xm2XStFy4GzzYDnNxDEXuNUFn0m6V2NTzyXu68HmfJ/FVPZ3Jt78bsMEfbsja3n2DuAF2UZsU3X6lmdC4eVg1V7wlOelLm+xQPCRcpamtTzjGfSfk3ytrpQXxqTYNDctRM4/NaZ2G0lHszeXX3BGDYjmgh4GjeI0pgYfpXxLRX4PXdQFkUJLeacXqPZdi7Vfu+Kmg7M7U0Ki/CtpVLcQwIFCwY6IZyKCF8/gR/rO6nYSzN9PgquPSgA+CZJXIJm4fgrPMV/lcJTII+jy3CrVtP1j11Ze+1RPWJItS6UV/yIbviUW/8i5IILxDNpoHPCcEE1DHHU+GpjYMcH0gqmPTTijEdCcGWje5DRmawFmdNGcFKywQGQHrSPtoOL/u5dkUVr2RVGa9iV0cPDu0J7s6FnRhGWA8goAz9mh5zlPUNx41Ep6lICuIJTf46j/HHNRjK1Uf2b4C489X75wxrMKzeJzGNsW2EOhDnCXZisHCwOdvykD/BqOltUZwbZU2vb30SK/G+MjBPUMuVOsxfHMEsdR06kbxs4YcYyHWLlyS5vNYLRCN4w+aalpzzYTpiyRjLgG61CQz6XtGJDzS3Viy+JY3saKRIuVZFQOJ5mQRguRjo209xjYNNNjjUyATYa1zJb+CLD4Q7CIDRXkIxvfmbtwQdoRExrerm0Jib8OKVDLKCbGbmC4v2OZxkwjeyMYORg0ckkbNhODAsjFhzx0wLzBgLsqtWgMQ3pOz1ctZrT46hhmDB9PtWGyeCmXJ9BapXz9yUYfdRbsmc7olx+Qg54lS+20o/eDMbyXtp3iuvlBEUfZuDporXstTaNxb5UkSJ5QoTnNEdB0TI8tpq9lOU0hU/ys39GGh7rDDWeZ6CA/83Xk63atouD7fE3VVd0VU19R4PRjMeqpquxgqCodpBn4UT4T2UPvR8kEkyoTLFONZ+x5eYcfKStMzKxQG8vPniEuLzIZxKGrxx82U8HQMhc6eAQWr9PO02m8P+UN0/NXEP6BiQ6PQ+uDups8bllcNmoS1yeJAUqvWPFSKV2fzP3+LH00pYd6Botn+ChCB5DJfafbTnYojp+D6vu0rldpPHW2qHPCjm++ef0aBNfZmsewkug7ar2u+IOsaHrDC6ZKMc36YnDh0U1LNmQn9QmIffqFy4eZ72gOuWG5eGnlN63QyhoVqYEPuyiCOMMAqIODMJpJD2+rg06ZGEJ++U49QNp7+mbZZKyjOtWmF1G/DPnNsnlYkfYyCBjq1Fw0OQ6yrWe9nnOexGJH1H1HS0Ybbgp9r8tOU+R5DzMUNDpOY2GtYe4c4LOLIS/AcdotUgaB7hEYTVFgFDG/ltlgwfPYXfDwXcbLYx2zql0x0xX9WZkJ/YjMFBlltsixMRMKzsZMZsX8FzN9U2by/FB3geDEtMqLDEXT4xKrUxjGUqH95Tff02/w7MRJq+83oOiMfmMpIX/vstylI+1GwRR1Pm4PLrcP19HIu/qKDruGqhGRH6xrP/CmffnAm/anlkm+z9tyZ+oe8WohnAZnekPuXGBd/OID0B2pmO25vcoHOlsp2zFr2cu92OcUZLeUyKPCAP7S3qHOkCUqxwhD1ItBCDgz7fZtb6ZGyLlds6Z5QwqhAYNIFsPvJOUrkpYgEfE/BiyaGSUcSxHaMfHqJ9Mn4WXW+a63KxrHLEvkdiKylSbJVsWuHCx69oYm+5TkYIL4Uvcp9DHqQTgCZDh8fAeJNcLGQMysty1ZVgbSHBpIbQqDqf1SQ+oRfMz6xcDGzxGA9JFRP20rpd0qxexMGyIsezTVTr6ynrXd1RWfy4RS21p77IbdMTjz8W21ztkC8FuWlX6SLBMjHbBZeOqWCe8RRSdnmdBs98pX3dv/OPCu/wM=</diagram></mxfile>
2106.07630/main_diagram/main_diagram.pdf ADDED
Binary file (33.3 kB).
 
2106.07630/paper_text/intro_method.md ADDED
@@ -0,0 +1,107 @@
+ # Introduction
+
+ Multivariate time series forecasting is a key problem in many domains such as retail demand forecasting [@bose2017probabilistic], financial predictions [@zhou2020domain], power grid optimization [@hyndman2009density], road traffic modeling [@li2017diffusion], and online ads optimization [@cui2011bid]. In many of these settings, the problem involves simultaneously forecasting a large number of possibly correlated time series for various downstream applications. In the retail domain, the time series may capture sales of items in a product inventory, and in power grids, the time series may correspond to energy consumption in a household. Often, these time series are arranged in a natural multi-level hierarchy: for example, in retail forecasting, items are grouped into subcategories and categories, and arranged in a product taxonomy. In the case of power consumption forecasting, individual households are grouped into neighborhoods, counties, and cities. The hierarchical structure among the time series can usually be represented as a tree, with the leaf nodes corresponding to time series at the finest granularity, and the edges representing parent-child relationships. Figure [\[fig:hierarchy\]](#fig:hierarchy){reference-type="ref" reference="fig:hierarchy"} illustrates a typical hierarchy in the retail forecasting domain for time series of product sales.
+
+ ::: wrapfigure
+ r0.35 ![image](figures/hierarchy.png){width="35%"}
+ :::
+
+ In such settings, it is often required to obtain good forecasts not just for the leaf-level time series (fine-grained forecasts), but also for the aggregated time series corresponding to higher-level nodes (coarse-grained forecasts). Furthermore, for interpretability and business decision-making purposes, it is often desirable to obtain predictions that are roughly *coherent* or *consistent* [@thomson2019combining] with respect to the hierarchy tree. This means that the prediction for each parent time series equals the sum of the predictions for its children. More importantly, incorporating coherence constraints in a hierarchical forecasting model captures the natural inductive bias in most hierarchical datasets, where the ground-truth parent and child time series indeed adhere to additive constraints. For example, the total sales of a product category equal the sum of sales of all items in that category.
+
+ Some standard approaches for hierarchical forecasting include bottom-up aggregation and reconciliation-based approaches. Bottom-up aggregation involves training a model to obtain predictions for the leaf nodes, which are then aggregated up along the hierarchy tree to obtain predictions for higher-level nodes. Reconciliation methods [@ben2019regularized; @hyndman2016fast; @wickramasuriya2015forecasting; @wickramasuriya2020optimal] make use of a trained model to obtain predictions for all nodes of the tree and then, in a separate post-processing phase, *reconcile* (or modify) them using various optimization formulations to obtain coherent predictions. Both approaches suffer from shortcomings: bottom-up aggregation accumulates noise as one moves to higher-level predictions, while reconciliation does not jointly optimize the forecasts along with the coherence constraints.
+
+ At the same time, there have been several recent advances in using deep neural network models for multivariate forecasting, including Recurrent Neural Network (RNN) and Convolutional Neural Network (CNN) architectures [@salinas2020deepar; @sen2019think; @wang2019deep; @oreshkin2019n; @rangapuram2018deep], which have been shown to outperform classical time series models such as autoregressive and exponential smoothing models [@mckenzie1984general; @hyndman2008forecasting], especially on large datasets. However, most of these approaches do not explicitly address the question of how to model the hierarchical relationships in the dataset. Deep forecasting models based on Graph Neural Networks (GNNs) [@bai2020adaptive; @cao2021spectral; @yu2017spatio; @li2017diffusion; @wu2020connecting] do offer a general framework for learning on graph-structured data. However, it is well known [@bojchevski2020scaling] that GNNs are hard to scale to graphs with a very large number of nodes, which, in real-world settings such as retail forecasting, could mean hundreds of thousands of time series. More importantly, a desirable practical feature for multivariate forecasting models is that predicting future values of a particular time series should only require historical data from that time series (along with covariates), without requiring access to historical data from all other time series in the hierarchy. This allows for scalable training and inference using mini-batch gradient descent, without requiring each batch to contain all the time series in the hierarchy. This is often not possible for GNN-based forecasting models, which require batch sizes of the order of the number of time series.
+
+ **Problem Statement:** Based on the above motivations, our goal is to design a hierarchical forecasting model with the following requirements: 1) the model can be trained in a single-stage pipeline on all the time series data, without any separate post-processing; 2) the model captures the additive coherence constraints along the edges of the hierarchy; 3) the model is efficiently trainable on large datasets, without requiring, for instance, batch sizes that scale with the number of time series.
+
+ In this paper, we propose a principled methodology that addresses all of the above requirements for hierarchical forecasting. Our model comprises two components, both of which support coherence constraints. The first component is purely a function of the historical values of a time series, without distinguishing between the individual time series in any other way. Coherence constraints on such a model correspond to imposing an additivity property on the prediction function, which constrains the model to be a linear autoregressive (AR) model. Crucially, however, our model uses time-varying autoregressive coefficients that can themselves be nonlinear functions of the timestamp and other global features (linear versions of time-varying AR have historically been used to deal with non-stationary signals [@sharman1984time]). We refer to this component as the *time-varying autoregressive model*.
+
+ The second component focuses on modeling the global temporal patterns in the dataset by identifying a small set of temporal *global basis functions*. The basis time series, when combined in different ways, can express the individual dynamics of each time series. In our model, the basis time series are encoded in functional form within a trained seq-2-seq model [@sutskever2014sequence]. Each time series is then associated with a learned embedding vector that specifies the weights for its decomposition along these basis functions. Predicting a time series into the future with this model then just involves extrapolating the global basis functions and combining them using its weight vector, without explicitly using the past values of that time series. The coherence constraints therefore only impose constraints on the embedding vectors of the time series, which can be easily modeled by a hierarchical regularization function. We call this component the *basis decomposition model*. In Section [8.2](#sec:main_thm){reference-type="ref" reference="sec:main_thm"}, we also provide theoretical justification for how such hierarchical regularization using basis decomposition results in improved prediction accuracy.
+
+ We experimentally evaluate our model on multiple publicly available hierarchical forecasting datasets. We compare our approach to state-of-the-art (non-hierarchical) deep forecasting models, GNN-based models, and reconciliation models, and show that our approach obtains consistently more accurate predictions at all levels of the hierarchy tree.
+
+ # Method
+
+ We are given a set of $N$ coherent time series of length $T$, arranged in a pre-defined hierarchy consisting of $N$ nodes. At time step $t$, the time series data can be represented as a vector $\yv_t \in \Rb^N$ denoting the values of all $N$ nodes. We compactly denote the set of time series for all $T$ steps as a matrix $\Yv = \sbb{\yv_1, \cdots, \yv_T}^\top \in \Rb^{T \times N}$. We also define $\yv\idx{i}$ as the $i$th column of $\Yv$, denoting all time steps of the $i$th time series, and $\yv_t\idx{i}$ as the $t$th value of the $i$th time series. We compactly denote the $H$-step history of $\Yv$ by $\Yv_\Hc = [\yv_{t-H}, \cdots, \yv_{t-1}]^\top \in \Rb^{H \times N}$ and the $H$-step history of $\yv\idx{i}$ by $\yv_\Hc\idx{i} = [\yv_{t-H}\idx{i}, \cdots, \yv_{t-1}\idx{i}] \in \Rb^H$. Similarly, we define the $F$-step future of $\Yv$ as $\Yv_\Fc = [\yv_t, \cdots, \yv_{t+F-1}]^\top \in \Rb^{F \times N}$. We use the $\widehat{\cdot}$ notation to denote predicted values, for example $\widehat{\Yv}_\Fc,\ \widehat{\yv}_\Fc$, and $\widehat{\yv}_t$.
+
+ Time series forecasts can often be improved by using features as input to the model along with the historical time series. The features often evolve with time, for example, categorical features such as *type of holiday*, or continuous features such as *time of the day*. We denote the matrix of such features by $\Xv \in \Rb^{T\times D}$, where the $t$th row denotes the $D$-dimensional feature vector at the $t$th time step. For simplicity, we assume that the features are *global*, meaning that they are shared across all time series. We define $\Xv_\Hc$ and $\Xv_\Fc$ analogously to the above.
+
+ We assume that the time series data are coherent, that is, they satisfy the *sum constraints* over the hierarchy. The time series at each node of the hierarchy is equal to the sum of the time series of its children, or equivalently, to the sum of the leaf time series of the sub-tree rooted at that node. Figure [\[fig:hierarchy\]](#fig:hierarchy){reference-type="ref" reference="fig:hierarchy"} shows an example of a sub-tree rooted at a node.
+
+ As a result of aggregation, the data can have widely varying scales, with values at higher-level nodes being orders of magnitude larger than at the leaf nodes. It is well known that neural network training is more efficient if the data are similarly scaled. Hence, in this paper, we work with rescaled time series data. The time series at each node is downscaled by the number of leaves in the sub-tree rooted at that node, so that the series satisfy *mean constraints* rather than the sum constraints described above. Denote by $\Lc(p)$ the set of leaf nodes of the sub-tree rooted at $p$. Hierarchically coherent data then satisfy the following *data mean property*, $$\begin{equation}
+ \label{eqn:data_mean_property}
+ \yv\idx{p} = \frac{1}{|\Lc(p)|} \sum_{i \in \Lc(p)} \yv\idx{i} \quad \text{\emph{(Data Mean Property)}}.
+ \end{equation}$$
+
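+ As a concrete illustration, this rescaling amounts to a single broadcast division (a minimal sketch; the `leaf_counts` representation of $|\Lc(p)|$ is our assumption):
+
+ ```python
+ import numpy as np
+
+ # Y: (T, N) array of raw, sum-coherent series (one column per node).
+ # leaf_counts: (N,) array; entry p is |L(p)|, the number of leaves in the
+ # sub-tree rooted at p (1 for leaf nodes). After rescaling, each parent
+ # column equals the mean of its leaf columns, i.e. the data mean property.
+ def rescale_to_mean_coherent(Y: np.ndarray, leaf_counts: np.ndarray) -> np.ndarray:
+     return Y / leaf_counts[None, :]
+ ```
+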
+ We now introduce the two components of our model, namely the *time-varying AR model* and the *basis decomposition model*. As mentioned in the introduction, the combination of these two components satisfies the three requirements in our problem statement. In particular, we shall see that the coherence property plays a central role in both components. For simplicity, all equations in this section are written for forecasting one step into the future ($F=1$), even though all the ideas extend trivially to multi-step forecasting. The defining equation of our model can be written as, $$\begin{align*}
+ \widehat{\yv}_{\Fc}^{(i)} &= f(\yv_\Hc\idx{i}, \Xv_\Hc, \Xv_\Fc, \Zv_\Hc, \theta_i) \\
+ & = \underbrace{\big\langle \yv_\Hc\idx{i}, a(\Xv_\Hc, \Xv_\Fc, \Zv_\Hc) \big\rangle}_{\text{Time varying AR (TVAR)}}
+ + \underbrace{\big\langle \theta_i, b(\Xv_\Hc, \Xv_\Fc, \Zv_\Hc) \big\rangle}_{\text{Basis decomposition (BD)}}. \addtocounter{equation}{1}\tag{\theequation}\label{eqn:base_form}
+ \end{align*}$$
+
+ In the above equation, $\Zv_\Hc$ is a latent state vector that contains summary temporal information about the whole dataset, and $\theta_i$ is the embedding/weight vector of time series $i$ in the basis decomposition model. $\Zv_\Hc$ can be a relatively low-dimensional, temporally evolving variable that represents the global state of the dataset at a particular time. We use the *Non-Negative Matrix Factorization* (NMF) algorithm of @gillis2013fast to select a small set of representative time series that encode this global state. If the indices of the selected representative time series are denoted by $\{i_1, \cdots, i_R\}$ (where $R$ denotes the *rank* of the factorization), then we define $\Zv = [\Yv\idx{i_1}, \cdots, \Yv\idx{i_R}] \in \Rb^{T\times R}$. Note that we only feed the past values $\Zv_\Hc$ as input to the model, since future values are not available during forecasting. Also note that the final basis time series can be a non-linear function of $\Zv_\Hc$. In our experiments, $R$ is tuned but is always much smaller than $N$. $a$ and $b$ are functions that do not depend on $\yv_\Hc\idx{i}$ or $\theta_i$; we provide more details as we delve into the individual components.
+
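+ Schematically, a one-step forecast under this equation reduces to two inner products (a sketch in our notation, with `a_coef` and `b_basis` standing in for the outputs of the learned functions $a$ and $b$):
+
+ ```python
+ import numpy as np
+
+ def predict_one_step(y_hist, a_coef, b_basis, theta_i):
+     """One-step forecast for series i as in the defining equation.
+
+     y_hist:  (H,) history of series i
+     a_coef:  (H,) output of a(X_H, X_F, Z_H), shared across series (TVAR)
+     b_basis: (K,) output of b(X_H, X_F, Z_H), shared across series (BD)
+     theta_i: (K,) embedding of series i
+     """
+     tvar = float(np.dot(y_hist, a_coef))   # time-varying AR term
+     bd = float(np.dot(theta_i, b_basis))   # basis decomposition term
+     return tvar + bd
+ ```
+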
+ **Time-Varying AR (TVAR):** The first part of the expression in equation [\[eqn:base_form\]](#eqn:base_form){reference-type="ref" reference="eqn:base_form"}, denoted *Time-Varying AR (TVAR)*, resembles a linear autoregressive model with coefficients $a(\Xv_\Hc, \Xv_\Fc, \Zv_\Hc) \in \Rb^H$ that are a function of the input features and thus can change with time. The AR parameters of this model are shared across all time series and hence do not encode any time-series-specific information, a drawback that is overcome by the basis decomposition part of our model. This component is *coherent by design* because it is a shared linear AR model. However, even though the AR weights are shared across all time series at a given time point, they can crucially change with time, lending more flexibility to the model.
+
+ *Implementation:* To model the sequential nature of the data, we use an LSTM encoder to encode the past $\Xv_\Hc$ and $\Zv_\Hc$. We then use a fully connected (FC) decoder to predict the autoregressive weights. Similar to the multi-horizon approach of @wen2017multi, we use a different decoder head for each future time step, resulting in an $F$-headed decoder that produces $F$-step predictions of the TVAR weights. The decoder also takes as input the future covariates $\Xv_\Fc$, if available. The produced weights are then combined with the history via an inner product to produce the final TVAR predictions. We illustrate this architecture in Figure [1](#fig:full_arch){reference-type="ref" reference="fig:full_arch"} (right).
+
+ **Basis Decomposition (BD) with Hierarchical Regularization:** We now come to the second part of our model in equation [\[eqn:base_form\]](#eqn:base_form){reference-type="ref" reference="eqn:base_form"}. As discussed before, this part of the model has per-time-series adaptivity, as different time series can have different embeddings. It resembles an expansion of the time series on a set of basis functions $b(\Xv_\Hc, \Xv_\Fc, \Zv_\Hc) \in \Rb^K$, with the basis weights/embedding for time series $i$ denoted by $\theta_i \in \Rb^K$. Both the basis functions and the time-series-specific weights are learned from the data, rather than fixed to a specific form such as a Fourier or wavelet basis.
+
+ The idea of using a basis has also recently been invoked in the time series literature [@sen2019think; @wang2019deep]. The basis recovered in the implementation of @wang2019deep is allowed to vary for each individual time series and is therefore not a true basis. @sen2019think do explicitly recover an approximate basis on the training set through low-rank matrix factorization, regularized by a deep global predictive model alternately trained on the basis vectors, and so their approach is not amenable to end-to-end optimization. We shall see that our model can be trained in an end-to-end manner.
+
+ *Embedding Regularization for Approximate Coherency:* The TVAR part of our model is coherent by design due to its linearity. The BD part, however, requires the embeddings of the time series to satisfy the mean property along the hierarchy: $$\begin{equation}
+ \theta_p = \frac{1}{|\Lc(p)|} \sum_{i \in \Lc(p)} \theta_i \quad \text{(\emph{Embedding Mean Property})},
+ \label{eqn:emb_mean_property}
+ \end{equation}$$ for the forecasts to be coherent. We impose this constraint approximately via an $\ell_2$ regularization on the embeddings, $$\begin{equation}
+ E_\reg(\thetav) = \sum_{p=1}^N \sum_{i \in \Lc(p)} \| \theta_p - \theta_i\|_2^2.
+ \end{equation}$$ The purpose of this regularizer is twofold. First, we observe that, when the leaf embeddings are kept fixed, the regularizer is minimized when the embeddings satisfy the mean property [\[eqn:emb_mean_property\]](#eqn:emb_mean_property){reference-type="eqref" reference="eqn:emb_mean_property"}, thus encouraging coherency in the predictions. Second, it encodes the inductive bias present in the data corresponding to the hierarchical additive constraints. We provide some theoretical justification for this hierarchical regularization in Section [5](#sec:theory){reference-type="ref" reference="sec:theory"}.
+
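+ The regularizer itself is straightforward to transcribe (a sketch; the `leaves` mapping encoding $\Lc(p)$ is our assumption about how the tree is represented):
+
+ ```python
+ import numpy as np
+
+ def embedding_regularizer(theta: np.ndarray, leaves: dict) -> float:
+     """E_reg(theta) = sum_p sum_{i in L(p)} ||theta_p - theta_i||_2^2.
+
+     theta:  (N, K) matrix of node embeddings, one row per node.
+     leaves: maps each node index p to the list of leaf indices in L(p).
+     """
+     reg = 0.0
+     for p, leaf_ids in leaves.items():
+         diffs = theta[p][None, :] - theta[leaf_ids]  # (|L(p)|, K)
+         reg += float((diffs ** 2).sum())
+     return reg
+ ```
+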
+ *Implementation:* As before, we use an LSTM encoder to encode the past $\Xv_\Hc$ and $\Zv_\Hc$. The encoding, along with the future features $\Xv_\Fc$ (which are sequential in nature), is passed through an LSTM decoder to yield the $F$-step basis predictions, which are then combined with the embedding via an inner product to produce the final BD predictions. A functional representation of the basis time series is thus implicitly maintained within the trained weights of the basis-generating seq-2-seq model. Note that the embeddings are also trained in our end-to-end model. We illustrate this architecture in Figure [1](#fig:full_arch){reference-type="ref" reference="fig:full_arch"} (left).
+
+ We emphasize that the main ideas in our model are agnostic to the specific type of neural network architecture used. For our experiments, we use an LSTM architecture [@hochreiter1997long] for the encoder and decoder; other architectures, including transformers [@vaswani2017attention] and temporal convolution networks [@borovykh2017conditional], can also be used.
+
+ **Loss Function:** During training, we minimize the mean absolute error (MAE) of the predictions along with the embedding regularization term introduced above (our method generalizes to other losses too, such as mean squared error or mean absolute percentage error). With regularization weight $\lambda_E$, $\widehat{\yv}\idx{i}_{\Fc}$ defined as in Eq. [\[eqn:base_form\]](#eqn:base_form){reference-type="eqref" reference="eqn:base_form"}, and $\Theta$ denoting the trainable parameters of $a$ and $b$, our training loss function is $$\begin{equation}
+ \label{eqn:train_loss}
+ \ell(\Theta, \thetav) =
+ \underbrace{\sum_{i} \sum_{\Fc} |\yv_{\Fc}\idx{i} - \widehat{\yv}_{\Fc}\idx{i}|}_{\text{Prediction loss}}
+ + \underbrace{\lambda_E E_\reg(\thetav)}_{\text{Embedding regularization}}.
+ \end{equation}$$ Note that the time-series-dependent part of the loss function can easily be mini-batched, and the embeddings are not memory intensive.
+
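+ Putting the two terms together (a sketch reusing the `embedding_regularizer` above; array shapes are our assumption):
+
+ ```python
+ import numpy as np
+
+ def training_loss(y_true, y_pred, theta, leaves, lam_e):
+     """MAE prediction loss plus the weighted hierarchical embedding penalty.
+
+     y_true, y_pred: (F, N) actual and predicted future windows.
+     lam_e: the regularization weight lambda_E.
+     """
+     mae = float(np.abs(y_true - y_pred).sum())
+     return mae + lam_e * embedding_regularizer(theta, leaves)
+ ```
+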
+ <figure id="fig:full_arch">
+ <p><img src="figures/basis_arch.png" style="height:20.6%" alt="image" /> <img src="figures/ar_arch.png" style="height:20.6%" alt="image" /></p>
+ <figcaption>In this figure we show the architectures of our two model components separately. On the left we show the BD model, where the seq-2-seq model implicitly maintains the basis in a functional form. Note that the time-series specific weights <span class="math inline">{<em>θ</em><sub><em>i</em></sub>}</span> are also trained. On the right, we show the TVAR model. The fully connected decoder has a different prediction head for each future time-point.</figcaption>
+ </figure>
+
+ In this section, we theoretically analyze the benefits of modeling hierarchical constraints in a much simplified setting, and show how it can result in provably improved accuracy under some assumptions. Since analyzing our actual deep non-linear model for an arbitrary hierarchical set of time series is complex, we make some simplifying assumptions about the problem and model. We assume that every time series in the dataset is a linear combination of a small set of basis time series. That is, $\Yv = \Bv\thetav + \wv$, where $\Bv \in \Rb^{T \times K}$ denotes the set of basis vectors, $\thetav = [\theta_1, \cdots, \theta_N] \in \Rb^{K \times N}$ denotes the set of weight vectors used in the linear combination for each time series, and $\wv \in \Rb^{T \times N}$ denotes the noise matrix, sampled i.i.d. as $w \sim \Nc(0, \sigma^2)$ for the leaf nodes. A classical example of such a basis set is a small subset of a Fourier or wavelet basis [@strang1993wavelet; @vanwavenet] relevant to the dataset. Note that we ignore the TVAR model for the sake of analysis and focus on the BD model, which includes the hierarchical regularization.
+
+ We consider a 2-level hierarchy of time series, consisting of a single root node (indexed by $0$) with $L$ children (denoted by $\Lc(0)$). We also assume that instead of learning the $K$ basis vectors $\Bv$ from scratch, they come from a much larger dictionary $\bar{\Bv}\in \Rb^{T \times D}$ of $D\ (\gg K)$ vectors that is fixed and known to the model. While the original problem learns the basis and the coefficients $\thetav$ simultaneously, here the goal is to select the basis from a larger dictionary and learn the coefficients $\thetav$.
+
+ We analyze this problem and show that, under the reasonable assumption that the parent embedding $\theta_0$ is close to all the child embeddings $\theta_n$, using the hierarchical constraints can result in a mean squared error at the leaf nodes that is a multiplicative factor $L$ smaller than the optimal mean squared error of any model that does not use the hierarchical constraints. Our proposed [HiReD]{.smallcaps} model, when applied in this setting, results in the following (hierarchically) regularized regression problem: $$\begin{equation}
+ \min_{\thetav} \frac{1}{NT} \|\yv - \Bv\thetav\|_2^2 + \lambda \sum_{n \in\Lc(0)} \|\theta_0 - \theta_n\|_2^2.
+ \end{equation}$$ For the sake of analysis, we instead consider a two-stage version, described in Algorithm [\[alg:support_rec\]](#alg:support_rec){reference-type="ref" reference="alg:support_rec"} and Algorithm [2](#alg:param_rec){reference-type="ref" reference="alg:param_rec"}: we first recover the support of the basis using basis pursuit [@chen2001atomic]; we then estimate the parameters of the root node, which are plugged in to solve for the parameters of the child nodes. We also define the baseline (unregularized) optimization problem for the leaf nodes, which does not use any hierarchical information, as $$\begin{equation}
+ \label{eqn:unreg_child}
+ \tilde{\theta}_n = \argmin_{\theta_n} \frac{1}{T}\|y_n - \Bv\theta_n\|_2^2 \quad \forall n \in \Lc(0).
+ \end{equation}$$ The basis support recovery follows from standard analysis [@wainwright2009sharp], detailed in Lemma [\[lem:support_recovery\]](#lem:support_recovery){reference-type="ref" reference="lem:support_recovery"} in the Appendix. We focus here on the performance of Algorithm [2](#alg:param_rec){reference-type="ref" reference="alg:param_rec"}. The following theorem bounds the error of the unregularized ($\tilde{\theta}_n$) and the hierarchically regularized ($\widehat{\theta}_n$, see Algorithm [2](#alg:param_rec){reference-type="ref" reference="alg:param_rec"}) solutions. A proof of the theorem can be found in Appendix [8.2](#sec:main_thm){reference-type="ref" reference="sec:main_thm"}.
+
+ <figure id="alg:param_rec">
+ <div class="minipage">
+ <div class="algorithm">
+ <p><span class="math inline">$\widehat{\alpha}_0 \gets \underset{\alpha\in \Rb^n}{\argmin} \frac{1}{2T}\|y_0-\bar{\Bv}\alpha\|_2^2 + \lambda_L\|\alpha\|_1$</span> Estimate support <span class="math inline"><em>Ŝ</em> = {<em>i</em> | |<em>α̂</em><sub>0</sub>| &gt; 0}</span> Estimate true basis <span class="math inline">$\Bv \gets \bar{\Bv}_{\widehat{S}}$</span></p>
+ </div>
+ </div>
+ <div class="minipage">
+ <div class="algorithm">
+ <p><span class="math inline">$\widehat{\theta}_0 \gets \argmin_{\theta_0} \frac{1}{T}\|y_0 - \Bv\theta_0\|_2^2$</span></p>
+ </div>
+ </div>
+ <figcaption>Parameter Recovery</figcaption>
+ </figure>
+
+ ::: theorem
+ []{#thm:main_thm label="thm:main_thm"} Suppose the rows of $\Bv$ are norm bounded as $\|\Bv_i\|_2 \le r$, and $\|\theta_n - \theta_0\|_2 \le \beta$. Define $\Sigma = \Bv^T\Bv/T$ as the empirical covariance matrix. For $\lambda_E = \frac{\sigma^2K}{T\beta^2}$, $\widetilde{\theta}_n$ and $\widehat{\theta}_n$ can be bounded as, $$\begin{equation}
+ \label{eqn:main_thm}
+ \Eb\|\widetilde{\theta}_n - \theta_n\|^2_\Sigma \le \frac{\sigma^2K}{T}, \quad\Eb\|\widehat{\theta}_n - \theta_n\|^2_\Sigma \le 3\frac{\sigma^2K}{T}\frac{1}{1 + \frac{\sigma^2 K}{T r^2 \beta^2}} + 6\frac{\sigma^2K}{TL}.
+ \end{equation}$$
+ :::
+
+ Note that $\|\widehat{\theta}_n - \theta_n\|_\Sigma^2 = \|\Bv(\widehat{\theta}_n - \theta_n)\|^2$ equals the training mean squared error. The gains from the regularization can be understood by considering the case of a small $\beta$; a small $\beta$ implies that the child time series have structural similarities, which is common in hierarchical datasets. As $\beta \to 0$, the hierarchically regularized estimator approaches an error of $\Oc(\frac{\sigma^2K}{TL})$, which is $L$ times smaller than that of the unregularized estimator. In fact, if $1/\beta^2 = \omega(T)$, then the denominator $1 + \frac{\sigma^2 K}{T r^2 \beta^2}$ in Eq. [\[eqn:main_thm\]](#eqn:main_thm){reference-type="eqref" reference="eqn:main_thm"} is $\omega(1)$, resulting in $\Eb\|\widehat{\theta}_n - \theta_n\|^2_\Sigma = o(\frac{\sigma^2K}{T})$, which is asymptotically better than the unregularized estimate.
2106.09563/record.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "arxiv_id": "2106.09563",
+ "month": "2021_06",
+ "year": 2022,
+ "conference": "ICLR",
+ "title": "On Anytime Learning at Macroscale",
+ "arxiv_url": "https://arxiv.org/abs/2106.09563",
+ "source": {
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.09563",
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/tex_files_extracted/2106.09563",
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.09563/paper_text/paper.md",
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.09563/metadata.json",
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.09563/paper_text/paper.md",
+ "intro_method_from_kind": "markdown"
+ },
+ "files": {
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.09563/main_diagram/main_diagram.drawio",
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.09563/main_diagram/main_diagram.png",
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.09563/main_diagram/main_diagram.pdf",
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.09563/paper_text/intro_method.md",
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.09563/paper.pdf",
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.09563/latex_source"
+ },
+ "status": {
+ "copy_drawio": "exists",
+ "copy_png": "exists",
+ "diagram_pdf": "pdf_exists",
+ "intro_method": "exists",
+ "paper_pdf": "exists",
+ "latex": "exists"
+ }
+ }
2106.11613/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
+ <mxfile host="app.diagrams.net" modified="2021-01-20T11:18:27.162Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36" etag="kGI0gkjreD--F2OpUOrY" version="14.2.4" type="device"><diagram id="QDqIrWZdkDLLdq4FfIat" name="Page-1">7V1Zd6JK1/41uews5uESEAREQQFBb76FzIqAgAz++q9wSMdod6fPydRvn6ysRIqiLGo/e6hdu3Y9oNy2HRZOHo0zz08eEMhrH9DBA4LABIqBf31JdyrBYOhUEBaxd670vUCPD/658FJtH3t+eVWxyrKkivPrQjdLU9+trsqcosia62pBllx/a+6E/k2B7jrJbakVe1V0KqUQ8nu56MdhdPlmmKBPd7bOpfK5iTJyvKw5FR1fDuUfUK7Isur0adtyftIP3mVcTiMg/ODuU8cKP61e80B3OIRsUq7xfTjAVjw0VHf0Nxg/d7d2kv35lc/drbrLGBTZPvX8vhnoAWWbKK58PXfc/m4DqA7KomqbgCsYfEyclZ+wjrsJj49xWZIVx2bQ4PgDqpQbv3Kjc3NlVWSbp7HtWwjiJLn3WFg4Xgxe9nIvzVLQBTYDXYmrHl5k356TxGEKLlxQ0wfVWM8po2Pn4adve9Y6BJGCIPTfmqWV4GzjpG/JiLcAdAg08Rvwd5ZtHdAkezviZyLUflH57bOiMwWGfrb1q6IDVc53EfT0xJkbUOLMHs0Vth7xU2n0HFkUcUb1GdHhU+PfiQ4+nOn+Wxig/m4MCAJKQ9RnYQC5jPYrMIBC74QBmryhuO8BQXi+zIoqysIsdRL+eyl7jYnvdZQsy89jvfarqjtLdWdfZdc4AWNYdPb5+ePFor8Ar36+HLTPbw6689W/oVOZ7QvX/8lQnNmscorQr342ZKd6/TD9lOqFnzhVXF/rkzcnIPFrFgYKKO8/uvuV/2seXp2Iq6yeCp74Wd1XSdzz3bHcc4qN+sR/0COEXxcix1L4iJcKjETWcyZ9ZPsTLmDsw1gPvejtC+9hxA3vYfAjhN3yHoY8QtR7SWCE+JPY73il+QWg0lG8nmW4U1RMb3F9F8vHMiHuB+OZnL/Svv3Pe3M0/UqOvticX4Slby0zwXeqfeF/q7Jv+nEowe2B7wKju3hACGfbs3C6KvOn0bqrwOFfM/8L9es5PhW49whIuJS/Cj6MfzEEe8G/9CN+a0LBF0o+Z2AUfy/uxf4d98L/jHuvGM5NnLKM3Rc8d9faWVE4hkN3bCzK9V33vXnxQplfMuNFgX0RZrz0+xk3IjgK/1jLVsd/v2K0swqEHpG7Ri/i3uU6j1gROPFhXIcT0MUavfAdjjzCBPT9B7/hQfIeC0JvokMzY0ypMTDbhUk2bifLlnZ232D4hhQfoEJfQYC+yvl5YPn/K965o8jujsXXMk3hV7gYLlwTb4/+GLYHZ+wCQvQzSS0r47PduMqqKtu+bop5aYM5zwarnqasU+YnP1EQtz3l2eNXMpdS6FICPntO5TygzOkSEfI0fEC4eM6qswYaDcOMAT8T3Yx4MwSfhhT4w1YcswD/uXpg0AX4MMPMhJ/OZxjjoy5Njs3ZzB0uNJrtIqrM5qzJjvlSLpvFmGNGIy7LJNXhdxw3skxhPAKNjXU+XIQMGBmFr3JiX3SJ6xUJukT3+BiMrFAV1c7xD8GYro0VfkBrbqWKkSE5+2YkTuzVdumPonAShMZ8wIweEJaBfcZ0NG0e+1OLYnc0ExKer5xu9X/mjjYwY/DJnIPbFBMivl+A10uIwZwYsCaABGvBXdDQ+8OSJ5cV0i59MEIsArsTz/MwiyRJjFQEglIHh67jtKiu6z1H7W2locHHZAsqa6vVqqu1TGJt39+o4E1Urq7BP8yZbEl3OMEJgkAbqizriPB9P84oesICWIGfcI+ioqzsNduyKM3d29pgYAQuD5iLRVEUJ311JMvgQk4H68OqGVMmxSz9DCNrRKnQFR3jLhIs6hQm+54YhtFNsvWhxznC+gwzytTVeDubsPt913XUCIZhbFUU9X6/ZiukmDXtbG5HmqaqHT1OjQwOw4I5LEvUA010vhGD7xvsKkMK0gCUrKOIZNcthkteu6LamRcMeOr4ZYKtbxrMVARoNQ1FO0nm/pDw1DQi9nW93rlBsFryq2a1Ipv2IDf4GG5heD6PoN1MxvyRomAEk1X6fAR7gKPY8Uo12qABXDcJVCvovxuTFfWgaeATNe5HF0UsuN3Atb1YYD16mCaXHHmYlCv2cDhAqBX5ISOuwJD4Qk9TApcPB4IYjWe4lh5wyGOsZd8Mip5eAPx6IvgzMLg5A2N7jEUaWaNpLDu/YRZZ1Zb09gWbQY4sxQuGW0vzdOF5QYBCPMeHYOA6SUiDNLW9Yaht6cqq7SgNfF/3AM0FsbE3UoLo80KeA8nKBk9fTrYLDSuwgb3MQR8A9QWArpAl0NVko8M7OhBx/FSz3mY8s5kkY89mFiggpOM4oNiR1yh4xPC50+ODwQALbBul6NVkB42mCiiDep0lREv61NI6oWgaRnmXSVO06QDzYAmMEyP70ALKVsu12JiNRSVrm8CXXoDtRgZoW5ryWxs8fTgYvoisJmnbWOASl2Wl2Vm2HeXsAt2CkUBGQpmkpbMDL4mRgQZapfm6SjJj1C0bNAXFZozXe01RSGo8H+7mpjsgUWInd/B6eHyL068g4ovKapcLdpZvlqAnQbrhBxhjSnSxaehBBu3mStO//cxjh3ojTcI4OkKCGLpjX5NG/JATYWu2WyodtqYXQ0GoN+mBptu2hWaWyLu5ux2ZajtdW7yG7nBvT40AhrDVdAm+WxR5FgCOnS/WFjspOWmQbMBICGEdTputOzAmZRPUgMepNbpCCWJXzZpivOtrlIm+HUbgZQ94Zooi2jZ2mymO1lnQiFcXdMxVYAi38TpprdJXGLwZHA7NVBs4UQuoxA5Zc+3qPSq7iTxdyeiwZMYaM5aY2BKh7agbg0rcIIqmizM6ojW5YHcVsh2Kdljq6mCcQ4nSWVhpraRNm5QbbZuXXRRbmMITvRo8sGFozypqCwa0F5o7DKh+YUg1w2G6CpmFFyejPHZW0soRdHcOmzST4BShdKIbNiQlSvsigxh/NBy6vAxPTdAZOgT9Emb4gM0xRSZrY9rlMr4XgTTtpdROt+KpSU6hqYyTzonG4HdfoH46g4N6EtV2l86U+XRB7czRfEh3WY34JCGKg3Z3oDQ1mXiDie3s5BHu6b3waWdLPYiUQcdBfETReKHuNsWSWbXLqG52xAlKcTEM9xEawiqxgWAcwxYAuWwvLP1p2LlL0BILsTObnBbRfO07sWjbQOL5Yj+1B/1G7D08Vk2r1yhy3YyzNbw1Nx08EUwOdOFQw7AASOf4nrCjBWQiapAdNLhGPYG5Oww1fW8XEBHudI2BB11UZruyn
o0UNytHZZOmaRSvWLUJD5CpzC1vGQzZAlgD7GYQ8ocO0oqC73BCbg+HLAWCVhuPbd7gagLWtVlBsxZVzPdolHhA5sydrQfn2HJSu5MpUKLZfr+PgfqFRW3BdeAbREsCIga8P/jatleTC7ILJvORMqoqeDiJGL9EXctDgbwbr2NWcamdSCPb9bKZjXmSJuhe2Y1oYEgQJowi+sCQyqmqVfbGHw4UbDc11sGhXrrEZhxCGMttppoV4D6yX3ZhSRB7ZrjqSaMbRkOKou25qrrdbHohA7vRuKxYaq2ioV7HlVVIIQnpo8B1F0vMAeZZtiPHiRWGy8DBGENhx0kv9e11swtapsBJY6swZcSnSDjX7G1AEsVAEnKzFVCbct1A7JbjLcvlIj1fi8xhNmi4Qyi5VidmpuQtZ1bKRznedvoaIIDV4lVhZFNenxCCxpqQoJsYqZnLphOdeMhEpk/JfhBosmwzrLXsxlhOaMCA0yYT1GaU2KkOXjIcHnqeDuRmXWMwJ1iUnFC85RR7PJNJZozwuh06fqVv16ZkZJuQlrpIaAoWqHGr1kRScpFpotp4k7a5mvNaveTFRcHINI7nSdNMTXvG2jtbjeXKznuuWKzMYZbzSTtNe1ZXG7ZGHAexm3YYZWuvTVd2M40Pk1nF9fp+N9Y5ahcptc4xDISh3Rx0t01NhA1LjKKoDuOCqGdWXeJoMhsPMbzhfS9iW0ealtASMIlgDDTYHRpUyRDJqFyy29wdJ3Yx38GjasXVUuAs5U03iDoybg+lJGI8SbGgH51XjqXtdJn4BT1p1stpC9lKWbr+cGxpwxaoL1aYBW28qubLodHCY3klLJHksMCGpYE0a7OTq3acLwZ1cSDEVbeb2Hg09bL+vZRNZ/AILtSENAV4n64X8UjyJwjuqlqRZd0EK3p5RUWGg0/gKqEEZtvh2miauXycx2N2JsWMatW7/QrlOAZ0heVNbl3ZrY9EC3NyqGb7cL4dC5S4tkdDli+Y1GTyjPNG471XjLCxuBvqfCF1vW6PuU043QaQS3hH49GKAtHIrVwgQgVJtp4SY7vEGFUYyS6j6Ur3u0Z1R7HRPzvtxb2iutlBxrxc3k2GlULotZtWmTzRFhJclJ4iFfPSaszZKMSAtcQWEUG33NzzIrTuaDWYkAQ7wViA+cSo5vNwQ00dl8pNZpV7yRJgPGilfRO6/aObAudKHGCJ3Qnj5ZAfCajszEq7FgLU9PCSJA6yPRK4jFmDAZQhF9suJnrgzRtJczlncNh3+3g2TtusEUtX1sxFvIMSc6tnSGyrskiEHSNwi7mOCJbq+xE7qanQDZx8vFXV/S4jtD0mJHBe6GhTu13Fp+F2usIMXt8sZL0BOnXmtL7sRiNuBTsymA4vBKuJQH8jWVEKb0lYq0hNkTldMdxOPABty4a4u9UhrWw3UEPhPiNvEj7hMXGxZGcqQqkzOnF58DZx6LK5bmm73QxMl9jVRtPqOKQyJxvnnq9lk3bP4Lgt6nlTDghusCsTYdLOC4nScy3JTBRq1EDe7QfQKMwMV8XmUzlg+C2xAXAOfFWdbPltvoAP9mBrhJxlTzXbja2hRwcDt90cNNGabqgmFCZumZXrxu70ZQtPepoK02LI+Usq9bnIEqYTtsZhp6IdMXQwzWclpWEkn1gf4phTYwYohGmD+U7HLjgmZbglIQv6NlxlC0U33D2zgTyDOGTcYD0q442VEOQqmAypMeMWO2cXGpukX1sXBslh7FmgI7I1wFUgICjMHKGYtJU3e3DbHOSCIwLR0ZsrbG4JxuSw0wVRytaTXFxXbDIfSoMUCICDaY+HRG4zfLOTejO42cKTbYUHQjsJB73hkJn4APPVeAtx/ZKukCMMlcRjZo1sY24m73kOmav9DGcGMbjNQDCzKZoNBjMMReW4I3JxgaUW1MSUxE6Z1G8krK8dcFskap2FGhoG4WJ8hh+SmZeNdNLT0bxWETVx2YHXbabyPGbadgTTwtAlFwtvtBh45VAYKJwmm4SkY8xGTiR6Yat1o1rcNkLHU3WB+zu6DK1RM0AJRFdKD8dlQZztc7Ofje4hLbH2WStj6gCeAyyzS9P0mNLx8j00nHVoMLI1DqhhYTOmtEO1WFdLUcvM2Y5dCnE9XTqiczbcJ/sQWRnKiBtQ3qhhx3uqNxHdSLGVib5jZQhOpDJNGVMIFSt1lUwLVR9SucJDcqFJKG2G7gXGgvHdHPOieW9eTPHQsRxJbpiK0+vtKBkZclJgxs7rucleWiNsMmo285wdwGiwppPlQSbMApiYkqIPjWxkDlcqi2OFJ/m1mVScO5hlpD7jeY2IDxEr9Qaewh9q29ennD6sA4yEgHVhK3YYza3hjlx7cm4CASfMKYoxDlSqlOtD6HuMGAik6iaLhOIaHjFra4vPxjHWcDOyt10WIz5b45bEwZtVJm28naHXey+EcA6no8zqVjxP9nJQIMNx0FvlY1w2dn5Qr2sbCuaL4QHzfG5t9LLVdx1UmezNZUJNjBWfxuWA8tf7AVB4y2VM+XM2mhrZ3N8pc90OCBHab8dWPhvVHIXtR/uQVVLIX6anSV1v8E2BlV4QtbIJ7HAtTvOEZe1cC7IG5dt1WmyWNss0nZwShRsmIyvqFpNsxFBzeeusZvPaWuDBwZzgCg9ZMz9kTdmZm9sp45E858XQfBSSvg35wr6fIA8xY+lmNCcie6Nm1dSKl+XCzem0zFhZmWbbdidl05R3p9LUUEJ2Xm5ydkTkG03vxmIzRfR2vC7XSloyG4ZMqSryyHWg1UZvOxDKfNX7jkLHwxFlXkQw0zGEVAo9t3tDLhBrrlqJO79cjdQ4HCwEVJha0oJ3WhHWlzC+9r1p5kDzoz6Wj9pib7bNdJ+CBhJqU418njJWg8GmnpQ+N7B1FdfUMoKlZdeqJYVuF8DEgufMQpuOusGCFQcMKyaMZ/YGoxEbMj5kZ4dhbNm0sQuaYIfOZuBrJH4a7ihDXccaXQ7dnaOzwhq1CbRBG992Qt4pO4VdSLRv9fOeLTJO11TSd5AtNQZLFyYbj7mwEmJPldk54/a+B1nRUG7mUtAIaRgTIRayyXvDjN2ui3Hq0MNNFxksvc1MR419hc9lrz2s3SLk9SXHyCt+XzOkSMWcxZoF30pMW9FMVddNTQezuPJ5MMOHZEfaxvJooBhk04rjUjmYYWk0Y4kWmEI1HaClDow7t0c6HbMi4ytjZCY4Mqt6LL/pQssd5h60TbKZxqrhVKxAtxF+g+QzQU9MARvyDjY3q0DAHUfd6RN+IU/5LsxM3hzVGtdk4cw3D/XGJ+LZ2rayckMsN4VO23OSlbHFZLUxREvPIMKpZsYQoRdSpvDJekOMsdbqVHXnzbn20I9jPpsme688WBaSFmpvmCLqVOCcTbFNYY2R1X5CJaRe4PFbCPYwkVSH6HoRhViIa2GvekLysBjFLrkbMkkryM14PYkTVdCBfoSn9n6m7ZoQDHFjq75eWIIXCPVOGDFpRlI1MYMc2uqduyxVJ1WgEMWcSnJlQ1DWkZQIBkdDqdzUvOXis2a8x7Yl03s6wlAxuVWo
yfowZ+aHNo23tgtUgEyOom4njXlhd+DVsm2V3k5W8kqP8MwJLSqcO1lMu/PYsua4uJUFdpPMQ29cr9m0Ime5EHAEM/TDispXZl50FcQZXD6bb+KdwVG4B8cFYKCZJBBUEXVJFQdDYOT3jNM7cTYzejeBM2xgs0Yo+kQ5qql2plfGwUMiwzOHc2nlLcZLl0NLfkr4ElAGQLbPyCmsiZtJsOkHe6rQOyBTKCVjiCFbG/5AXgQyDG2lfOI4xKYAxjh4qeWs4csC8iezpWkQejN0oHy25P26DZCdXQzk1ClbiEPDKgOzjG5hSOZSQUII7pl62ZL0whQWxd60dMGxRiqy8bqkaGSfQ9d72WOWMhFbMa3jedauMXuOVLhGgtlb40mQ3ElDm8oLjk3MIQ7bRXfYjHwOKThj0DsFAhXTdhWyyGizEL21JzLVkuZRTJ4iawYmhEmzset1tuMmtYc0ikcF815ICjXrJFlVM/nBH1YbXVl0BLPuoNG+WLmz3jwXmw2XhxindQOGSI0JZfN5TqUpr1NER/ZLCNBwpSx6kcsmyDyHpozFM+JWDMda7XgbDc7n4N1CcoIp09UaGWwhn5hINCFikFHEzDSaYLtlScF4p+Cr+VJfYZNwkqHhaDbT13vRS03wHcsw4GEG2RNS5rS2lvHb6OQocUWtn9OEK8/ZxULvFLSXO5E8EouUFaNg5sNCi4aDYbIJ9MjkMsDQZoMZxgoIq6Y3p7QJ0OVQmFqK0eq9oyypy/7NrQnOQqxvW3YP/7iapYFXADW7geI4X4q9mQAokmQupUv8SpwJeVyWoZGzcLoJCpI3sBSNmFEnq1t/2sKdupen2TpSV0TuG9MkDzIsobKAEp26ajt00QbSTuqXYvYmvE5a+pDkDKsgw3ALA505BQ1ANNIWPLmVbbc6kFPFZ21G7z0Vs8AaThbrsBVTSlJEl6BsjV+vFpK3Il29UJkEHkguo6uVvAvFbcyvq7gWCSucTODYtzbJMAVz2oE5rQZGwiS4E08XAJXhhOXgyX7B2fPdiG0BUhlIUMyODPPlPMOUIQNDgymUg54MX9kGSrGr3OWmKLyJV/3yCsPo5lydjXBuIUn9Ss95nYbNCs9/GU5yJ6DwY1ZKyZcLpX3Y0PMf9GahFIdvF0rxt1koxXzXHXhCx8zCWQdX36AKH327XXz7gGCFfxpqdIxmOLd6qnuJESd+SGb9WVjZVeDnuwY2IK8NbIAx6q2XZ4+PMkXhdM8q5FmcVuWzlrW+4BlWafgFWNFzGJrwgydIGP/5E+DDqRff0fn0Ov98/Ri5Db3QCt+L3eOa8EswA06trrF3Ew38cmF4G3veCeZ+GR+c1bGpHn3nIQTt4uwD3oucHtmXsMWPkyo4/khT13KFQh4R7JlcIV8VgIG8V/zw00t9p9HwKDRAmVHsQZ/+AjqRCPJS/P+aTNSHkgm5E8UE/x1RTCRK3I1i+sjIpftEwW6IwhVZWX7jgSrM8v4VFHD5N3AQgdwxoG4IdDe88914hsJ/Hb7UByHlbz1KT5vaznR6eL5v7L6egK61M0o/0s9/biNlUfiO9YnA5COBfBda7xbgfKvYpd6k7+Obz6HO4C6f/rkBzj8H1KvZ4ht9zRQw9GSIPaMlfWfH0LsFPdO3muQ7ycZO/ieKqzeiFnTNdORFxVyIR+JAl3wdm4C6F55JJNV5FK+ISOz22eXGt9NwM6ACmGy032+CT+HxP488ALO1H8BTc6B7pxbP918i5Bc8exO5eR29++U2HX4mBmEMe6FIKejaEXFr96DEHfnxXptOafQGdXoMxsop+iEFIgR8S+z+vULkyca5SA2Y+hX9YOgjxQYMk7dyg8cfKOGB4W/o9vXnE29Etyd79cJ2yCP9bKMEfMt1HzzbAEC6oc5/ew3/HTx+6Ry86NhfOwcvXPVV9m7c2bwhxIDwoOiPdsK9FbvD1+z+UkzDtwxPf6iUxrB/xe2f4e7/kxgbRl+bLuAiAr4KY6N/ZcaAN2J79FqrQ9dCAKUf8ecTvFu3y2fkGEBv7bWrGftfKMQvNhr8SPx8M+uHTshh7N6MHOt/ca5PK7UKHkhWeCAH59KvTrjni7LEu2rjn9red/gQRj6WsPgNrf5Txv8oo8clFdwfpnXp22XSv0Dp/v6CN0U8YtdeU4zEHr+IIoVuly1eLk5Ipy3jny2X9Q+nHEa/JBxCo0+0eB719LG+qltj90+3fX4/7RxFPuJ3PIhPdEKulyXuZCb8WGX57xJ6/FNl2caV/ezzSVWi+Pnyu6rsL7pnFy99VD/SusSf4vF6dVYS9LWRbyT21sr4HwW+weSLZfJzbNWPwt5e1sfOZtwP61/WRu/Xf58guScq/I0JiogXBAWS7hdrJR8d4wOjt5bfH+0//X2b7jrrKQaj/4RG77cKfhuEpe/zHGgQUKgD4y7x/8SQht/PkAnhL+gE/cqZ9cHrjsi9eKxn3hEw2Ef3CM6Df6As7zVp7yz5H/KX/D73YS9Dt3EYe7xoh+fE/ODliVv99Ikekf8524wgP800+5e4QL8QLq5A0UeoIlfAODkb/jxw0K+12y9eq8/esEK8cAkR6E/t8Jf18fMyzfva4dhtMv1Trt4+qpWLnMJxe3T8ZYl772wduuMUetqJ8DF5e/F73tj/6XWW3zcHgfn3cuvR0aH3hZZV7nCc4rex28+rXvIW9E+LfsGk3yfVR5I+x8Q9jn2+rRHpyQ86WzlxelQb8PE6SZy8jL8DxI3ixFOcLttXl++5XLHHgNjZ6aybkwe/UUBj5cP34xIumvMuSj9ruyyGPWL4nQ0KFwGP0vdcxzAEP0Lk8wjrW7CBOckj/WwLwyXI+u3PXbg9OOOCBTA0Vewks34HRxq+Bha3VPeKLDcu+vlJTvgFX/sncfEykffZxDjmuj2FRfvB5dlL8twTRs6DdSt8WAB/rl8eBGII4cA1/P36KJn6WTGXpQA1Tnykve+UVeOX1T8Fzs/Z+tXr+BcJ85tYeTfBRNw7V+cUCl/mTvo9Tv4pavYpTv7q/lsC7PmGon+EHegFdjJAnyA5mrcRUG5++qYguLDXK0DwjOrInZ0w70flH++feE7F8z7LP5DCF3Vxqtu/lBunoXJ6kvhKEGivyf/cl3hnb8P7IeLWVPxPJ3ySTsC+mk645728pxPoBxp56MNX/kCJ8c4CAXs1CD5NJ9wLrr2lMgrDYBL6n1Z4XxB8Fa1wG8/wn1b4JK1A/EOt8G6n9yG38gJoAAZ+YG/PlvpSYuCdufz1LoVfifp3I93tRlYYgVEER9/WCvyfE+C/H8D2SgH+BpS+e6rS7ZLUf/L7k+Q3dbut7qOs+rvQuA0eeXx8/Nrs/2bc/TNe+Rom+t0e3s7DvjzJPk5i/0uafhGLG4ZunfP/U/ttzpXeNUrgEhny6602Zxv2k6MEkEs/Xhkl8LI+elYcbxUlwNnagdgNSEvec9Bu7ClLiPgGf8qu7Ofrw8eAlXOsO/SIkeTD93h3oEhR7OF3A95fgcLvnPHYr8hdcQd5jBf+7eCrHwL7OV7v0uDNd4b9AF8I/ohS380S6voEcox
CH6HnS6DX7Z/Y89zkz1D8IkMY/lKonobjpqHf5i7kZRQh9Hv9uq7/PjE45K059tdkuYTp+wmnL3C7p5M/OBaevDW9ersqDmIg3M6u0FVx8YL+XVHyvyIfjt+LiLhPwXezqm4XPKd7/9zy/zh9EBR7xOjbWeaTNKd/FRv1sRsfL47OOwsRqyti/SCzH4zdy+z3q4DGy2rG6odLGbfQuG/Z3k6ZXg+he+L8Oo/VVXb4380f+CL93+fst0Wu5QNK3AnGu5cX9B0Rdy+o8ncRd4TaC9CpYFScYweYwu1J65727/6Htg9D27eb/Ob3Qq7eC293T7O49b5nadJ3OU77SUDvquyh8Jdux8fpl0HX6CNyR0S8V0Lt+5PfT8mC8nLy+1ZOk7uv+D6ukJvZFYrT1+bHS6PvB7O+t3JjIDfMx2VpsC+PjIFAul/9F279hcKtkZfx1fdnE/C9lKTUe0mDt13f/jtX1X7KnP8qfPoeFN5NMdwmqOvdNj9z3HyJZZk3W3X5GX/83koa8ZFke22UO/lAsw+M0PMETzyAmXPPGP9Fvr0fIF5kx/iA1Ao/c7T/J+E/XcLfC4b+UAkP37qjfu6Z/xIM/778TL+aip8l4OE7Lp1emmMPLHKU5sf9S32kIvdsLxN3qfO2cVP/a+L89eT/eHF+NwzkP4P9q4jze1HM7yXO70Lh1mD/8jFU7xsi9aWM9bs9vDXWvzzJPjns7Svb2z8NxPpZkuHrlYlfBD6838lab3Nm1r92hKEk/YiTN0E4v8pocYnwf/NFjc/JfPpJR3S/a/TihRd+nePoTN8vkgCL/PEGwtceygfj10v3kOdUzrdTpcK9ejKqqrw8PiU8nM7oKx/DLAMC38nj8tEFYhkR3BJUEYILpSYZABDC6n4RB/1/7t4BgE+ThPc5APA3pM6P1nN/INveILL2d8TRdYYdlMKutzLciKO7kfLvlSeJvHPQ2x+IxadJ6qdg8QZ4Xlz47jnNPhBjfcWPSvGJkLdwu10G+liM/diJ/Mdh7CqRy7/B2O9C6gaTH4QnnPh6ePpxUpY/CU9PDrSvIbM+CE8Ufosn9Jk6vE0V/V7YujshvDixP/YAzd/L/4egrzWb774i/Uqrmfgso/lnvf7fPU7+98PJX2TG789nwX/OSndjwd4iB+d9XrqN37tNwRn+1Sk4yZvU3f1Z3vgHZt28T7nbTTa3lMv/asrhxEu6IXejLz+Wbr9In/8f3RDiejqO3TnE94Npds8bdI9m/3c68uBIIxIMt/vHnntAvpECxKkXwhMjoLuzk7sO3ncj6D2Xys0ZFtyfSbq3Sj2NvtxtcIpX/Xnm6Q9d/4TveS1uyCj9dxTJdUpx+GVK8f4woF8dMvNe55LcJSxyG6d0h7DKX01HjHrEnp+u+2KnKgZ9Gq+uyW/f9tssC6WDSJdCNHC4zWVN7acqVPq7CXq9KIrQ2Efl+QeXRda7vp7uDcEUOxqDiXpf4/8B</diagram></mxfile>
2106.11613/main_diagram/main_diagram.pdf ADDED
Binary file (59.4 kB).
 
2106.11613/paper_text/intro_method.md ADDED
@@ -0,0 +1,75 @@
+ # Introduction
+
+ Chinese character recognition (CCR), which has been studied for many years, plays an essential role in many applications. Existing CCR methods usually rely on massive input data. For example, the HWDB1.0-1.1 database [@liu2013online] provides more than two million handwritten samples collected from 720 writers, covering 3,866 classes overall. However, there are 70,244 Chinese characters in total according to the latest Chinese national standard GB18030-2005[^2], so collecting samples for every character is time-consuming.
+
+ <figure id="fig:introduction" data-latex-placement="t">
+ <img src="./image/new_introduction.png" style="width:48.0%" />
+ <figcaption>Three categories of CCR methods. The proposed method decomposes a character into a sequence of strokes, which are the smallest units of Chinese characters.</figcaption>
+ </figure>
+
+ Early work on CCR mainly relied on hand-crafted features [@su2003novel; @shi2003handwritten]. With the rapid development of deep learning, numerous CNN-based methods emerged and outperformed the early traditional methods. Deep learning-based CCR methods can be divided into two categories: character-based methods and radical-based methods. Character-based methods treat each character as one class. For example, MCDNN [@cirecsan2015multi] ensembles the results of eight deep nets and reaches human-level performance. DirectMap [@zhang2017online] achieves a new state of the art by integrating the traditional directional map with a CNN model. However, these character-based methods cannot recognize characters that have not appeared in training sets, namely the *character zero-shot problem*. To address this, several radical-based methods have been proposed that treat each character as a radical sequence. DenseRAN [@wang2018denseran] makes the first attempt to cast CCR as a tree-structured image captioning task. HDE [@cao2020zero] designs a unique embedding vector for each Chinese character according to its radical-level constitution. However, existing radical-based methods have several drawbacks: 1) some radicals may not appear in training sets, namely the *radical zero-shot problem*; 2) most previous radical-based methods ignore the fact that some characters have the same radical-level constitution, a problem that becomes worse as the alphabet capacity increases; 3) since HDE [@cao2020zero] functions in an embedding-matching manner, it needs to store the embeddings of *all* candidates in advance, which costs considerable space; and 4) the radical-level decomposition leads to a more severe class imbalance problem.
+
+ In this paper, inspired by the fact that humans can easily generalize to grasp how to write characters unseen before if they have learned stroke orders of some characters, we propose a stroke-based method by decomposing a character into a combination of five strokes, including *horizontal*, *vertical*, *left-falling*, *right-falling*, and *turning*. The five strokes all frequently appear in Chinese characters (in Figure [1](#fig:introduction){reference-type="ref" reference="fig:introduction"}, they present in one character simultaneously). Thus, there does not exist a *stroke zero-shot problem*. Furthermore, each character or radical is uniquely represented as a stroke sequence according to the Unicode Han Database[^3], which helps pave the way for solving the character zero-shot and radical zero-shot problems fundamentally. However, we observe that there is a one-to-many relationship between stroke sequences and characters. To conquer this challenge, we employ a matching-based strategy to transform the predicted stroke sequence to a specific character in the test stage. The proposed method is validated on various kinds of datasets, including handwritten characters, printed artistic characters, and scene characters. The experimental results validate that the proposed method outperforms existing methods on both character zero-shot and radical zero-shot tasks. More interestingly, the proposed method can be easily generalized to those languages whose characters can be decomposed into strokes such as Korean. In summary, our contributions can be listed as follows:
13
+
14
+ - We propose a stroke-based method for CCR to fundamentally solve character and radical zero-shot problems.
15
+
16
+ - To tackle the one-to-many problem, we employ a matching-based strategy to transform the predicted stroke sequence into a specific character.
17
+
18
+ - Our method outperforms existing methods on both character zero-shot and radical zero-shot tasks, and can be generalized to other languages whose characters can be decomposed into strokes.
19
+
20
+ # Method
21
+
22
+ Traditional character-based methods use hand-crafted features such as Gabor features [@su2003novel], directional features [@jin2001study], and vector features [@chang2006techniques]. However, their performance is limited by these low-capacity features [@chen2020text].
23
+
24
+ With the development of deep learning, several methods employ CNN-based models, which can automatically extract features from given images. MCDNN [@cirecsan2015multi] is the first to employ CNNs for CCR, ensembling eight models and surpassing human-level performance on recognizing handwritten characters. After that, ART-CNN [@wu2014handwritten] alternately trains a relaxation CNN and took first place in the ICDAR2013 competition [@yin2013icdar]. In [@xiao2019template], a template-instance loss is employed to rebalance easy and difficult Chinese instances. However, these methods rely on massive data and cannot handle characters that have not appeared in the training set.
25
+
26
+ Before the deep learning era, several radical-based methods were proposed using traditional strategies. In [@wang1996recursive], a recursive hierarchical scheme is introduced for radical extraction of Chinese characters. It requires accurately pre-extracted strokes, which are difficult to obtain due to the scribbled writing styles in the datasets. In [@shi2003handwritten], a method based on active radical modeling is proposed. It omits the stroke extraction procedure and achieves higher recognition accuracy. However, its pixel-wise matching and shape-parameter searching are time-consuming.
27
+
28
+ In recent years, the development of deep learning has paved the way for radical-based methods. DenseRAN [@wang2018denseran] treats the recognition task as image captioning by regarding each character as a radical sequence. Based on DenseRAN, STN-DenseRAN [@wu2019joint] employs a rectification block for distorted character images. FewShotRAN [@wang2019radical] maps each radical to a latent space and constrains features of the same class to be close. Recently, HDE [@cao2020zero] designs an embedding vector for each character using radical-composition knowledge and learns the transformation from the sample space to the embedding space. These methods are capable of tackling the character zero-shot problem. However, in data-scarce conditions some radicals may not appear in the training set, which leads to another dilemma: radical zero-shot. Hence, these radical-based methods have not solved the zero-shot problem fundamentally.
29
+
30
+ Existing stroke-based methods usually rely on traditional strategies. In [@kim1999stroke], the authors propose a stroke-guided pixel matching method, which can tolerate mistakes caused by stroke extraction. In [@kim1999decomposition], a method based on mathematical morphology is proposed to decompose Chinese characters. A model-based structural matching method [@liu2001model] describes each character by an attributed relational graph. In [@su2003novel], a method based on a directional filtering technique is presented. These traditional methods need hand-crafted features, which are hard to adapt to different fields and applications. In general, these works have inspired us to combine stroke knowledge with deep learning models.
31
+
32
+ <figure id="fig:architecture" data-latex-placement="t">
33
+ <img src="image/architecture.png" style="width:98.0%" />
34
+ <figcaption>The overall architecture of the proposed model involves one encoder and two decoders at different levels. The feature-to-stroke decoder is used when training, whereas the stroke-to-character decoder is utilized when testing. Five strokes are encoded from “1” to “5”.</figcaption>
35
+ </figure>
36
+
37
+ <figure id="fig:stroke_introduction" data-latex-placement="t">
38
+ <img src="image/stroke_introduction.png" style="width:45.0%" />
39
+ <figcaption>Five basic categories of strokes. There are several instances of various shapes in each basic category.</figcaption>
40
+ </figure>
41
+
42
+ <figure id="fig:one_to_many" data-latex-placement="ht">
43
+ <embed src="image/one_to_many.pdf" style="width:45.0%" />
44
+ <figcaption>Illustration of the one-to-many problem. The <em>x</em>-axis denotes the one-to-<em>n</em> stroke sequence and the <em>y</em>-axis denotes the quantity.</figcaption>
45
+ </figure>
46
+
47
+ Strokes are the smallest units of Chinese characters. When humans start learning Chinese, they usually learn to write strokes first, then radicals, and finally whole characters. Moreover, Chinese stroke orders follow a *regular pattern*: usually left to right, top to bottom, and outside in. In other words, once humans have learned the stroke orders of some characters, they can naturally generalize to writing other characters (even ones they have never seen before), which inspires us to design a stroke-based model to tackle the zero-shot problem.
48
+
49
+ According to the Chinese national standard GB18030-2005, the five basic strokes are *horizontal*, *vertical*, *left-falling*, *right-falling*, and *turning*. As shown in Figure [3](#fig:stroke_introduction){reference-type="ref" reference="fig:stroke_introduction"}, each category contains instances of different shapes. Please note that the turning category contains more kinds of instances; we only show five of them in Figure [3](#fig:stroke_introduction){reference-type="ref" reference="fig:stroke_introduction"}. The stroke order for each character is collected from the Unicode Han Database.
50
+
51
+ In fact, we observe a one-to-many relationship between stroke sequences and characters. As shown in Figure [4](#fig:one_to_many){reference-type="ref" reference="fig:one_to_many"}, we explore the distribution of one-to-*n* sequences among the 3,755 Level-1 characters (the most commonly used ones). Most sequences (about 92.5%) match exactly one character. In the worst case, a sequence corresponds to seven possible characters $(n=7)$. Therefore, it is necessary to design a module that matches each sequence with a specific character.
52
+
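+ As an illustration, both the one-to-*n* statistics and the confusable set used later can be recovered by grouping characters by their stroke string. The following is a minimal Python sketch; the toy `lexicon` entries are illustrative, not drawn from our released code:
+
+ ```python
+ from collections import defaultdict
+
+ # Toy lexicon mapping characters to stroke codes (1 = horizontal, 2 = vertical, ...).
+ # 土 and 士 share the stroke sequence "121", a genuine one-to-many case.
+ lexicon = {"一": "1", "十": "12", "土": "121", "士": "121"}
+
+ groups = defaultdict(list)
+ for char, strokes in lexicon.items():
+     groups[strokes].append(char)
+
+ one_to_n = {seq: len(chars) for seq, chars in groups.items()}
+ confusable_set = {seq: chars for seq, chars in groups.items() if len(chars) > 1}
+ print(confusable_set)  # {'121': ['土', '士']}
+ ```
+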
53
+ The overall architecture is shown in Figure [2](#fig:architecture){reference-type="ref" reference="fig:architecture"}. In the training stage, the input image is fed into an encoder-decoder architecture to generate a stroke sequence. In the test stage, the sequence is first rectified using a stroke-sequence lexicon and then sent to a Siamese architecture to match a character from a confusable set. Details are introduced in the following.
54
+
55
+ In recent years, ResNet [@he2016deep] has played a significant role in optical character recognition tasks [@wang2019radical]. Residual blocks relieve the gradient vanishing problem, thus enabling a deeper network to fit the training data more efficiently. We employ building blocks [@he2016deep] containing two successive $3 \times 3$ convolutional layers as the unit of our ResNet. Details of the encoder are given in the Supplementary Materials. For a given three-channel image $\mathbf{I} \in \mathbb{R}^{H \times W \times 3}$, the encoder outputs a feature map $\mathbf{F}$ of size $\frac{H}{2} \times \frac{W}{2} \times 512$ for further decoding.
56
+
57
+ We employ the basic design of the Transformer decoder [@vaswani2017attention]; the architecture is shown in the Supplementary Materials. We denote the ground truth as $\mathbf{g}=(g_{1},g_{2},...,g_{T})$. A cross-entropy loss is employed to optimize the model: $l = -\sum_{t=1}^{T}\log p(g_{t})$, where $T$ is the length of the sequential label and $p(g_{t})$ is the probability of class $g_{t}$ at time step $t$.
58
+
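+ Concretely, this objective is a standard sequence cross-entropy. A minimal PyTorch sketch (the tensor shapes are our own illustrative convention):
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ # l = -sum_t log p(g_t): cross-entropy summed over the T decoding steps.
+ # logits: (T, C) decoder outputs; targets: (T,) ground-truth stroke class ids.
+ def sequence_ce_loss(logits: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
+     return F.cross_entropy(logits, targets, reduction="sum")
+ ```
+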
59
+ Since a predicted stroke sequence may not match any specific character, a stroke-to-character decoder is further employed in the test stage. First, we build a lexicon $\mathcal{L}$ that contains the stroke sequences of *all characters*. Nevertheless, in the worst case, the predicted sequence $\mathbf{p}$ may fail to match any character in the lexicon, *i.e.*, $\mathbf{p} \notin \mathcal{L}$. In that case we choose the lexicon entry with the smallest edit distance to the prediction $\mathbf{p}$ as the rectified prediction $\mathbf{p}_{\text{rec}}$. If the prediction matches initially, the rectified prediction is simply the original one, *i.e.*, $\mathbf{p}_{\text{rec}}=\mathbf{p}$.
60
+
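+ The rectification step amounts to a nearest-neighbor search under edit distance. A minimal sketch (function names are ours, not from the released implementation):
+
+ ```python
+ # Rectify a predicted stroke sequence against a lexicon of valid sequences.
+ def edit_distance(a: str, b: str) -> int:
+     prev = list(range(len(b) + 1))
+     for i, ca in enumerate(a, 1):
+         cur = [i]
+         for j, cb in enumerate(b, 1):
+             cur.append(min(prev[j] + 1,                  # deletion
+                            cur[j - 1] + 1,               # insertion
+                            prev[j - 1] + (ca != cb)))    # substitution
+         prev = cur
+     return prev[-1]
+
+ def rectify(pred: str, lexicon) -> str:
+     if pred in lexicon:
+         return pred  # already a valid sequence: p_rec = p
+     return min(lexicon, key=lambda seq: edit_distance(pred, seq))
+ ```
+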
61
+ As shown in Figure [2](#fig:architecture){reference-type="ref" reference="fig:architecture"}, we manually collect a dictionary called the *confusable set* $\mathcal{C}$ containing the one-to-many stroke sequences; *i.e.*, one-to-one characters do not appear in $\mathcal{C}$. If the rectified prediction is not in this set, *i.e.*, $\mathbf{p}_{\text{rec}} \notin \mathcal{C}$, the decoder generates the corresponding character directly. Otherwise, we employ a matching-based strategy by comparing features between the source image $\mathbf{I}$ and support samples $\mathcal{I}^{\prime}$ using a Siamese architecture. Specifically, one head of the Siamese architecture is given the feature map $\mathbf{F}$ of the input image $\mathbf{I}$. For the other head, several support samples $\mathcal{I}^{\prime}$ containing printed images of characters with the same stroke sequence are fed to the encoder to generate a list of feature maps $\mathcal{F}^{\prime} = \{\mathbf{F}^{\prime}_{1},\mathbf{F}^{\prime}_{2},...,\mathbf{F}^{\prime}_{N}\}$, where $N$ is the number of possible results. We calculate similarity scores between $\mathbf{F}$ and each feature map $\mathbf{F}^{\prime}_{i}$, and select the one most similar to $\mathbf{F}$ as the final result: $$\begin{equation}
62
+ \label{equa:similarity metric1}
63
+ i^{*} = \mathop{\arg\max}_{i\in\{1,2,...,N\}}D(\mathbf{F},\mathbf{F}^{\prime}_{i})
64
+ \end{equation}$$ where $i^{*}$ is the index of result and $D$ is the similarity metric:
65
+
66
+ $$\begin{equation}
67
+ \label{equa:similarity metric2}
68
+ D(x_{1},x_{2})=
69
+ \begin{cases}
70
+ 1-||x_{1}-x_{2}||_{2} & \text{Euclidean metric}\\
71
+ \frac{x_{1}^{T}x_{2}}{||x_{1}||\times||x_{2}||}& \text{Cosine metric} \\
72
+ \end{cases}
73
+ \end{equation}$$
74
+
75
+ Different from FewShotRAN [@wang2019radical], we do not use support samples during training, which preserves the zero-shot learning principle. Compared with HDE [@cao2020zero], our method requires less storage, since we only need to store the features of confusable characters in advance.
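+
+ The matching step in Eqs. (1)-(2) can be sketched as follows; this is a simplified version that assumes the feature maps are compared after flattening:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ # Pick the support feature map most similar to the query feature map.
+ # query: (C, H, W); supports: (N, C, H, W) for the N confusable candidates.
+ def match(query: torch.Tensor, supports: torch.Tensor, metric: str = "cosine") -> int:
+     q = query.flatten()                    # F
+     s = supports.flatten(start_dim=1)      # F'_1 ... F'_N
+     if metric == "cosine":
+         scores = F.cosine_similarity(q.unsqueeze(0), s, dim=1)
+     else:                                  # Euclidean metric: 1 - ||x1 - x2||_2
+         scores = 1.0 - torch.norm(s - q, dim=1)
+     return int(torch.argmax(scores))       # index i* of the final result
+ ```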
2107.08981/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2107.08981/paper_text/intro_method.md ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ We are interested in developing control algorithms that enable robots to solve complex and practical tasks such as operating kitchens or assisting humans with everyday chores at home. There are two general characteristics of real-world tasks – long-horizon planning and generalizability. Practical tasks are often long-horizon in the sense that they require a robot to complete a sequence of subtasks. For example, to cook a meal a robot might need to prepare ingredients, place them in a pot, and operate the stove before the full meal is ready. Additionally, in the real world many tasks we wish our robot to solve may differ from tasks the robot has completed in the past but require a similar skill set. For example, if a robot learned to open the top cabinet drawer it should be able to quickly adapt that skill to open the bottom cabinet drawer. These considerations motivate our research question: *how can we learn skills that enable robots to generalize to new long-horizon downstream tasks?*
4
+
5
+ Recently, learning data-driven behavioral priors has become a promising approach to solving long-horizon tasks. Given a large unlabeled offline dataset of robotic demonstrations solving a diverse set of tasks this family of approaches [Singh et al.](#page-11-0) [\(2020\)](#page-11-0); [Pertsch et al.](#page-10-0) [\(2020\)](#page-10-0); [Ajay et al.](#page-9-0) [\(2021\)](#page-9-0) extract behavioral priors by fitting maximum likelihood expectation latent variable models to the offline dataset. The behavioral priors are then used to guide a Reinforcement Learning (RL) algorithm to solve downstream tasks. By selecting skills from the behavioral prior, the RL algorithm is able to explore in a structured manner and can solve long-horizon navigation and manipulation tasks. However, the generalization capabilities of RL with behavioral priors are limited since a different RL agent needs to be trained for each downstream task and training each RL agent often requires millions of environment interactions.
6
+
7
+ On the other hand, few-shot imitation learning has been a promising paradigm for generalization. In the few-shot imitation learning setting, an imitation learning policy is trained on an offline dataset of demonstrations
8
+
9
+ ![](_page_1_Picture_1.jpeg)
10
+
11
+ Figure 1: In this work we are interested in enabling autonomous robots to solve complex long-horizon tasks that were unseen during training. To do so, we assume access to a large multi-task dataset of demonstrations, extract skills from the offline dataset, and adapt those skills to new tasks that were unseen during training.
12
+
13
+ and is then adapted in few-shot to a downstream task [Duan et al.](#page-9-1) [\(2017\)](#page-9-1). Few-shot imitation learning has the added advantage over RL in that it is often easier for a human to provide a handful of demonstrations than it is to engineer a new reward function for a downstream task. However, unlike RL with behavioral priors, few-shot imitation learning is most often limited to short-horizon problems. The reason is that imitation learning policies quickly drift away from the demonstrations due to error accumulation [Ross et al.](#page-11-1) [\(2011b\)](#page-11-1), and especially so in the few-shot setting when only a handful of demonstrations are provided.
14
+
15
+ While it is tempting to simply combine data-driven behavioral priors with few-shot imitation learning, it is not obvious how to do so since the two approaches are somewhat orthogonal. Behavioral priors are trained on highly multi-modal datasets such that a given state can correspond to multiple skills. Given a sufficiently large dataset of demonstrations for the downstream task the imitation learning algorithm will learn to select the correct mode. However, in the few-shot setting how do we ensure that during training on downstream data we choose the right skill? Additionally, due to the small sample size and long task horizon it is highly likely that a naive imitation learning policy will drift from the few-shot demonstrations. How do we prevent the imitation learning policy from drifting away from downstream demonstrations?
16
+
17
+ The focus of our work is the setup illustrated in Figure 1; we introduce Few-Shot Imitation Learning with Skill Transition Models (FIST), a new algorithm for few-shot imitation learning with skills that enables generalization to long-horizon tasks that are unseen during training but semantically similar to those seen in training. Our approach addresses the issues of skill selection and drifting in the few-shot setting with two main components. First, we introduce an inverse skill dynamics model that conditions the behavioral prior not only on the current state but also on a future state, which helps FIST learn a uni-modal, future-conditioned skill distribution that can then be utilized in few-shot. The inverse skill model is then used as a policy to select skills that will take the agent to the desired future state. Second, we train a distance function to find the state for conditioning the inverse skill model during evaluation. By finding states along the downstream demonstrations that are closest to the current state, FIST prevents the imitation learning policy from drifting. We show that our method results in policies that are able to generalize to new long-horizon downstream tasks in navigation environments and multi-step robotic manipulation tasks in a kitchen environment. To summarize, we list our three main contributions:
18
+
19
+ - 1. We introduce FIST, an imitation learning algorithm that learns an inverse skill dynamics model and a distance function that are used for semi-parametric few-shot imitation.
20
+ - 2. We show that FIST can solve long-horizon tasks in both navigation and robotic manipulation settings that were unseen during training and outperforms previous behavioral prior and imitation learning baselines.
21
+ - 3. We provide insight into how different parts of the FIST algorithm contribute to final performance by ablating different components of our method such as future conditioning and fine-tuning on downstream data.
22
+
23
+ # Method
24
+
25
+ **Few-shot Imitation Learning**: We denote a demonstration as a sequence of states and actions: $\tau = \{s_1, a_1, s_2, a_2, \dots, s_T, a_T\}$. In the few-shot setting we assume access to a small dataset of M such expert demonstrations $\mathcal{D}^{\text{demo}} = \{\tau_i\}_{i=1}^{i=M}$ that fulfill a specific long-horizon task in the environment, for instance a sequence of sub-tasks in a kitchen environment such as moving the kettle, turning on the burner and opening a cabinet door. The goal is to imitate this behavior using only the few available example trajectories.
26
+
27
+ **Skill Extraction**: In this work we assume access to an unlabeled offline dataset of prior agent interactions with the environment in the form of N reward-free trajectories $\{\tau_i = \{(s_t, a_t)\}_{t=1}^{t=T_i}\}_{i=1}^{i=N}$. We further assume that these trajectories include semantically meaningful skills that are composable to execute long-horizon tasks in the environment. This data can be collected from past tasks that have been attempted, or be provided by human experts through teleoperation Zhang et al. (2018).
28
+
29
+ Skill extraction refers to an unsupervised learning approach that utilizes this reward-free and task-agnostic dataset to learn a skill policy of the form $\pi_{\theta}(a|s,z)$, where $a$ is the action, $s$ is the current state, and $z$ is the skill. Our hypothesis is that by combining these skill primitives we can solve semantically similar long-horizon tasks that have not directly been seen during training. In this work we propose a new architecture for skill extraction based on continuous latent variable models that enables a semi-parametric evaluation procedure for few-shot imitation learning.
30
+
31
+ Our method, shown in Fig. 2, has three components: (i) Skill extraction, (ii) Skill adaptation via fine-tuning on few-shot data, and (iii) Evaluating the skills using a semi-parametric approach to enable few-shot imitation.
32
+
33
+ (i) Skill Extraction from Offline Data: We define a continuous skill $z_i \in \mathcal{Z}$ as an embedding for a sequence of state-action pairs $\{s_t, a_t, \dots, s_{t+H-1}, a_{t+H-1}\}$ with a fixed length H. This temporal abstraction of skills has proven to be useful in prior work Pertsch et al. (2020); Ajay et al. (2021), by allowing a hierarchical decomposition of skills to achieve long horizon downstream tasks. To learn the latent space $\mathcal Z$ we propose training a continuous latent variable model with the encoder as $q_\phi(z|s_t, a_t, \dots, s_{t+H-1}, a_{t+H-1})$ and the decoder as $\pi_\theta(a|s,z)$ . The encoder outputs a distribution over the latent variable z that best explains the variation in the state-action pairs in the sub-trajectory.
34
+
35
+ The encoder is an LSTM that takes in the sub-trajectory of length H and outputs the parameters of a Gaussian distribution as the variational approximation over the true posterior $p(z|s_t, a_t, \ldots, s_{t+H-1}, a_{t+H-1})$ . The decoder is a policy that maximizes the log-likelihood of actions of the sub-trajectory conditioned on the current state and the skill. We implement the decoder as a feed-forward network which takes in the current state $s_t$ and the latent vector z and regresses the action vector directly. This architecture resembles prior works on skill extraction Pertsch et al. (2020).
36
+
37
+ To learn parameters $\phi$ and $\theta$ , we randomly sample batches of H-step continuous sub-trajectories from the training data $\mathcal{D}$ and maximize the evidence lower bound (ELBO):
38
+
39
+ $$\log p(a_t|s_t) \ge \mathbb{E}_{\tau \sim \mathcal{D}, z \sim q_{\phi}(z|\tau)} \left[ \underbrace{\log \pi_{\theta}(a_t|s_t, z)}_{\mathcal{L}_{\text{rec}}} + \beta \underbrace{\left(\log p(z) - \log q_{\phi}(z|\tau)\right)}_{\mathcal{L}_{\text{reg}}} \right] \tag{1}$$
40
+
41
+ where the posterior $q_{\phi}(z|\tau)$ is regularized by its Kullback-Leibler (KL) divergence from a unit Gaussian prior $p(z) = \mathcal{N}(0, I)$ and $\beta$ is a parameter that tunes the regularization term Higgins et al. (2016).
42
+
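+ A minimal PyTorch sketch of this objective (layer sizes are illustrative assumptions, and the Gaussian action likelihood is implemented as a mean-squared error):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class SkillVAE(nn.Module):
+     def __init__(self, s_dim, a_dim, z_dim=16, hid=128):
+         super().__init__()
+         self.enc = nn.LSTM(s_dim + a_dim, hid, batch_first=True)   # q_phi(z|tau)
+         self.to_mu, self.to_logvar = nn.Linear(hid, z_dim), nn.Linear(hid, z_dim)
+         self.dec = nn.Sequential(nn.Linear(s_dim + z_dim, hid), nn.ReLU(),
+                                  nn.Linear(hid, a_dim))            # pi_theta(a|s,z)
+
+     def neg_elbo(self, states, actions, beta=0.01):
+         # states: (B, H, s_dim), actions: (B, H, a_dim)
+         _, (h, _) = self.enc(torch.cat([states, actions], dim=-1))
+         mu, logvar = self.to_mu(h[-1]), self.to_logvar(h[-1])
+         z = mu + torch.randn_like(mu) * (0.5 * logvar).exp()       # reparameterize
+         z_rep = z.unsqueeze(1).expand(-1, states.size(1), -1)
+         pred = self.dec(torch.cat([states, z_rep], dim=-1))
+         rec = ((pred - actions) ** 2).mean()                       # -log pi up to const.
+         kl = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).sum(-1).mean()
+         return rec + beta * kl                                     # minimize = maximize ELBO
+ ```
+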
43
+ To enable quick few-shot adaptation over skills we learn an inverse skill dynamics model $q_{\psi}(z|s_t,s_{t+H-1})$ that infers which skills should be used given the current state and a future state that is H steps away. To train the inverse skill dynamics model we minimize the KL divergence between the approximated skill posterior $q_{\phi}(z|\tau)$ and the output of the state conditioned skill prior. This will result in minimizing the following loss with respect to the parameters $\psi$ :
44
+
45
+ $$\mathcal{L}_{\text{prior}}(\psi) = \mathbb{E}_{\tau \sim \mathcal{D}} \left[ D_{KL}(q_{\phi}(z|\tau), q_{\psi}(z|s_t, s_{t+H-1})) \right]. \tag{2}$$
46
+
47
+ We use a reverse KL divergence so that our inverse dynamics model has a broader distribution than the approximate posterior, ensuring mode coverage Bishop (2006). In our implementation we use a feed-forward network that takes in the concatenation of the current and future state and outputs the parameters of a Gaussian distribution over z. Conditioning on the future enables us to make a more informative decision about which skills to execute, which is a key enabler of few-shot imitation. We jointly optimize the skill extraction and inverse model with the following loss:
48
+
49
+ <span id="page-4-0"></span>
50
+ $$\mathcal{L}(\phi, \theta, \psi) = \mathcal{L}_{rec}(\phi, \theta) + \beta \mathcal{L}_{reg}(\phi) + \mathcal{L}_{prior}(\psi)$$
51
+ (3)
52
+
53
+ - (ii) Skill Adaption via Fine-tuning on Downstream Data: To improve the consistency between the unseen downstream demonstrations and the prior over skills, we use the demonstrations to fine-tune the parameters of the architecture by taking gradient steps over the loss in Equation 3. In the experiments we ablate the performance of FIST with and without fine-tuning to highlight the differences.
54
+ - (iii) Semi-parametric Evaluation for Few-shot Imitation Learning: To run the agent, we need to first sample a skill $z \sim q_{\psi}(z|s_t, s_{t+H}^*)$ based on the current state and the future state that it seeks to reach. Then, we can use the low-level decoder $\pi(a_t|z, s_t)$ to convert that sampled skill z and the current state $s_t$ to the corresponding action $a_t$. During evaluation we use the demonstrations $\mathcal{D}^{\text{demo}}$ to decide which state to use as the future state to condition on. For this purpose we use a learned distance function d(s, s') to measure the distance between the current state $s_t$ and every other state in the demonstrated trajectories. Then, from the few-shot data we find the closest state $s_t^*$ to the current state according to the distance metric:
57
+
58
+ $$s_t^* = \mathop{\arg\min}_{s_{ij} \in \mathcal{D}^{\text{demo}}} d(s_t, s_{ij}) \tag{4}$$
59
+
60
+ where $s_{ij}$ is the $j^{\text{th}}$ state in the $i^{\text{th}}$ trajectory in $\mathcal{D}^{\text{demo}}$ . We then condition the inverse dynamics model on the current state $s_t$ and the state $s_{t+H}^*$ , H steps ahead of $s_t^*$ , within the trajectory that $s_t^*$ belongs to. If by adding H steps we reach the end of the trajectory, we use the end state within the trajectory as the target future state. The reason for this look-ahead adjustment is to ensure that the sampled skill always makes progress towards the future states of the demonstration. After the execution of action $a_t$ according to the low-level decoder, the process is repeated until the fulfillment of the task. The procedure is summarized in Algorithm 1.
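+
+ The lookup itself is a small semi-parametric routine. A sketch under our own naming assumptions (`demos` is a list of per-trajectory state arrays and `d` is the learned distance function):
+
+ ```python
+ import numpy as np
+
+ # Find the demo state closest to s_t (Eq. 4), then return the state H steps
+ # further along the same trajectory, clipped at the trajectory end.
+ def lookahead_goal(s_t, demos, d, H):
+     best = None
+     for traj in demos:
+         dists = [d(s_t, s) for s in traj]
+         j = int(np.argmin(dists))
+         if best is None or dists[j] < best[0]:
+             goal_idx = min(j + H, len(traj) - 1)   # look-ahead adjustment
+             best = (dists[j], traj[goal_idx])
+     return best[1]   # s*_{t+H}, fed to q_psi(z | s_t, s*_{t+H})
+ ```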
2107.10140/record.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "arxiv_id": "2107.10140",
3
+ "month": "2021_07",
4
+ "year": 2023,
5
+ "conference": "ICCV",
6
+ "title": "CrossMatch: Source-Free Domain Adaptive Semantic Segmentation via Cross-Modal Consistency Training",
7
+ "arxiv_url": "https://arxiv.org/abs/2107.10140",
8
+ "source": {
9
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_07/main_diagram_database/2107.10140",
10
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_07/tex_files_extracted/2107.10140",
11
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_07/main_diagram_database/2107.10140/paper_text/paper.md",
12
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_07/main_diagram_database/2107.10140/metadata.json",
13
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_07/main_diagram_database/2107.10140/paper_text/paper.md",
14
+ "intro_method_from_kind": "markdown"
15
+ },
16
+ "files": {
17
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2107.10140/main_diagram/main_diagram.drawio",
18
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2107.10140/main_diagram/main_diagram.png",
19
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2107.10140/main_diagram/main_diagram.pdf",
20
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2107.10140/paper_text/intro_method.md",
21
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2107.10140/paper.pdf",
22
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2107.10140/latex_source"
23
+ },
24
+ "status": {
25
+ "copy_drawio": "exists",
26
+ "copy_png": "exists",
27
+ "diagram_pdf": "pdf_exists",
28
+ "intro_method": "exists",
29
+ "paper_pdf": "exists",
30
+ "latex": "exists"
31
+ }
32
+ }
2107.11298/main_diagram/main_diagram.drawio ADDED
The diff for this file is too large to render. See raw diff
 
2107.11298/paper_text/intro_method.md ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ Computer-generated imagery (CGI) and 3D graphics play an important role in a variety of applications, including visual effects, architectural modeling, simulators, cultural heritage, video games, virtual or augmented reality and automotive design.
4
+ The increasing computational power of both professional and consumer hardware has led to a growing interest in high-quality CGI, whose basic requirement for creating realistic images is the definition and implementation of robust digital models that describe how real-world materials interact with light.
5
+ This task is easily carried out by humans, who are able to intuitively identify materials' physical properties by analyzing how light is reflected, transmitted and absorbed before reaching the observer's eyes. Artificially emulating this process would require a physically-accurate simulation of how generic materials interact with light, but the complexity of such a task and the required level of surface detail make this approach computationally unfeasible.
6
+
7
+ In practice, most approximations for rendering reflections over surfaces simplify the task by defining a model that describes how light interacts with pixel-level elements of a material depicted in a digital image: the properties of the material are modeled by a spatially-varying bidirectional reflectance distribution function (SVBRDF) that is parameterized by a set of properties encoding color, planar deformation and reflectivity.
8
+ However, even measuring this approximation is a major challenge in computer graphics.
9
+
10
+ Following the success of deep learning methods in computer vision, the estimation of material reflectance properties has been increasingly posed as a learning task. Accordingly, we propose SurfaceNet, a fully-convolutional network for SVBRDF estimation from a single input image. Unlike methods that estimate material properties from multiple input images, our approach better suits non-professional application scenarios, where it is unfeasible to obtain reliable acquisitions of a surface with a sufficiently steady viewpoint.
11
+
12
+ More specifically, we pose SVBRDF estimation as an image-to-image translation task and introduce a deep generative adversarial architecture, consisting of a generator that employs a fully-convolutional multi-head encoder-decoder network that predicts a set of SVBRDF maps, and a patch-based discriminator that is trained to distinguish between estimated maps and ground-truth ones.
13
+ We propose to use a generative adversarial loss to compensate for the blurriness typically introduced by $L_1$ or $L_2$ losses. This is an alternative approach to recent methods that perform neural rendering to produce output images from the estimated reflectance maps as a supervisory signal.
14
+ Moreover, the GAN framework also allows us to employ real-world images, for which no reflectance maps are available, during the training procedure, alongside synthetic images. As a result, our generator learns to extract features that can be shared between synthetic and real images, enforcing an implicit domain adaptation mechanism and reducing the distribution gap between input images from the two modalities.
15
+ Furthermore, the combined use of skip connections within the generator and of a patch-based discriminator allows the model to focus on and recover detailed features of small patches. We argue that these characteristics are particularly appropriate for SVBRDF estimation of real-world materials, where the capability to work at high resolution on surface details is essential and where pattern structure is generally local.
16
+
17
+ We evaluate our method on a wide variety of materials from publicly-available SVBRDF libraries and real-world pictures of surfaces; in our experiments on single-image inputs, our method largely outperforms previous works, both qualitatively and quantitatively.
18
+
19
+ To summarize, the contributions introduced by the proposed method are the following:
20
+
21
+ - We present a deep network for single-image SVBRDF estimation that, leveraging the properties of GANs in generating high-frequency details and learning data distributions in an unsupervised way, is able to predict high-quality reflectance maps from real-world photographs;
22
+ - The combined use of skip connections in the generator and a patch-based discriminator allows our model to recover fine, local features in the output maps, even at high resolution (2048$\times$2048);
23
+ - Experimental results on multiple datasets under different illumination conditions show that our model largely outperforms, both quantitatively and qualitatively, existing single-image estimation methods, setting new state-of-the-art performance on the task.
24
+
25
+ # Method
26
+
27
+ The objective of our work is to estimate the pixel-level reflectance properties of a spatially-varying material from a single input image.
28
+ We assume that the considered surface is mostly planar and that non-planar surface details can be modeled by a normal map. In the implementation that we present here, we approximate surface reflectance at each point through the Cook-Torrance model using the GGX microfacet distribution . However, our approach can be indifferently applied to any reflectance model whose properties can be estimated in terms of spatial maps.
29
+
30
+ It is well known that, in tasks where the supervisory signal consists of whole images (e.g., image synthesis, image-to-image translation, or the task at hand), $L_1$ and $L_2$ reconstruction losses are able to enforce correctness at low frequencies, but tend to produce blurry results and miss high-frequency details. We overcome this limitation by complementing a reconstruction loss with an adversarial loss, and training a discriminator network to distinguish whether an input set of material maps is produced by the generator or sampled from the training set. At the same time, the generator is trained to maximize the probability of the discriminator believing that the estimated maps have the same quality as the ground truth. As a result, both models simultaneously improve; in particular, the generator is pushed --- beyond the limits of the $L_1$ loss --- to produce output maps that are as realistic as possible.
31
+ Furthermore, we train the whole model with two sets of data: a set of annotated synthetic images to supervisedly enhance the overall estimation quality, and a set of real images without annotations to allow the model to learn, in an unsupervised way, how to correctly estimate reflectance maps in case of real-world input images.
32
+
33
+ We design our method, SurfaceNet, as an image-to-image translation problem within a generative adversarial framework, where the image of a planar material is translated into the corresponding set of SVBRDF maps. An overview of our approach is shown in Fig. . The generator network receives an input image and acts as our SVBRDF estimator, by providing surface reflectance maps as output. These maps are then fed to the discriminator network, which aims at distinguishing them from ground-truth maps from the training set.
34
+ During training, the generator and discriminator adversarially compete, with the former trying to mislead the latter by generating more and more realistic maps, and the latter learning to identify which maps are produced by the generator.
35
+
36
+ In the following, we introduce and describe each module of the proposed framework. Architectural details of individual layers are included in the supplementary materials.
37
+
38
+ ![](imgs/figures/network.png)
+
+ Figure: Overview of the architecture of the SurfaceNet generator. The number of heads is variable and depends on the number of maps to predict.
41
+
42
+ The generator network in SurfaceNet, illustrated in Fig. , is inspired by the architecture of DeepLabV3, an encoder-decoder semantic segmentation model based on ResNet-101. The input to the model is an RGB image of arbitrary size, since the architecture is fully-convolutional; in our experiments, we train our model on images of size 256$\times$256. The encoder of our generator consists of a variant of ResNet-101 followed by Atrous Spatial Pyramid Pooling (ASPP) to extract multi-scale features. The output of the DeepLabV3 model has size 32$\times$32: in order to recover the original size of the image, we append a cascade of upsampling blocks, implemented as transposed convolutions alternated with residual layers. Each upsampling block also receives a correspondingly downsampled copy of the input through skip connections, in order to provide information useful for reconstructing fine details. After the upsampling stage, the model produces a set of 256 feature maps, each of size 256$\times$256. The final reflectance maps are obtained by feeding these shared feature maps to independent prediction heads. Note that most of the computation carried out by the model is shared by all output maps, thus improving efficiency, encouraging feature reuse and allowing the model to correlate information across different reflectance parameters.
43
+
44
+ The architecture of the discriminator is inspired by the original work of Isola et al. The network consists of 6 convolutional layers, such that the spatial size of the output feature maps is reduced by a factor of 18. As a consequence, a set of 256$\times$256 maps is reduced to a 1-channel map of size 14$\times$14. We treat this output map as a set of patch-level scalar predictions by the discriminator. This allows the discriminator to work independently on overlapping patches, returning a prediction for each patch, and to focus on the reconstruction of local details. The set of responses for each patch is then averaged to provide the final output of the discriminator. The patch-based discriminator is particularly suitable for material surface reconstruction, which requires identifying and recovering fine details; global structure, which is generally lacking, can instead be recovered through the accompanying $L_1$ loss.
45
+ The architecture of the patch discriminator is described in detail in the supplementary materials.
46
+
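+ For concreteness, a PatchGAN-style stack with this exact 256-to-14 spatial reduction can be sketched as follows; the channel widths and strides are assumptions on our part, not the exact specification from the supplementary materials:
+
+ ```python
+ import torch.nn as nn
+
+ # Six convolutions: four stride-2 stages (256 -> 16), then two stride-1
+ # 4x4 convolutions (16 -> 15 -> 14), yielding a 14x14 map of patch scores.
+ def patch_discriminator(in_ch):
+     cfg = [(64, 2), (128, 2), (256, 2), (512, 2), (512, 1)]   # (out_ch, stride)
+     layers, ch = [], in_ch
+     for out_ch, stride in cfg:
+         layers += [nn.Conv2d(ch, out_ch, 4, stride=stride, padding=1),
+                    nn.LeakyReLU(0.2)]
+         ch = out_ch
+     layers += [nn.Conv2d(ch, 1, 4, stride=1, padding=1)]      # 1-channel patch map
+     return nn.Sequential(*layers)
+ ```
+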
47
+ Formally, given input image $I$, representing a real surface of a certain material or the rendering of a synthetic image, and the corresponding reflectance maps $\left\{ M_1, M_2, \dots, M_k\right\}$ (with $k$ depending on the employed surface reflectance model), and given a neural network $G$ (i.e., our generator) that estimates a set of approximated reflectance maps $\left\{ \hat{M}_1, \hat{M}_2, \dots, \hat{M}_k\right\}$ from $I$, the objective of the training procedure is to optimize the parameters $\bm{\theta}$ of the neural model $G$ and minimize a loss function encoding the approximation error:
48
+
49
+ $$\mathop{\arg\min}_{\bm{\theta}} \sum_i \mathcal{L}\left( I_i, M_{i,1}, \dots, M_{i,k} \right)$$
50
+
51
+ with $i$ iterating over the training dataset.
52
+ The training strategy includes two different streams: one, supervised, applied when feeding the model with synthetic data and corresponding ground-truth maps, and another, unsupervised, where instead we feed real data to the model and do not use any annotations. Consequently, the overall loss $\mathcal{L}$ consists of two terms --- a supervised loss (which acts as a reconstruction loss) and an adversarial unsupervised loss:
53
+
54
+ $$\mathcal{L} = \mathcal{L}_\text{sup} + \alpha \mathcal{L}_\text{unsup}$$
55
+
56
+ weighed by a hyperparameter $\alpha$.
57
+
58
+ The supervised loss $\mathcal{L}_\text{sup}$ is computed only on images for which SVBRDF maps are available at training time, and consists of a reconstruction loss that evaluates the global similarity between ground-truth maps and the maps predicted by the generator. $\mathcal{L}_\text{sup}$ is composed of: 1) an $L_1$ loss term that compares each pixel independently, and 2) an MS-SSIM loss term that preserves high-frequency contrast but is not sensitive to uniform biases, which may cause changes of brightness or colors. Therefore, the supervised loss is computed as follows:
59
+
60
+ $$\mathcal{L}_\text{sup} = \sum_k \left[ \left\lVert M_k - \hat{M}_k \right\rVert_1 + \beta\, \text{MS-SSIM}\left(M_k, \hat{M}_k \right) \right]$$
61
+
62
+ where $k$ iterates over the reflectance maps, $\text{MS-SSIM}(\cdot,\cdot)$ computes the MS-SSIM similarity between a pair of maps, $\beta$ acts as a weighing factor, and $\left\{ \hat{M}_1, \dots, \hat{M}_k \right\} = G(\mathbf{I})$.
63
+
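+ A sketch of this loss in PyTorch, assuming an external MS-SSIM implementation such as the `pytorch_msssim` package; following the common convention, the structural term is implemented as $1-\text{MS-SSIM}$ so that lower is better, and the default $\beta$ here is a placeholder:
+
+ ```python
+ import torch
+ from pytorch_msssim import ms_ssim   # assumed third-party MS-SSIM implementation
+
+ def supervised_loss(pred_maps, gt_maps, beta=0.1):
+     # pred_maps / gt_maps: iterables of per-map tensors of shape (N, C, H, W)
+     loss = 0.0
+     for p, g in zip(pred_maps, gt_maps):
+         l1 = torch.abs(p - g).mean()                       # pixel-wise L1 term
+         ssim_term = 1.0 - ms_ssim(p, g, data_range=1.0)    # MS-SSIM as a loss
+         loss = loss + l1 + beta * ssim_term
+     return loss
+ ```
+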
64
+ As the unsupervised loss $\mathcal{L}_\text{unsup}$, we use the standard GAN adversarial loss at patch level, which aims at pushing the predictor $G$ to synthesize patches that are indistinguishable from ground-truth ones to a discriminator $D$, while training the same discriminator to improve at separating the two data sources.
65
+ Assuming that the output $D\left( M_1, \dots, M_k \right)$ of the discriminator is a scalar likelihood value, computed as the mean of patch-level predictions, we can define the adversarial loss $\mathcal{L}_\text{unsup}$ as the sum of a discriminator loss $\mathcal{L}_\text{disc}$ and a generator loss $\mathcal{L}_\text{gen}$, which can be respectively computed for a single sample as follows:
66
+
67
+ $$\mathcal{L}_\text{disc} = \log D\left( M_1, \dots, M_k \right) + \log \left( 1 - D\left( G\left( \mathbf{I} \right) \right) \right)$$
70
+
71
+ $$\mathcal{L}_\text{gen} = \log D\left( G\left( \mathbf{I} \right) \right)$$
73
+
74
+ We apply the adversarial loss both on synthetic data with ground truth and on unannotated real data. As a result, the generator improves at estimating reflectance maps with high-frequency details, while at the same time closing the domain gap between synthetic and real images by learning input feature representations that are equally applicable to both data sources.
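+
+ Putting the two streams together, one training step might look as follows. This is a hedged sketch: `G`, `D`, the optimizers, and the `supervised_loss` helper from the previous sketch are assumed to exist, and real-image batches simply skip the supervised branch:
+
+ ```python
+ import torch
+
+ def train_step(G, D, opt_g, opt_d, img, gt_maps=None, alpha=1.0):
+     fake = G(img)                          # list of k predicted maps, each (N, C, H, W)
+     fake_cat = torch.cat(fake, dim=1)      # stack maps on channels for the discriminator
+     if gt_maps is not None:                # D sees ground truth only on synthetic batches
+         real_cat = torch.cat(gt_maps, dim=1)
+         d_loss = -(torch.log(D(real_cat) + 1e-8)
+                    + torch.log(1 - D(fake_cat.detach()) + 1e-8)).mean()
+         opt_d.zero_grad(); d_loss.backward(); opt_d.step()
+     g_loss = -alpha * torch.log(D(fake_cat) + 1e-8).mean()  # adversarial, both streams
+     if gt_maps is not None:
+         g_loss = g_loss + supervised_loss(fake, gt_maps)    # reconstruction, synthetic only
+     opt_g.zero_grad(); g_loss.backward(); opt_g.step()
+ ```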
2108.02479/record.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "arxiv_id": "2108.02479",
3
+ "month": "2021_08",
4
+ "year": 2023,
5
+ "conference": "AAAI",
6
+ "title": "HyperJump: Accelerating HyperBand via Risk Modelling",
7
+ "arxiv_url": "https://arxiv.org/abs/2108.02479",
8
+ "source": {
9
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.02479",
10
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/tex_files_extracted/2108.02479",
11
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.02479/paper_text/paper.md",
12
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.02479/metadata.json",
13
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.02479/paper_text/paper.md",
14
+ "intro_method_from_kind": "markdown"
15
+ },
16
+ "files": {
17
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.02479/main_diagram/main_diagram.drawio",
18
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.02479/main_diagram/main_diagram.png",
19
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.02479/main_diagram/main_diagram.pdf",
20
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.02479/paper_text/intro_method.md",
21
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.02479/paper.pdf",
22
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.02479/latex_source"
23
+ },
24
+ "status": {
25
+ "copy_drawio": "exists",
26
+ "copy_png": "exists",
27
+ "diagram_pdf": "pdf_exists",
28
+ "intro_method": "exists",
29
+ "paper_pdf": "exists",
30
+ "latex": "exists"
31
+ }
32
+ }
2108.06583/record.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "arxiv_id": "2108.06583",
3
+ "month": "2021_08",
4
+ "year": 2025,
5
+ "conference": "IJCAI",
6
+ "title": "Coupling Category Alignment for Graph Domain Adaptation",
7
+ "arxiv_url": "https://arxiv.org/abs/2108.06583",
8
+ "source": {
9
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.06583",
10
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/tex_files_extracted/2108.06583",
11
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.06583/paper_text/paper.md",
12
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.06583/metadata.json",
13
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.06583/paper_text/paper.md",
14
+ "intro_method_from_kind": "markdown"
15
+ },
16
+ "files": {
17
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.06583/main_diagram/main_diagram.drawio",
18
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.06583/main_diagram/main_diagram.png",
19
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.06583/main_diagram/main_diagram.pdf",
20
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.06583/paper_text/intro_method.md",
21
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.06583/paper.pdf",
22
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.06583/latex_source"
23
+ },
24
+ "status": {
25
+ "copy_drawio": "exists",
26
+ "copy_png": "exists",
27
+ "diagram_pdf": "pdf_exists",
28
+ "intro_method": "exists",
29
+ "paper_pdf": "exists",
30
+ "latex": "exists"
31
+ }
32
+ }
2108.13499/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
 
1
+ <mxfile host="app.diagrams.net" modified="2021-03-25T01:07:49.542Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36" version="14.5.0" etag="yKspXBrEK_wQ5nLi0vqx" type="device"><diagram id="gita3ue0JR-GKvZEt5bc">7Z1fc6O2Goc/jaenF/EgQBJcbuxue3Ha6ZydM22vOgQUmyk2LiaJ009fCYTN30BsQEJmd2cNAgS87yMh/UCvFsZqd/oxcg7bn0OPBAtd804LY73QdaDbOv1hKe9pCjLtNGET+V6apF0Svvn/kDQR8sQX3yNHnlmaFIdhEPuHYqIb7vfEjQtpThSFb8XdnsPAKyQcnA0pXAVL+OY6Aans9pvvxds0VTcM+7LhJ+JvtvzUhqHxjHZOtjfP47h1vPAtdzLjh4WxisIwTpd2pxUJmPGKdvnasPV8ZRHZx10O4I54dYIXfnMLHQX00MfjwdmzC4zf+W2jv1/YVT0+h/v44dnZ+QH13pcFOwFydge6ZUHv1KS/WxK8kth3ncoWtnfyf5LJMXEsy0I3D6fLGejSJv012T+4enKiBX5kC9RVx/cd/WGuXOD1n68L+IPGd+RXTu82vfgsF71wH3oUvuw9wgzALuZt68fk28Fx2dY3yiu7gXgX0DVwPvqVRDE5NdoYnD1HkSfhjsTRO92FH6Bb3Pucdh3x9bcLPCZP2uaw0TNqHM7r5pz1xaN0gTu13sHG1B1M01+/owvy+9nQBfrZrPGzaHvY7fbQh7IHlM8eptluD2MoeyD57AE1geUF92yPYxyFf5FVGIRRcrRhIMM2PFad+kGQS8e6/XX1dRgLntdbLAj6sKAlH1GoVMLO661E3W4OWwWgMGg3IBrIgNmpp21BSxOHIADyFUmMBFZRoK57I9ggZUDGNUhdd0C0QbBIg0jYbrZFtotA3w1nIbWw3aGpPZwJ+25rCzEh0IBIG9a1z9NuP1MWFo26RR+SA6nVGtLz9qE1lLzmeuTJevrQz334Exj2ErZ6tL5114dHO/QXjlvnwBafA3L6woRaeutk7/HFtRs4x6PvFo1Jt2ciLDCWGKVJXDM2l5rF/MF3gEtUNb+mQY0815lf08D6cXU2P/EyWbi78XOWhTWWzdIiEjix/1rMvs7a/Ay/hn5SCDJlAWtLLfdHL7jZQCX3HcOXyCU8j4sHK9nSJ/FH2UJ7ie3cH1g8S+xEGxJXzpJwcrZQN3Q69K3GQYe6O3r/nZfyZOUPtkLLFV9dn/Ib1+98rXMN/pj8bUYuNWqx7SINhtBs48WCOV6uhLKkpleyRUNhmHXRJMLwgRrcsIssmjZuoTFZ+5VEPr15EvHEDsBloupEgIMmJeNKyMq9VqgNRlWHTvvYldulPvujAFA9TqUqzEpc0Y0oLBtQ+kdAmbSqydc019ZgJbgq2eLBWOugh4zDmuwtKqvUgWR1Cbr94VXROgasVzpIPUrVK9KwQzlZ5ouzVXA5KnemurLTki19FJq5mgsMxlXfipkQdaP8pmhU0VHGl9NQpArb5e20ci1reeor8GEruvyI6lxfGdbSNJrqK/ZAtUepr/p+099DYSvL0+MWNmmEONkLhg2qjUBLv7kRWPE+1JYAj1EUpNHRFJRgS6xcLbqarSLaOKJr9iXaDEv/sBioJ1jKn1iOiEff35oIesnZ4dvswb7JNKSRY+R6ryFbcUVWq8xcyrNr+cUabSOblezwYE0AQz5V6J56Wh+ChG4FqbGzhYZlqoMiNDM1FFO4JNzcRJFdqY4GRqeDEiZWpAafE6k1zer88iv7OlUalKzSACbEvhgyrkPJMqso0exMY4R+riGfnDgsVdIQZMNLbZG93bSvh6guu3JXqUdsOiiFgnq8DIAJuB9osNofzb82teB1IFT6abTHXNeSGgAKadTSqUJh13yw1Q8TbZJbjxTIq5xOhAIAtPLnx4lYpVX6LJ/moPphc1I7wBFqh0zJ/8zwejf1HvtEPdo8/YfmQP/RM2m5pe8XhXH0/QzGT6+E5bEPo50TFDe/cZ+z7WZ6LcnGgMS0M/RAb8f195va42Nyih+cwN/s080uRY72nwqb/b2XgMi2a+zD/PzGOHL2x2eaaZb9npx3eAsjr3j2/OFPjvvXJtEZH0p21U3rbE+dxRfJlmHOup5/PAQOt6y/D/zciZ+D0InzF9Q4lOD4smNhCly66DOf018W6MNNTLRa4PUvf7ppcky7lsfOR6QgPUWlE3YMf8As2y67cn5vElExLpe/TJbOq6jZVw/5ysXQmuuRzpEP6pTouQzeVRnEq3IJWuULHF2s3WMiBawcOALq1eJV9+jupXTpc+m689IFcuUITKbQQLv4TELZ86flG5teCk1z1CXPf60tM02EVV2SL3zFND4y8jm16uXYfPk5Z9c+gBKgugGUqHlMZAMB5+Tk3m/ighFB7yP4khpqvfM9j+UwSLgio6YRMxQwda9QFAEGsA/OlCSmEtBpTGLq3pwoQowOkaLEQK00AtvQl9XHUu0HJ7AHZurei8zMyM4MLjJj4m61TC/ENIdhmImRlhhkCiSm7n3JTIzkxJSjdI1KTN27lZkYyYmxNHF1TKZ1zcRMihiB7RjY/I5g8sRAoCtKDNZLEh4Yr68Em3XvyROjbh1jmyL711Bh2VdhZoRqMlBh5VddZoAGhEKjsPir7uuC6mDDcaFpVn8nMhXMPNdPi4eb1dqJeHie7KeboyWce0PsbD99z77Rg0HETvfTIXrDXc33g674SlSuunHop5+QqORCn5eoWeOZCBMjPS+FoCH0CYv6nihFSHgToc9kpESgSLFPcSUmYxH73FdiMhah0TmRhBEUhU4rhvrufkkxU924Juy7wybGhCKLZXbuaZtQaChUXNcpFFyzCZ39Ddf1iCbHlMhpYPEVE67P4+Xm8XLTGC830BBuLE0EuTnWrISxZnGHbvCMx3V4KBe3GksT/E1BWNSeZxJLEwDuHgOZDjiZX9tEJKg6rdIAdEkTSe4e6VJ7khssTXy6u2RLybDe2Zv5mak5rPdn0emgq85TTCWWmvw8o5Yui6+b+1VTDZqqgHZjSTPhiHp4KKfdWNLowArCorZ2Y0mjEQ/RWp0qdO2aTp4OfCWCcug7ljTC80zgveg+ltKK9WSZU1MPUlq/nipr09aJpJGtp+r+6etHmTIxM3AtA6pNU29Lox6rpwJU52pBZUd25YPm1ZZVj0zosjBxmdKx2O4YbapsmVky4fUsmXklAJfyNZfYHowtaSTqe5suFGgWbXFajf0ZcCVOLflCDJcmHgwnaUTsJpy0z+FkJY/7qeBUDhlpL/PN4SvbQnU5DzfbpN0c3ouH4GqNraVrdSEF3HDPooOBE0hGPvDBIfunYzpGRHv8JR0R8T/y3/83BuEqocwHKORA5eM+1nzQxwdBtSJCr9h5SrJigB2Y8RJz
wscFXLO8XuIwvaveIjTpxR4uzJqbOUaBjaqQ9jE6yG6OwXWrZw/vynqsFCMEGdWRKVm41t4d1hxS6zaHeSQtjGq6DABQeuTWlDJcM5qoF581T0Fwm892DhtmdghZ/BVl/dZe1jJX3ug3uhqFzL6Xp2TkHLY/hx5he/wL</diagram></mxfile>
2108.13499/main_diagram/main_diagram.pdf ADDED
Binary file (17.4 kB). View file
 
2108.13499/paper_text/intro_method.md ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Introduction
2
+
3
+ 3D scene synthesis is a fundamental problem in deep generative modeling. This task is challenging because 3D scenes exhibit diverse patterns, ranging from continuous ones, such as the size of each object and the relative poses between pairs of shapes, to discrete patterns, such as occurrence and co-occurrence of objects and symmetric relations. Moreover, there are also generic geometric constraints, e.g., synthesized objects in a 3D scene should not inter-penetrate. Developing neural networks to capture all feature patterns while enforcing geometric constraints remains an open problem. Due to the diversity of feature
4
+
5
+ <span id="page-0-0"></span>![](_page_0_Picture_7.jpeg)
6
+
7
+ Figure 1: Randomly generated scenes (left) and their nearest neighbours (right) in the training set in 3D-FRONT.
8
+
9
+ patterns and constraints, the popular approach of developing a single data representation and training scheme proves insufficient.
10
+
11
+ This paper introduces a novel approach to synthesizing 3D scenes represented as a collection of objects. Each object is encoded by its attributes such as size, pose, existence indicator, and geometric codes (c.f. [\[42,](#page-9-0) [56\]](#page-10-0)). The theme of our approach is to look at 3D scene synthesis from hybrid viewpoints. Our goal is to combine the strengths of different approaches and representations that can capture diverse feature patterns and enforce different constraints. We execute this hybrid methodology at two levels.
12
+
13
+ First, instead of merely synthesizing the absolute attributes of each individual object, our approach predicts an *over-complete* set of attributes which also include relative attributes (e.g., relative poses) between object pairs. Such relative attributes better capture spatial correlations among objects compared to only synthesizing absolute attributes. From a robust optimization point of view, over-complete attributes possess generic consistency constraints, e.g., the relative attributes should be consistent with object attributes. These constraints allow us to prune infeasible attributes
14
+
15
+ <span id="page-1-0"></span>in synthesis output (c.f. [\[17,](#page-8-0) [10,](#page-8-1) [19,](#page-8-2) [2,](#page-8-3) [21,](#page-8-4) [54,](#page-10-1) [15,](#page-8-5) [48,](#page-9-1) [39,](#page-9-2) [55,](#page-10-2) [51\]](#page-9-3)). This approach is particularly suitable for neural outputs that exhibit weak correlations due to random initialization [\[55,](#page-10-2) [14,](#page-8-6) [38,](#page-9-4) [29\]](#page-9-5). We can therefore suppress output errors effectively by enforcing the consistency constraints among absolute and relative attributes.
16
+
17
+ Second, our approach combines the strengths of neural scene synthesis models and conventional scene generation methods. Neural models possess unbounded expressibility and can encode both continuous and discrete patterns. However, they typically produce single outputs that do not possess useful signals of uncertainty for synchronizing object attributes and relative attributes. For example, suppose we know the uncertainty of an object attribute is high. In such cases, we can replace it with another estimate based on the attributes of other objects and the corresponding relative attributes. Similarly, we can discard a relative attribute if its uncertainty is high. Our approach addresses this issue by learning parametric prior distributions of absolute and relative attributes. Such distributions provide uncertainties of generated object attributes and relative attributes, offering rich signals to regularize them and prune outliers. Moreover, they also help enforce the penetration-free constraints. We introduce a Bayesian framework to integrate neural outputs and parametric prior distributions seamlessly. The hyperparameters of this Bayesian framework are optimized to maximize the performance of the final output.
18
+
19
+ We evaluate our approach on 3D-FRONT [\[12\]](#page-8-7). We also provide results on SUNCG [\[40\]](#page-9-6) to provide sufficient comparisons with baseline techniques. Experimental results show that our approach can generate 3D scenes different from the training examples while preserving discrete and continuous feature patterns. Our method outperforms baseline approaches both qualitatively and quantitatively. An ablation study justifies the design choices of our approach. Our code is available at [https://github.com/yanghtr/Sync2Gen.](https://github.com/yanghtr/Sync2Gen)
20
+
21
+ # Method
22
+
23
+ We formulate scene optimization as maximizing the following posterior distribution
24
+
25
+ $$P(\{\overline{a}_v\}|\{\overline{a}_v^0\} \cup \{\overline{a}_e^0\})$$
26
+
27
+ $$\sim P(\{\overline{a}_v^0\} \cup \{\overline{a}_e^0\}|\{\overline{a}_v\}) \cdot P(\{\overline{a}_v\}). \tag{1}$$
28
+
29
+ where $P(\{\overline{a}_v^0\} \cup \{\overline{a}_e^0\}|\{\overline{a}_v\})$ and $P(\{\overline{a}_v\})$ are the total likelihood and prior terms, respectively, and $\sim$ denotes equality up to a scaling constant.
30
+
31
+ **Likelihood modeling.** We model the total likelihood term by multiplying unary terms and pairwise terms associated with vertices and edges:
32
+
33
+ $$P(\{\overline{a}_{v}^{0}\} \cup \{\overline{a}_{e}^{0}\} | \{\overline{a}_{v}\})$$
34
+
35
+ $$\sim \prod_{v \in \mathcal{V}} P(\overline{a}_{v}^{0} | \overline{a}_{v}) \cdot \prod_{e = (v, v') \in \mathcal{E}} P(\overline{a}_{e}^{0} | \overline{a}_{v}, \overline{a}_{v'}).$$
36
+
37
+ Each unary term $P(\overline{a}_{v}^{0}|\overline{a}_{v})$ measures the closeness between the prediction $\overline{a}_{v}^{0}$ and the corresponding recovery $\overline{a}_{v}$. We model the variance and employ a robust norm to handle outliers:
38
+
39
+ <span id="page-3-1"></span>
40
+ $$P(\overline{\boldsymbol{a}}_{v}^{0}|\overline{\boldsymbol{a}}_{v}) \sim \exp(-\frac{1}{2}\rho(\|\overline{\boldsymbol{a}}_{v}^{0} - \overline{\boldsymbol{a}}_{v}\|_{\Sigma_{c_{v}}^{-1}}, \alpha_{c_{v}}))$$
41
+ (2)
42
+
43
+ where $\rho(x, \alpha) = x^{2}/(x^{2}+\alpha)$ is the Geman-McClure robust function [\[3\]](#page-8-24); $\|x\|_{A} = x^{T} A x$; $\alpha_{c_v}$ and the covariance matrix $\Sigma_{c_v}$ are hyperparameters of class $c_v$.
44
+
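+ For clarity, the unary energy inside Eq. (2) can be written out directly. A NumPy sketch; note that, following the definition above, $\|x\|_{A} = x^{T}Ax$ is used without a square root:
+
+ ```python
+ import numpy as np
+
+ def geman_mcclure(x, alpha):
+     # rho(x, alpha) = x^2 / (x^2 + alpha)
+     return x * x / (x * x + alpha)
+
+ def unary_energy(a_pred, a_rec, sigma_inv, alpha):
+     r = a_pred - a_rec
+     x = float(r @ sigma_inv @ r)            # ||a0_v - a_v||_{Sigma^{-1}} as defined above
+     return 0.5 * geman_mcclure(x, alpha)    # negative log of Eq. (2), up to a constant
+ ```
+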
45
+ We use a similar formulation to model the pairwise term associated with each edge $e = (v, v') \in \mathcal{E}$:
46
+
47
+ $$P(\overline{\boldsymbol{a}}_{e}^{0}|\overline{\boldsymbol{a}}_{v}, \overline{\boldsymbol{a}}_{v'})$$
48
+
49
+ $$\sim \exp(-\frac{1}{2}\rho(\|\overline{\boldsymbol{a}}_{e}^{0} - \phi(\overline{\boldsymbol{a}}_{v}, \overline{\boldsymbol{a}}_{v'})\|_{\Sigma_{c_{e}}^{-1}}, \alpha_{c_{e}}))$$
50
+ (3)
51
+
52
+ where $c_e = (c_v, c_{v'})$ denotes the class label of edge $e$; $\Sigma_{c_e}$ and $\alpha_{c_e}$ are hyperparameters of class $c_e$.
53
+
54
+ Combining [\(2\)](#page-3-1) and [\(3\)](#page-3-2), we arrive at the following formulation for the total likelihood term $P(\{\overline{a}_v^0\} \cup \{\overline{a}_e^0\}|\{\overline{a}_v\})$:
55
+
56
+ $$\sim \exp\left(-\frac{1}{2}\sum_{v\in\mathcal{V}}\rho(\|\overline{\boldsymbol{a}}_{v}^{0}-\overline{\boldsymbol{a}}_{v}\|_{\Sigma_{c_{v}}^{-1}},\alpha_{c_{v}})\right.$$
57
+ $$\left.-\frac{1}{2}\sum_{e=(v,v')\in\mathcal{E}}\rho(\|\overline{\boldsymbol{a}}_{e}^{0}-\phi(\overline{\boldsymbol{a}}_{v},\overline{\boldsymbol{a}}_{v'})\|_{\Sigma_{c_{e}}^{-1}},\alpha_{c_{e}})\right) \quad (4)$$
58
+
59
+ **Prior modeling.** We model the total prior term by decoupling attributes and indicators and by multiplying unary terms and pairwise terms: $P(\{\overline{a}_v\})$
60
+
61
+ $$\sim \prod_{v \in \mathcal{V}} P_{c_v}(\boldsymbol{a}_v) \prod_{e=(v,v') \in \mathcal{E}} P_{c_e}(\phi(\boldsymbol{a}_v, \boldsymbol{a}_{v'})) P(\{z_v\}) \quad (5)$$
62
+
63
+ <span id="page-3-9"></span><span id="page-3-8"></span>![](_page_3_Picture_17.jpeg)
64
+
65
+ Figure 3: Left: scene layout from predicted object attributes. Middle: scene layout by synchronizing predicted object attributes and relative attributes, i.e., only likelihood terms are used. Right: the output of scene optimization that combines both likelihood terms and prior terms.
66
+
67
+ where $P_{c_v}(\boldsymbol{a}_v)$ models the attribute prior of the vertex class $c_v$; $P_{c_e}(\phi(\boldsymbol{a}_v, \boldsymbol{a}_{v'}))$ models the relative attribute prior of the edge class $c_e$; $P(\{z_v\})$ denotes the object count prior.
68
+
69
+ We use generalized Gaussian mixture models (or GGMMs) to model $P_c$ and $P_{(c,c')}$:
70
+
71
+ <span id="page-3-7"></span><span id="page-3-6"></span>
72
+ $$P_c(\boldsymbol{a}_v) = M_{\mu_c}(\boldsymbol{a}_v), \tag{6}$$
73
+
74
+ $$P_{(c,c')}(\phi(\boldsymbol{a}_{v},\boldsymbol{a}_{v'})) = M_{\mu_{(c,c')}}(\phi(\boldsymbol{a}_{v},\boldsymbol{a}_{v'})) \tag{7}$$
76
+
77
+ where $\mu_c$ and $\mu_{(c,c')}$ denote hyperparameters of the mixture models. By GGMMs, we mean each mixture component is associated with an optimal mask to model the self-penetration-free constraint between pairs of objects. Due to space constraints, we defer details of GGMMs and visualizations of the resulting GGMMs to the supplementary material. Note that our approach learns $\mu_c$ and $\mu_{(c,c')}$ from the training data and refines all the hyperparameters jointly to maximize the output of our approach. The joint optimization procedure is explained in Section [4.4.](#page-4-2)
78
+
79
+ <span id="page-3-2"></span>The prior term $P(\{z_v\})$ models object counts and object co-occurrences. Similar to the likelihood term, we model $P(\{z_v\})$ as a combination of unary and pairwise terms:
80
+
81
+ <span id="page-3-5"></span>
82
+ $$P(\lbrace z_v \rbrace) \sim \prod_{c \in \mathcal{C}} P_c(\boldsymbol{z}_{\mathcal{V}_c}) \prod_{c,c' \in \mathcal{C}} P_{(c,c')}(\boldsymbol{z}_{\mathcal{V}_c}, \boldsymbol{z}_{\mathcal{V}_{c'}}). \tag{8}$$
84
+
85
+ where $\boldsymbol{z}_{\mathcal{V}_c}$ collects indicators of vertices that belong to the vertex class $c$. We again model both $P_c$ and $P_{(c,c')}$ using 1D and 2D GGMMs:
86
+
87
+ <span id="page-3-4"></span><span id="page-3-3"></span>
88
+ $$P_c(\boldsymbol{z}_{\mathcal{V}_c}) = M_{\gamma_c}(\boldsymbol{1}^T \boldsymbol{z}_{\mathcal{V}_c}) \tag{9}$$
90
+
91
+ $$P_{(c,c')}(\boldsymbol{z}_{\mathcal{V}_c}, \boldsymbol{z}_{\mathcal{V}_{c'}}) = M_{\gamma_{(c,c')}} \left( (\boldsymbol{1}^T \boldsymbol{z}_{\mathcal{V}_c}, \boldsymbol{1}^T \boldsymbol{z}_{\mathcal{V}_{c'}}) \right) \tag{10}$$
92
+
93
+ Note that we again initialize $\gamma_c$ and $\gamma_{(c,c')}$ from data and refine them and other hyperparameters jointly. Please refer to the supplementary material for visualizations of the resulting GGMMs.
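+
+ Since the GGMM details are deferred to the supplementary material, the sketch below uses a plain diagonal Gaussian mixture as a stand-in for the mixture density $M_\mu$ appearing in (6)-(10); every weight, mean, and variance here is made up for illustration:
+
+ ```python
+ import numpy as np
+
+ def mixture_density(x, weights, means, variances):
+     """Diagonal Gaussian mixture density at x; a simplified stand-in for
+     the paper's GGMMs (which add per-component masks, see supplementary)."""
+     x = np.asarray(x, dtype=float)
+     total = 0.0
+     for w, mu, var in zip(weights, means, variances):
+         diff = x - mu
+         log_comp = -0.5 * np.sum(diff ** 2 / var + np.log(2 * np.pi * var))
+         total += w * np.exp(log_comp)
+     return total
+
+ # Hypothetical 2-component count prior for a class pair (c, c'), eq. (10):
+ # the feature is the pair of object counts (1^T z_{V_c}, 1^T z_{V_c'}).
+ weights = [0.7, 0.3]
+ means = [np.array([1.0, 2.0]), np.array([0.0, 0.0])]
+ variances = [np.array([0.5, 0.5]), np.array([0.25, 0.25])]
+
+ z_c = np.array([1.0, 0.0, 0.0])    # relaxed indicators of class c
+ z_cp = np.array([1.0, 1.0, 0.0])   # relaxed indicators of class c'
+ counts = np.array([z_c.sum(), z_cp.sum()])
+ print(mixture_density(counts, weights, means, variances))
+ ```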
94
+
95
+ <span id="page-4-7"></span>Substituting (9) and (10) into (8) and combining (6) and (7), we arrive at the following prior model:
96
+
97
+ $$P(\{\overline{\boldsymbol{a}}_v\}) \sim \prod_{v \in \mathcal{V}} M_{\mu_{c_v}}(\boldsymbol{a}_v) \prod_{e=(v,v') \in \mathcal{E}} M_{\mu_{c_e}}(\phi(\boldsymbol{a}_v, \boldsymbol{a}_{v'}))$$
98
+
99
+ $$\prod_{c \in \mathcal{C}} M_{\gamma_c}(\mathbf{1}^T \mathbf{z}_{\mathcal{V}_c}) \prod_{c,c' \in \mathcal{C}} M_{\gamma_{(c,c')}} \left( (\mathbf{1}^T \mathbf{z}_{\mathcal{V}_c}, \mathbf{1}^T \mathbf{z}_{\mathcal{V}_{c'}}) \right) \tag{11}$$
101
+
102
+ Our goal is to find $\overline{a}_v, v \in \mathcal{V}$ that maximize the posterior distribution defined in (1). The variables consist of primitive parameters and primitive indicators. Our optimization strategy relaxes the indicator variables as real variables $z_v \in \mathcal{R}$ . It then performs alternating optimization to refine these two categories of variables. This relaxation strategy not only makes the optimization problem easy to solve but also facilitates hyperparameter learning (See Section 4.4).
103
+
104
+ Specifically, when the indicator variables $z_v, v \in \mathcal{V}$ are fixed, the optimization problem reduces to (we minimize the negation of the log-posterior)
105
+
106
+ $$\min_{\{\boldsymbol{a}_{v}\}} \sum_{v \in \mathcal{V}} \left( \frac{1}{2} \rho \left( \| \overline{\boldsymbol{a}}_{v}^{0} - \overline{\boldsymbol{a}}_{v} \|_{\Sigma_{c_{v}}^{-1}}, \alpha_{c_{v}} \right) - \log \left( M_{\mu_{c_{v}}}(\boldsymbol{a}_{v}) \right) \right) + \sum_{e = (v, v') \in \mathcal{E}} \left( \frac{1}{2} \rho \left( \| \overline{\boldsymbol{a}}_{e}^{0} - \phi(\overline{\boldsymbol{a}}_{v}, \overline{\boldsymbol{a}}_{v'}) \|_{\Sigma_{c_{e}}^{-1}}, \alpha_{c_{e}} \right) - \log \left( M_{\mu_{c_{e}}}(\phi(\boldsymbol{a}_{v}, \boldsymbol{a}_{v'})) \right) \right) \tag{12}$$
109
+
110
+ The objective function in (12) is continuous in $a_v$. We employ the limited-memory Broyden-Fletcher-Goldfarb-Shanno (or L-BFGS) algorithm for optimization. The initial solution is set to $a_v^0$ in the first alternating iteration and to the output of the previous iteration thereafter.
111
+
112
+ When $a_v, v \in \mathcal{V}$ are fixed, we solve
113
+
114
+ $$\min_{\{z_{v}\}} \frac{1}{2} \sum_{v \in \mathcal{V}} \rho(\|\overline{\boldsymbol{a}}_{v}^{0} - \overline{\boldsymbol{a}}_{v}\|_{\Sigma_{c_{v}}^{-1}}, \alpha_{c_{v}}) - \sum_{c \in \mathcal{C}} \log(M_{\gamma_{c}}(\boldsymbol{1}^{T} \boldsymbol{z}_{\mathcal{V}_{c}})) + \frac{1}{2} \sum_{e=(v,v')\in\mathcal{E}} \rho(\|\overline{\boldsymbol{a}}_{e}^{0} - \phi(\overline{\boldsymbol{a}}_{v}, \overline{\boldsymbol{a}}_{v'})\|_{\Sigma_{c_{e}}^{-1}}, \alpha_{c_{e}}) - \sum_{c,c'\in\mathcal{C}} \log(M_{\gamma_{(c,c')}}(\boldsymbol{1}^{T} \boldsymbol{z}_{\mathcal{V}_{c}}, \boldsymbol{1}^{T} \boldsymbol{z}_{\mathcal{V}_{c'}})) \tag{13}$$
119
+
120
+ We employ the same strategy as (12) for optimization. Our experiments suggest that 20-30 alternating iterations are sufficient. Figure 3 shows typical examples, where relative attributes and prior terms provide effective regularizations for object attributes.
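+
+ As a concrete illustration of this alternating scheme (a sketch, not the authors' implementation), the snippet below alternates L-BFGS solves over the two variable blocks with SciPy; `energy_a` and `energy_z` are toy stand-ins for the objectives (12) and (13):
+
+ ```python
+ import numpy as np
+ from scipy.optimize import minimize
+
+ def alternating_optimization(a0, z0, energy_a, energy_z, n_iters=25):
+     """Alternate L-BFGS solves over attributes a (cf. eq. 12) and relaxed
+     indicators z (cf. eq. 13); the text reports 20-30 iterations suffice."""
+     a, z = a0.copy(), z0.copy()
+     for _ in range(n_iters):
+         a = minimize(lambda x: energy_a(x, z), a, method="L-BFGS-B").x
+         z = minimize(lambda x: energy_z(x, a), z, method="L-BFGS-B").x
+     return a, z
+
+ # Toy quadratic energies standing in for (12) and (13).
+ energy_a = lambda a, z: np.sum((a - z.mean()) ** 2)
+ energy_z = lambda z, a: np.sum((z - 0.5) ** 2) + 0.1 * np.sum(z) * a.mean()
+ a_opt, z_opt = alternating_optimization(np.zeros(4), np.full(4, 0.5),
+                                         energy_a, energy_z)
+ print(a_opt, z_opt)
+ ```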
121
+
122
+ In this section, we present an approach that learns hyperparameters of the scene optimization formulation described above. Specifically, to make the notations uncluttered, let
123
+
124
+ $$\Phi = \{\Sigma_c, \alpha_c, \mu_c, \gamma_c\} \cup \{\Sigma_{(c,c')}, \alpha_{(c,c')}, \mu_{(c,c')}, \gamma_{(c,c')}\}$$
125
+
126
+ collect all the hyperparameters. Let $\boldsymbol{x}$ and $\boldsymbol{y}$ denote the inputs $\{\overline{a}_v^0\} \cup \{\overline{a}_e^0\}$ and the optimal solution $\{\overline{a}_v\}$ of (1), respectively. Finally, we denote the objective function in (1) as $f(\Phi, \boldsymbol{x}, \boldsymbol{y})$.
127
+
128
+ Our goal is to train $\Phi$ using a validation set $\mathcal{T}_{val} := \{(\boldsymbol{x}_i, \boldsymbol{y}_i^{\mathrm{gt}})\}$ and a regularization loss $l(\Phi)$. Each $(\boldsymbol{x}_i, \boldsymbol{y}_i^{\mathrm{gt}})$ is computed by feeding $\boldsymbol{y}_i^{\mathrm{gt}}$ as the input to the encoder modules and setting $\boldsymbol{x}_i$ as outputs of the decoder modules. The regularization term $l(\Phi)$ combines all the loss terms that learn hyperparameters of the prior distributions, i.e., (6), (7), (9), and (10). We defer the explicit expression of $l(\Phi)$ to the supplementary material.
129
+
130
+ The performance of scene optimization depends on whether the ground-truth solution is a local minimum and whether the prediction modules' initial solution reaches this local minimum through optimization. We introduce a novel formulation that only involves function values of f to enforce these two constraints:
131
+
132
+ <span id="page-4-6"></span><span id="page-4-5"></span>
133
+ $$\min_{\Phi}\ l(\Phi) + \sum_{(\boldsymbol{x}_{i}, \boldsymbol{y}_{i}^{gt}) \in \mathcal{T}_{val}} \mathop{E}_{\boldsymbol{y}_{i} \sim \mathcal{N}(\boldsymbol{y}_{i}^{gt},\, r_{m} I)} \Big( \max \big( f(\Phi, \boldsymbol{x}_{i}, \boldsymbol{y}_{i}^{gt}) - f(\Phi, \boldsymbol{x}_{i}, \boldsymbol{y}_{i}) + \delta,\ 0 \big) \Big) \tag{14}$$
134
+ $$+\ \lambda_{s} \mathop{E}_{\boldsymbol{y}_{i}' \sim \mathcal{N}(\boldsymbol{y}_{i},\, r_{s} I)} \Big( \big( f(\Phi, \boldsymbol{x}_{i}, \boldsymbol{y}_{i}) - f(\Phi, \boldsymbol{x}_{i}, \boldsymbol{y}_{i}') \big)^{2} \Big) \tag{15}$$
136
+
137
+ <span id="page-4-4"></span>where $\mathcal{N}(\boldsymbol{y},rI)$ is the normal distribution with mean $\boldsymbol{y}$ and variance rI. Specifically, (14) forces the ground-truth solution to be a local minimum. (15) prioritizes that the loss surface of f is smooth, and therefore the local minimum has a large convergence radius. We determine the hyperparameters $r_m, r_s, \delta$ , and $\lambda_s$ via cross-validation to minimize the L2 distances between the optimized object attributes and the ground-truth object attributes on the validation set $\mathcal{T}_{val}$ .
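+
+ A minimal sketch of the two penalties in (14) and (15), assuming $f$ is a scalar energy and approximating both expectations with Monte-Carlo samples; the function signature and sample count are illustrative assumptions:
+
+ ```python
+ import numpy as np
+
+ def hyperparam_penalties(f, phi, x, y_gt, delta, lam_s, r_m, r_s, n_samples=8):
+     """Monte-Carlo estimate of (14) + (15): a margin term that keeps y_gt a
+     local minimum of f, plus a smoothness term around the sampled solutions.
+     f(phi, x, y) -> scalar is a placeholder for the scene energy."""
+     rng = np.random.default_rng(0)
+     margin, smooth = 0.0, 0.0
+     for _ in range(n_samples):
+         y = y_gt + np.sqrt(r_m) * rng.standard_normal(y_gt.shape)
+         margin += max(f(phi, x, y_gt) - f(phi, x, y) + delta, 0.0)
+         y_p = y + np.sqrt(r_s) * rng.standard_normal(y.shape)
+         smooth += (f(phi, x, y) - f(phi, x, y_p)) ** 2
+     return (margin + lam_s * smooth) / n_samples
+
+ # Toy quadratic energy centered at y_gt.
+ f = lambda phi, x, y: phi * np.sum((y - x) ** 2)
+ y_gt = np.zeros(5)
+ print(hyperparam_penalties(f, 1.0, y_gt, y_gt, 0.1, 0.5, 0.01, 0.01))
+ ```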
138
+
139
+ This section describes the details of predicting initial object attributes and relative attributes. In Section 5.1, we present the encoding of object attributes and the corresponding network architecture. Section 5.2 then presents the encoding of relative attributes and the corresponding network architecture. Finally, we present the training procedure for the neural models described above in Section 5.3.
140
+
141
+ We use a similar approach to [42, 56] to encode a 3D scene as a collection of object attributes $a_v$ and object indicators $z_v$. Each object attribute $a_v$ is encoded as a vector in $\mathbb{R}^{12}$. The elements of $a_v$ include size parameters $s_v \in \mathbb{R}^3$, orientation parameters $r_v \in \mathbb{R}^3$, location parameters $t_v \in \mathbb{R}^3$, and shape codes $d_v \in \mathbb{R}^3$. The size parameters $s_v = (s_v^x, s_v^y, s_v^z)^T$ encode the scalings of $v$ along the axes of the coordinate system associated with each object
142
+
143
+ <span id="page-5-3"></span>v. $r_v = (\theta_v^x, \theta_v^y, \theta_v^z)$ collects the Euler angles that specify the orientation (i.e., a rotation) of $v$ in the world coordinate system. $t_v$ specifies the location of $v$ in the world coordinate system. Finally, $d_v$ is obtained in two steps. The first step uses the pre-trained model [49] to obtain a latent code for each object's shape. The second step then performs PCA among the latent codes of all the objects in the training set to obtain the coordinates of the top-3 principal vectors. During testing, we use the synthesized code to search for the closest object in the training set.
144
+
145
+ Let $N_c$ be the maximum number of objects of each object class $c \in \mathcal{C}$ . We parameterize a 3D scene using a matrix $\overline{A}_{\mathcal{C}} \in \mathcal{R}^{13 \times (\sum_{c \in \mathcal{C}} N_c)}$ , where the columns of $\overline{A}_{\mathcal{C}}$ are $\overline{a}_v = (a_v, z_v)^T$ of the corresponding objects.
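+
+ The sketch below assembles such a scene matrix from a toy object list; the class names, slot counts $N_c$, and attribute values are hypothetical:
+
+ ```python
+ import numpy as np
+
+ # Each column of A_bar is (a_v, z_v)^T in R^13: size (3) + Euler angles (3)
+ # + location (3) + shape code (3) + indicator (1). Unused slots keep z_v = 0.
+ N = {"bed": 1, "nightstand": 2, "wardrobe": 1}   # hypothetical N_c
+ slots = [(c, i) for c in N for i in range(N[c])]
+
+ rng = np.random.default_rng(0)
+ scene = {("bed", 0): rng.standard_normal(12),
+          ("nightstand", 0): rng.standard_normal(12)}
+
+ A_bar = np.zeros((13, len(slots)))
+ for col, key in enumerate(slots):
+     if key in scene:
+         A_bar[:12, col] = scene[key]   # attributes a_v
+         A_bar[12, col] = 1.0           # indicator z_v
+ print(A_bar.shape)  # (13, sum_c N_c) = (13, 4)
+ ```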
146
+
147
+ We adopt the variational auto-encoder (or VAE) architecture in [56]. The network design utilizes sparsely connected layers to alleviate the overfitting issue. As shown in Figure 2, we will sample the latent space of this VAE to synthesize 3D scenes. Specifically, the decoder of this VAE synthesizes object attributes. As we will discuss shortly, the output is then fed into another network to output relative attributes. The object attributes are optimized by feeding the neural outputs into the scene optimization framework described in Section 4.
148
+
149
+ Unlike object attributes, relative attributes are forced to capture patterns between pairs of objects, e.g., adjacent objects. Like object attributes, we encode the relative attributes $\overline{a}_e$ of each edge $e=(v,v')$ as $\overline{a}_e=(s_e; r_e; t_e)$. Here $s_e\in\mathcal{R}^9$ denotes the pairwise differences between the three scales of $s_v$ and those of $s_{v'}$. $r_e\in\mathcal{R}^3$ denotes the Euler angles of $v'$'s pose in the local coordinate system of $v$. $t_e\in\mathcal{R}^3$ denotes the center of $v'$ in the local coordinate system of $v$. The entire set of relative primitive parameters is encoded using a tensor $\overline{A}_{\mathcal{E}}\in \mathcal{R}^{\sum_{c\in\mathcal{C}}N_c\times\sum_{c\in\mathcal{C}}N_c\times15}$.
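+
+ As one plausible way to induce such relative attributes from two world-frame poses (a sketch; the paper's exact convention, e.g. the Euler-angle order, may differ), using SciPy rotations:
+
+ ```python
+ import numpy as np
+ from scipy.spatial.transform import Rotation as R
+
+ def relative_attributes(s_v, r_v, t_v, s_vp, r_vp, t_vp):
+     """Induce (s_e, r_e, t_e) for edge (v, v') from world-frame poses.
+     The 'xyz' Euler order is an assumption, not the paper's stated choice."""
+     s_e = (s_v[:, None] - s_vp[None, :]).ravel()   # 9 pairwise scale diffs
+     R_v, R_vp = R.from_euler("xyz", r_v), R.from_euler("xyz", r_vp)
+     rel = R_v.inv() * R_vp                         # pose of v' in v's frame
+     r_e = rel.as_euler("xyz")
+     t_e = R_v.inv().apply(t_vp - t_v)              # center of v' in v's frame
+     return np.concatenate([s_e, r_e, t_e])        # 9 + 3 + 3 = 15 values
+
+ phi_e = relative_attributes(np.ones(3), np.zeros(3), np.zeros(3),
+                             np.ones(3), np.array([0.0, 0.0, np.pi / 2]),
+                             np.array([1.0, 0.0, 0.0]))
+ print(phi_e.shape)  # (15,), matching the tensor's last dimension
+ ```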
150
+
151
+ As shown in Figure 2, the network architecture of this module mimics the U-Net [35]. It is conceptually similar to an auto-encoder with two major differences. First, we do not sample the code space to synthesize relative attributes. Second, its input consists of relative attributes induced from predicted object attributes, which are expected to be noisy. The role of this U-Net is to produce rectified relative attributes. Please refer to the supplementary material for details.
152
+
153
+ Training attribute synthesis modules extends the standard approach for training VAEs (c.f. [25]). Let $h^\phi$ and $g_1^{\theta_1}$ be the encoder and decoder components of the VAE module for synthesizing object attributes. With $g_2^{\theta_2}$ we denote the U-Net module for synthesizing relative attributes. Here $\phi$ and $\theta=(\theta_1,\theta_2)$ denote the network parameters. Consider
154
+
155
+ a training set $\mathcal{T} = \{(\mathcal{A}_{\mathcal{V}}, \mathcal{A}_{\mathcal{E}})\}$ where $\mathcal{A}_{\mathcal{V}}$ and $\mathcal{A}_{\mathcal{E}}$ denote encoded object attributes and relative attributes, respectively. We solve the following optimization problem to determine the optimized network weights $\phi$ and $\theta$ :
156
+
157
+ $$\begin{split} & \min_{\phi,\theta} \frac{1}{|\mathcal{T}|} \sum_{(\mathcal{A}_{\mathcal{V}}, \mathcal{A}_{\mathcal{E}}) \in \mathcal{T}} \left( \lambda_{\mathcal{E}} \| g_2^{\theta_2}(g_1^{\theta_1}(h^{\phi}(\overline{A}_{\mathcal{V}}))) - \overline{A}_{\mathcal{E}} \|^2 \right. \\ & \left. + \| g_1^{\theta_1}(h^{\phi}(\overline{A}_{\mathcal{V}})) - \overline{A}_{\mathcal{V}} \|^2 \right) + \lambda_{KL} \, KL\big( \{ h^{\phi}(\overline{A}_{\mathcal{V}}) \} \,\|\, \mathcal{N}_d \big) \end{split}$$
158
+
159
+ where $\mathcal{N}_d$ is the normal distribution associated with the latent space of the object attribute VAE. As in [25], the last term forces the latent codes of the training instances to match $\mathcal{N}_d$. This paper sets $\lambda_{\mathcal{E}} = 1$ and $\lambda_{KL} = 0.01$. We use ADAM [24] for network training.
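+
+ A minimal PyTorch sketch of this objective, with linear stand-ins for the encoder $h^\phi$, the decoder $g_1^{\theta_1}$, and the U-Net $g_2^{\theta_2}$, flattened toy tensors, and a standard-normal KL term; the interfaces and shapes are assumptions, not the paper's architecture:
+
+ ```python
+ import torch
+
+ def training_loss(h, g1, g2, A_V, A_E, lam_E=1.0, lam_KL=0.01):
+     """Reconstruction + KL objective sketched from the displayed equation.
+     h encodes A_V to (mu, logvar); g1 decodes object attributes; g2 maps
+     them to relative attributes."""
+     mu, logvar = h(A_V)
+     z = mu + torch.randn_like(mu) * torch.exp(0.5 * logvar)  # reparameterize
+     A_V_hat = g1(z)
+     A_E_hat = g2(A_V_hat)
+     loss_V = ((A_V_hat - A_V) ** 2).sum()
+     loss_E = ((A_E_hat - A_E) ** 2).sum()
+     kl = -0.5 * (1 + logvar - mu ** 2 - logvar.exp()).sum()
+     return loss_V + lam_E * loss_E + lam_KL * kl
+
+ d, d_lat = 13 * 4, 8                     # toy flattened scene size
+ enc = torch.nn.Linear(d, 2 * d_lat)
+ g1, g2 = torch.nn.Linear(d_lat, d), torch.nn.Linear(d, d)
+ h = lambda A: enc(A).chunk(2)
+ A_V, A_E = torch.randn(d), torch.randn(d)
+ print(training_loss(h, g1, g2, A_V, A_E).item())
+ ```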
2109.09133/record.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ "arxiv_id": "2109.09133",
3
+ "month": "2021_09",
4
+ "year": 2021,
5
+ "conference": "EMNLP",
6
+ "title": "Preventing Author Profiling through Zero-Shot Multilingual Back-Translation",
7
+ "arxiv_url": "https://arxiv.org/abs/2109.09133",
8
+ "source": {
9
+ "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.09133",
10
+ "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/tex_files_extracted/2109.09133",
11
+ "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.09133/paper_text/paper.md",
12
+ "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.09133/metadata.json",
13
+ "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.09133/paper_text/paper.md",
14
+ "intro_method_from_kind": "markdown"
15
+ },
16
+ "files": {
17
+ "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.09133/main_diagram/main_diagram.drawio",
18
+ "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.09133/main_diagram/main_diagram.png",
19
+ "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.09133/main_diagram/main_diagram.pdf",
20
+ "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.09133/paper_text/intro_method.md",
21
+ "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.09133/paper.pdf",
22
+ "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.09133/latex_source"
23
+ },
24
+ "status": {
25
+ "copy_drawio": "exists",
26
+ "copy_png": "exists",
27
+ "diagram_pdf": "pdf_exists",
28
+ "intro_method": "exists",
29
+ "paper_pdf": "exists",
30
+ "latex": "exists"
31
+ }
32
+ }
2109.14651/main_diagram/main_diagram.drawio ADDED
@@ -0,0 +1 @@
 
+ <mxfile host="app.diagrams.net" modified="2021-07-26T17:34:01.262Z" agent="5.0 (X11)" etag="vSwhcdZsyOVePfqpjjN-" version="14.9.2" type="device"><diagram id="pj0nguA0aVuABrV1kQuH" name="Page-1">7H1Xe6M61/av2YePLwlU4BBMMe7g7pPvwoApLthU27/+k9ImTjJl79l5E2ePJ+OCMUXr1lr3KpL+Epu7k5m5h6iX+sH2LwH4p79E7S9BEDFB7IVvOd9vgQQ+bAmz2H/Y9m3DKL4EDxvBw9Yy9oP8asciTbdFfLje6KX7feAVV9vcLEvr693W6fb6rAc3DF5tGHnu9vXWWewX0f1WCYNv21tBHEaPZ4bg4Zud+7jzw4Y8cv20frZJ1P8Sm1maFvfvdqdmsOWt99gu978zvvPt04Vlwb74lR9MBqf2fOiWcL6wYTSbovz0//73IIzK3ZYPN/xwscX5sQXCLC0PD7sFWRGc3mp3d/W4O3h9XfDpbhlOgnQXFNmZ7fJwoP9J6OE3DxgRHpuw/tbgApTlBn04Z/SswQnFD8J+EHT4dIJvbcHePDTH32ga4edNE+x9hWOMfdqne7ZRjYodO4kG2du8yNLNE2hYA6j3vw/8V4j71lTfF9Lr9nvWPI9wfN4yj9uyYOsWcXV9zrda6+EMwzRmV/MkHhl9a/gHAVEZXB8lT8vMCx5++ByCPz/W/yB+cbDCzcKgeHWwOxk+3f0/Fyv+uVgZ4Pd+4D/IsY7iIhgdXI9/WzM1dy1m382jp31fy/yp18MnALzRi/4pBB6OQkEDAAQhJYgCLEnCdROLr/uThJkk7nYVMRMLfKNniVRuyM8f6J06mihL/xda6B+0KwRCg+Bv7Spet6uAGrL4/IFetzMQG8/kIuI32pkIDYE8e7yXQiMf1soPEn7dzo+sAEjXLfm6ISEWGg9QfWjN1y0JCWqQq8c7tST9eUuyfn/gb71yFfxciazuNU539bTB9TbhnR4alMU25rblQdtkmwE7TFzwpmPdHl9vFO62vq2K1vF220y3aXZ3heJa8gLPe9rz2TcrCSOmlX9HX5Ef9iuJKR8kQCQiUaAQY3zVr2TCbuvZg7wCA4Ls9wCLUBJkSAEQXmMBYb7Lt7/36lS/oLr+QOEHUGDCJrIkyRIVKab0sfmeFIPYIIxuQEFE+O7NZ8aC/AcLv4WFO2mz/o4RQAISJUm6BgOEDREIiMhQxBJ789pKfCIwPDqwt42GAPo4oG+hQSZUdAn7JsxcP2Zif46hu8d7IgWCBoECArKIqAgARi+QwviEgJ/9fWqkwK+AlA/UGwQ2BCgjRIgkUPZ37WHeFJ2AvxB1+IOFH2FBajAHQQCQMQqCkEhumE9A8Q8YfgsMFDegKMiYCswIAAKu/Ywb4xO/EKr9/GBgbELy0VtgkISVSD6KT0hCA0gyhYwqUJFQests4hcCnJ8fJx+oNESGBQCZjBAQRCIS6YajE/AXYn5/wPAjMLCujxmnlCmggoBF8sKE3BafuM245XvJljW7DBGWMBMfkCgjALdMD/4rgciPoAdYaEgUigiIkMhIIvIt84MvEaX03UBavwkU4knB6j3BIDYQlURKCRaQCCQRXbPFm+IHwpcIUn4gGCBoIBE/czJf2JCb4gfClwhEfiQaaEOUoSRJAhEgRpDecsDhUfhfHg0fwSgE1BAZkWC6g1AAGQcVbphSCH/ilL8ZfpIaIgZUFkQKKRKvsUBQA4j029/nhsKXiFJ+ZD0EZiaCYkQJJQSga0+DwsbdRpEnuZ7C2Z8UCX/ikL9LLXldIHMxBFmCiBB8ncqickOQRBkDAil/I31qMHyJOORnLYYADArsGUuCRERBEq7LVCXSkJ+J+HNzztuMUH4epYHlBkMBkDFzQCTmfV7HJm6LSvxXIprvhQVCGkh+Vnx+HZqguAGI8KpW+jkWhIYoCs+KrtFrMPD8iEifVW6/Gxq+RNjyI2shRNZtGWkkggixKL4a0nNDbEL8GlHLT1oKAXhBtkgYSAgDxAsLcktkQvwTzvzNTAeDApCpIIoIijxmeR3OvCk28RhUuW0wfGQlBG1gwkiARBARsPgyW35LoQnxT7zyNysnUAMLjBoy+wAhRC/y4bfFJf4rAcsPqZuADURlWZIFjGVKxBchrJsiE18inPmBZIJX2DKOQPBjzdUNc4kvEc38QCzIDYBEKkFJJqIAIL0OWN4UlfgSAcuPLJkgDQCYLCnDAWSIQMINc4kvEbH8rBUTQuPbQB5ec3UNlJviEn9imb8Zo6J8nDiEREJQwPTFuC4+gANK+NvfZ8YC+hKxzI8smEANwiwHFZgfyl+veaUoNwiCMsUikPgb+VNj4UsELD+0ZAI2MJGIQNl5ZAmI14EJRBvM/0CMcvLBwfhzl18+EqHbBsOnLZmQGljA3MPAmHHOFxYEo4Z09fjUQPkTzfztmgkoCgwEQOav8Fpp3Bab+K9EM98LC4TwgeCQ8QWZMGFL19nP22ITXyJi+bEVEwBhWQQUQIDoi+znbZGJrxGx/HnFxHthQeIzrAqMWgKAZSAL117GbfGFPyHL34xEyXzcONMIEgQy4JzghgnDlwhZfmRZBGlIFDBXE0JMCZFezER3W4zhT1zyN+sixMaTR0kECV/ntW6KMeD/SlzyQ+oiQINKSIQiZdRBBi88zptiE/hLxCw/MgHauCuthOCx1PJ2ycTjIOY/WPinXgYfFM6QIBIsiQiAG+YS+EtEJT9SL/CBwEgikFBAZPxyUsLbIhNfIiz5eQsjBIYSSJhmwJiK6IZzGfhLxCx/Ien1bhaEMDCITCFQCQjSY6jnkwr7SwQlP1DYstgQBIB5NhNDRF4M1aIN8jTr1N2bT11Iib9EVPIDwQABu0ZKBUpEGQAggRczn+PGs4mn+JtPjYYvEZb8tMUPtPF8MksqXCFF4JNdPi/G/9zU8kvELD8ws4UpHxfOXAwgAhFD6VPPV0m+RFTyA6XNV8iTqSQgUWS8QXyRrbgtykC+Rujx48DAB9pghKlMEGBiepHUvi3GQL5G7PFzzglBJV5jLYpIRkgWXwz3vS2+QL5EXPIDc5wCr5wVCGOwSJZFQfrcJuJLBB4/srwB8xHcCAsA373CmyYMXyK6+JH1DUIDiUQAVMQywQTKL2Y7vy3G8CXCj5+0wgHJDVEWZD7BHFMa9EUd9Y1Rhi8RmvyIVOYnleeXCC5+YGqaNHg+GuOH/DSRrzOON0YKvkYA8QMLFcTGVQHTi0GXt0UK6NcIMH7SSgXQ+MEUQLdFCuhtssdHsWtxFnhFnO7Z9sDNi9+S+sO0LN93Ih971aNKkJgcn808/IaBIKQhSs/mHiavJf0ogX9ftG/xPbLlbbROWUM8lzE5lunjF//L4wvbqrAdBHI4ffuSbVjfS/3bNtPN3F2695/vRUL+Ok1PAb9GI3CLMgvyx3Oze7k//f1ur9DGxFZcI+q69+9TjqArVfGwyd3GIQeCx8QcsO0qB0HsuVvl4Ytd7Pvb70E
4u8ctB90ryEnviivIQPJ8fmrwYrQmhA0CXs9N/QxmEDEu82yK7McZfJ7jTMAN8erxbrh7i5fey96Pq1+CnXgNu0ek/E3wiv8aeL+L3afNd3d2vfVmbpbtre+91Gd95u/d5w30XPquPVeAQkO473H33Ze87rmvOyuUG/T5j+hHdta33Ia/CT34z6HHJJnlrHuBZrqv0m15Z8pv2FC8L9xERBlL+qblr9F2VzT5fA2Dz4086S0X5W8iD/9z5DkDi/2gFbj+H7x9N0DCp5Eg4OnxYk4qpt2w/OzxukoXigywb3DeO5C9F65+oZwi2PtKlqU1l8nWzfPYe0uwrxfvDvww+Jea+FkrPY5RfN5Cj9uyYOsWcXV91rfa7OEMwzS+6ztP1BI2rjr8iwC2KDL5fHvQ6+PnaZl5wcMhv4nl9Vlk3JDecHN+7SyFm4VB8eosd8J/arDfwIPwBw/fLAizAE9uxh0u3gMPIp+KCH7XTn0sHH6hcOI/AweEYOOq36IXksIN8s8ggKh0deSXeufVgd9b7L9QQfGfETsBtPFcNi8CmEhsCP9M6gShv3fgd5a6/AtcwCuz6o5dcRH/PQjc/4Jd4pyTswZ+/Lh4/p12emBu95/OD59+EzoCuG+6H9288JEYw0RoYCh/czeuocCXVHjuZaB/CDhIGrII0dNprs4CJakBfwlwTOju+dluB75D/oMe9MPzfufuvncTP26qbzfx3UYAcuP7Np3//rlX+LCC0Leedn/v/26/+xvVifHO5X3gySfquqtgO0zz+CGsv0qLIt2xHbb8C/UpmfBWxuelX1Wk3IFy80PgcXSv4xPv6erdKZXHreBxC3vvu4XLfMv7j4Jx2Id/Cc14qg6cGnTMMFXYoz+aRPokZO8MnT2pelNZsFcNb/ubgL1pL/Wtbk8dtB8I7hqcp9Bhh2oNefSsuVT7xlYNS4ZwQQBKLbjYTSawy3dxWuPjairlXhkuLFHvYLRYrozVQAfFsnb55YSXfGgO2zv9tOyRqDNLGTCMrgm0ZlBsOq2+OhiVSncjZWwzrhmOjO25PK2r5C9BDZyuPL+IJTUv1c67cLl6Btvu48t+w15xHZt9vWLvxLolmYKjQF03HRVu7L6jqhvbX+raxLaXemv2/e/S7gxu/fFk2l3mgtMeBXUvzs31sLUZsdvojjb9dKVbZrCP6GDPLkJlWGaqZ+yF5KhGoZ9M4WTaH9tiVa29QEsD7XKhNOkytqBWZSsNtgmwm6ZQdJ2wmKbtZl1oDlOxggqIPzzQQJSBIQyFE7FU5h0L3KvPD90Y7Yba2Zu4fVMK4iQRD6qhtjpnqZSFYjWHeODvJ/DoxsfpdjlR5t3tWV2stTrIpke4k4Ku4dJNyXuNYFRp5kymRtfYndVu5Y+3y5mxuz8T+xsv5OGY6fUiqOaSv8uxOd6APgmSo7iSpUGwOhOb7XzJadMuurYwPntjtmt14BcrtVB/dJgAx3T2B3YVTrqZ+KuDzEQu3J1BjV0rnRrT6WLsro6uULRH3j7CJGhVFKFaGumnQTtOs3NpJ7lAg2Hs9PZeldSL1TRfmIpylOedS3tzgdOIBPvMunjCtqT5ypmaVmKdNtQ/QmshL6covr+fodXdnP2hs7HTTpydj8X9daxXeetknXuaxfYhSu0NJoa81oCKg/EmDvNpPJn5zgjk3cXMiPBgtQuqPUdZuz/stat+SyuES6cqSTHLYiWkPvsO6Yt+UM7nl8tlvFaq6aa0yvEWLybSqgDh4AAHm8NgrDvmaHPoSK2InRmJguZVXAH4QbIu552tf5aDU6Bgr5xbZwv0kyURhheMsRys9/xaycg3E+doXGarizktqtVZiuvqgqXaWBS783KW+ezSlnqkD1paOBGR3hppsza/5wx1MHstK9ZLjOXB433F7fvzXRkEAZKIPzhRaJyxNxnyDtffMXuyMln/80ul7mnDYZXnKLQMdomyub8gPmuZYcWWJEn1pWMdmp44uMx2wWlmxe3YN8FMVgyUz7rGxVxdFqHVE1Y9s9UNMTuXdQZF4mWdUbgw1XQFfA5NYTWpd3qdz7vVeOUL/cQ6t5ujTdbZaC7TkKzBsLVpu7PDocWu0J12tyfFoS2XGQhatiEIq6plmuyrtHckpITlpQIdQ7uTUHmpXVi4s/RoTYz2qHcc98+HSaY7urDqJ+FkRkThpDvd7T1CsuY2uDjqagbztlbLLacdIm9klVtQZiOuts5SpDhMN0Uqh74wGG9w3IttAT902z2Nmd1UOZ6PcbWCfper5Iyce7t26i57QnBcLoZZiI1drJCWjQVDtSfGrADqpj2aGH2N6p1mnvUue6Ucz3jziJRd7bETp9J5Vxxddn6fd+ogW7qGmUm7uAXl9aJe9QExgmEG3JFYhq2qsk+tCE79teZxTcr7D2uRlnai4LgrmPXsld2U8sGeqs8u3pgMvVwW5PMMH91dd13tQXRy6/ZiB4vMTuthwu+wSgsj16gf0IOybe+WZ/e0zOVxoU/Ui3fptZv2drOczfG8tS/8pDh585ZSeaI445pAxvP+ykhDYK8cNxtlej3Pq+4ozTq8xaLoROKDO9kibVFuUdXfXYKL29djeDwta9renmGMvKbe12yoMXJbrYS1maj7BZ0v835UsltiV3yqz6duIdKT5bdc2JSHExS53cFlg/tHMqfgjG2y0u0ovJxO1mbc2TQvgns5KLyTrbODElXVcDA4s2vzS51vbDejJGJGSd3bUxR663VH3Y+HEors8Unm08AYJ8E+H50mx+ZuJWfncBMzXTcdZcrpxL8fDmxtz9VIl2kjFAxbseX3l/UZGpOV6mbuaHeounzU8MX1O5vFQopcKxoc8CA9NiM9Yl2taVKvHOohCg8dSLCP5P5wCKb6TmN3mmli6+LovrRcOs0w7/bHk2DS15SLu0oCWq42zNyqyiA7EoYceJTXmbyd50raYV2CmSJ7YzW7+qk3tgV/s5wcfZEWp6heWEasJO2ztyOVMfTjZfOylOSjHM3yMG0vJlMzuuTWfi+eR6wj7Xsi68Wd06xDZtsDs6RQsfV+0rtM2P3PEwLqjWFsciNm6vrCobBHfleZ9HPW0ba0HzNyyHH4YI16y6Q3i9FstIm6R25Di9PgAJgmvO9j6szjDILeSUVc+mrRmsLFfLlqMis2BfO1pB8tK2QYKjHXdsGRMCWizuciWeKBszml9RKd/OPMCBfjiwgZy9ldmvqlhwc6ayIzOdH+fb/znUqfQlmOFnHt+r2dOK9Ni0q2MZlGp0U6ZS20ZK0WJKjq4qBDKcUJktz+oTPbSl6YsH5nxMZgx3vrsIODyjnYXlvw96vLg1XOmZ5scvANTxIXnjcOvXnoe9xWx0rEZMmld6JtkUIccF0IobfvMzNgxCqzpl037m24YsLRPMHSbr67tLfaaiYUYj0LCxrMkRQMmVH1B4dFOfeO+Ww1WPR2kw5v71V/cWcNiilT//E8rNXUNfdJnnvrRbmfZQ5xtww3Azo45ZpcTrbqiKm1rrTcjWZdKA+8uXlyZHIpGcz4FIxjfxj343YVOYDnJQ5nK1biyj
2vyothjCaHNlPFLSf0tht0mM7nVYupuYUAd1WU+9t8p593G3Vw7ikc/nCw3ydxcym3MmbJ97F96i3N4zE+QjrUoppZpBbj64YkzUVZKKHQ3pwXSJlm9/e1Ho4dBx/i1l63xZm3LsVV0cwZO1DzrlKnzbF1SsGK6VXGPYbL3NQ11tJ9e+TXkdyxwiLxRU/UtIvcc3RbbznAV1az5WA/OfL2lEWmgFWOrWrWnTK1ndSzcp2ggPeyZNwaulbYShgbU1tR5PhBs5cE6+kwpqcgiQp2AYqyHx/OYdmNFpdFMDDHA7PIFkJ3Zg67VlK2e23bwNwcZ2V2dFN+S/qeafkxGqJ2udpRMDP0qSN0nYN+7sbSAvWdUa+DCkYSmDUbVpe65vC6N5+yLDVl/nmlQtC8t1PDdjc7Ddj7/qyaaa1dJA9W/R1r1aVEPOCMxdjqLoDfUoWukVCc+51YqpZyKazs5uZAvHErIiVljoZxkaqOZKQ5619n57zscU0nn+zW5YR0qzLSaq+vAOPbrJcv6uCUr4wg8I5CkSw7o00q0eKczzKmOAZRqrRYFxoH64Ky8xRix4CyhKZzdkC6Wq+linEscXgA8bzfW0gobmlVZ3syndDNpkTmvWydwcwoxSTrnHtHqTeZsBs48+uRVUvYIaEZhh3CmlRaN3cde7/Fg/WekyPW67Qw7TS3MfGbW8OZTPg5J0MntsbpNq+2zTK6dPtaCwTTM1ktxFandrS1ubbCQawlVkhiRUtDdLYGaagQa9CLtaOlvPzuGFa1kuQtzqAY+zemCay5c6a2nQnWs007DEPuHPK/d8yQUXBdEibRBngjDQaExlurzvOE7YvAx7+WBpN/IeDJHenD38speul+z9xmd/V4DPAvtSSSXuSmX8z7IaKG8ONaO0jeiGaxRn6vBr7NUVj32XMNgb9ej8FZr4W3x+D4ZEXwz0btvktN5j2Mvz/rA7qOfL1c2pu1AnmdanoeAf3OAZ5jiJDGe5XfyrdZfvu1QCTwMl70TPlcYQjRBnpe5fB6ukpKGvDqAK8hJMOGdFVV8154us0xXl8LTxBcV0O8yH1+p8Dq7wEKivL1Yd6NR9zmKLMvgqjnQwlwQ3heuiG+AQrGoeQ3KrL+fVD8C1Wj6Ddq94b9v26xdu/dNM7/IGLk+ZngX9b1MAeE0G+FfG+sSw6R3Hhci/b/qlQPgl8Ypvab+fm/3j/PLv60775jnp2iaw/0N2p2KJKuXbCrA0PGYd7ywP7lzLoExKvasWtCz8fFQfAKyN+7o58cjK/F/N00/TtlxoVfN6c3nBm3PR6L6d1nxtXQFjsSezPSN0+ZcT+YqlPsVK3WmXRLRSlH2mbkpjR0zZBYzjjZddwo2c3LUjnHeuSVZR3HO708WKBldNTudpMXabItFk57Ye5MsOseB7NFaYWmuepYaVFjn/LQ8FqMxOEwCUJcl0GJpRbe7drGRayGgSyeME/KU3HtYbo/rJWFpiysQ3NrSmrY69jtXu2Z4Vnv3Mar2Tn0xAj3duOmsmnXfnxaUEOkECn1cJ5BbNobS5l24DIEbpvHzFt6v1ztcGyRQZQ9BDN5bLMSa0npNFHpbJ2J0ekkKTXC2u5sDu5MRReeA9vlWWe0cQxnpNc9VeWJt3Rl1vpJ1JEXWLoVW3HeumAvPy/Z1UXWyEyzIxL2dbOnpTMoz9bt3aZfrEbCrLsFnaTs16inqGfTPuPBeGM3yTwlZqSs51B2Qp591SJvbpzQYpQeO3Fzfq51hceiw0294xHZVnI66WMdDXmaY58qttlPLHqWqq6KJWHF49WE3Qnf8z6Uqx6n4yYsFrOjuzuM9G27DuhdOnBHdvjookE3DgvNnmi9rA5Ca2byVMb2WOOWhiWz6Q/3oa0szeUuTJPwLlOpRenCVHiuywfyambsjGixrqHpbJadZpiC7mEnZM5h0qmms/28dfLm6tnbtXlurK87wNMUFUmBosT2gcfe1/N4vkrJen+Sg/XQMDZyXaVCP0nW4pIOlvZ8KvR5PiEzBCafTbvp85z9mldVTEbHWThvOaDfZke0LaWpgL7qtCLMJxo1YJzd1V4YSeZL8y6kvebJ40mcuD7I9/mUeJcOzqhknVLvwKBEg8RJL3d5osRWkOWt16HiWKMwHbWa/dlRMHPau8z3IpyuYSn2LukxGWo97XBeX9zVwLHnxrmHBmosFNkitI7CujcbmmnXPE3cSjpMSh6a5nm/oZlMOgyU5azqHXszGlQ8DG4ITI7NumdqqlIYJ8VWmu2mPdHVOC27wB9PVEVblC09iZWHuoXqmKF2uuoLRIlVZayTYdadlrpcZj3AaxYGoC33VyHmeSFrrJ/1/ZLH6IEPVn41R+bCsca9i35xyDk/dnZqskC9uxzYikOovd/iYBJa8WmZb8ag6pZ0gEKrEKgPOMbUdXVC/oBnq/bzNcLlJaicQo+sXkvjuQhP2/fjuEdnRhxf2m3hMMTVpNU5Ho+RYuuKsjJPVS52d2Ofji5LDobYYYKc7axy3o3swsLJCRE1bjedjcu62QkhT1H1tpYNyz2VB2lotSIUzCJBrDIUaCEaNJXQYqK8nA/2Ie8GogOmRuTRoqcGJjHP/XDcZi25J7m076XuaKO2SbBnzXnqrVtNCVcCXfZH0pnn3Tsw6i1Nfx/PO/nWZsCz9RHiVoHhpH8hclCoTj8+LxfVTJoO9mNGyVSMTL+85FRT7zLAY558NLbLHb+C0cwku4crUPc0B+wKVi7DYN9MFMWp/WrFC2zk9LLaYom4s93gLhOBcTr3aSAmWf+C00swVENN3Zydlupl05V2lvKujRY93avbG6ZU7ImNeJ0J8VA39Qeiq8QtLQVNXQn149benrx9j12NMzdBf+Mqwnx3yIbCEg+Y/WZ6kBv80W7c3zu9yiv3s26X60vXZfrGiIDbb4WJNTUchor87A/gen+h+IJrYjV5JUk080ag6IZUkIcT4g/iCV/ggKvcSjt7+7ZIUqFd3yNMOeULc7ucTZU2HRRin33vlBuyqaOczsxmeOljAQ5Pld9q4nUVnpdjZRGHR55EW6Hcp17HaNahxTRtUM1zo4Z6n1dXWM02R1K52w/2d51E3SeQ2rq96AvIk6rh5a5qZX/BxGAdcAUTXiEA+l5LreuucwBgnO21uzw8L4BpBetxqrtMpxJjdJyzKzxv6kNOFN+edy76lglSNvvIM5qnVa3t79KJLVMjxRG4zjCnAXSYUsily6xrnCVTd83NQRnxgqvmkpkq3LKwEvaY4zU0Nd/IRpuii4IkFcxOq9tMj/UxclJ8l3S3YiWU1sPI5nVv+amdrybmqlnaTOrEu8iCm+wmxsFtJjlTl4rD22meLdm+TdGF06nhYr8ky0uwoAkatKLUmfcmx+1kkIvVrCpmcKQITrenn4rFMu9gZbfap3QoLo8Dl4FfU0pSb3F6doOp3K3l8yCMt6A4Lp/2cwduOG1mTi74w3l+Wq+rvTEz5lnSWV6GzIAdzv2oeRCK7lTdaVte1tQ/Wti1rMJRHNEXAz9K5uaSt9tIicUu5iRSPa9XI8z6yQAytPeUo
Bo6yajFVGzIzsHZ7HzWnx9PXa7w1YuaCxrXPNIFyMOxkZzSk6AtzUKdDpndFtTefqhwxTRwLoNWe3Nep9DN5+tLv5VE9cnJJiSFNkfYeB3tDkeXV3sl9EgG2UConfNitPOzJh6xI7rrvWBNvHOYdgZNY9vbbjb7TQ/6ph3nZKYd2D7HPTvnrrcIidvUbKf2OKQup2ja24ze2IvpOmZ6Tv4ZeW7vdNyNLEud+V0HpL3LdMR01sg064F24gCLLKGv8Sz+jleAJb5enpK+Zc+Cg9o79GLHAdswNXeov2uOgIMWk/m8jOzWZNoZ6cb+aI87Y2sz8duLjnqotHQz6waasQo2wnQxZkqPl1SE6hLgZNPvjxeisZfTxVIhsFhNa2+uqzrvLHZLpAW4TLMKhGUPqYrX4tUBl0W+Zkrp0PbqSJU3y2zQcqZdgs+TrWWrSu2GrhyUVrMryUVRLStRWl5aVktzlqHFlAqzCWoy2XTP0rCZTprccKnFlKnbRbEetgzjrMTjdgrObpMcLGviB8SaMZ7OmRWVkSwPqrmGyCA5dtuhXnRHF04bIknuGy01mw650bQFFVFO7wbNyZHkGbNHR7mT7bl222mQ9pldsYDb6/VSr3vxRAOMNtsV/+HsYJ4vEWgfg8ibwbw7Yy0ybjPSuB5L95xDi7vNcj/PZPOwDmRtxe4rI2hjhfWiX1fMnF2Me33YCnW19hNxdYDnHh3AzT5sT8qZNJymIA2XcbksBvve8CxSSZr4KJjvojsLGfAaHmaI15Ug1/tpdyZM22DZFu75AC/soEw+93UvA0UD6x7TbgJi6iWeeSe/K/bS7ojpD8ZjuqDdHV46S03jvG64jbEXM0OWnNtCcXCz+aEUKe1X5/sLXvm5kMq1rQwTvntHZVSUxLvFsqd6/on0H9S8vJLv32XErMbNpHBgEdr+PFgzlRdoo3bSAvKpbvW5jfJg8zBtM6LucCvj4A3xRwczUUO1fQ72/Zy379xAjBD15q3YaTIeEo61KIRcP6vUz3m923p4HPrT1hT2Z/bZdJe93awDBvtZdHQ3bblaCeG+msmMsXGrc8wvFE0vo82xs2kzyhDzKst6kU86XYNIZpdZBFcMxMtTcyoDPLdoiEzNYm827bOvMaubSMzYla0hs/rVfLYaDspSqyiSGVU/LNet4MyUx04+bDTmKVhaLbV05hW29XG2g7vtpDSH3APBymiTdhhh4SUWVpS5/KzjAy+FmY+2y6PWpRhHua1P2+MJgm52Rrzkr0235jrZtQf79XxZgLa2EHoJPGI/RrqmCun2uJ0dpmrILuWURnGolExSVnHxxXkLSSann0JnOz6cFEbjw7iuVoUAq+npdIAwnd/5FdlCkItu+6AF3f0A+SXzpDbEi9vMRGur/ZBpn8RLhhcpMDUntw11pEO+LKLRGt3XTnR2Y272B7xWBA0znuVW4dE1mBXy8X27imJvn4DVhHGJmB3PmbSbYax1V+JJGjI9EC1mRuavWp5zEmhxbjP3S4lYT7fGrDGVo5Fuhaq7KJzZ9nhXmbQeWox2mWAcm8vVqj7UhDDoMncKcE41832ZqBvcG3OIZlkXmLKXbb3+YHlBnmX4g2qmTbbt0aJ9EQsaMGNbpMKm7cpDPBsx9pFeFOaIpYaS5sxZmcym2+W5V3UVrNcbA8+moWvYockAgg4T4lpVGO2Yt8lcg32ZF4JXdWtJ5jx5zVt/fyDTrD0U2xfvxHBxbE/9apyCucq8k81lNIlStxnyGmeIjynsj2eMufCeP1lYclQjv94Ph8PReiEB40i67C7vKmxY32qdtS05HjtpveyBhS1BM4NrDcqTu25SQf3O5em1EuAyagDd7VqqjQNYbbDgJwtpqJ3BWFPkoQPGbYAmpgE1ux4267xtT2BPS8qhnQJvmBm7i3PM2SXZgNuKbGRgT2J4uKPg/ZCCVt+l84C1pJTz2t0iWQgDrShkucwBEvsXX8Vwkhrd5il3Wn7RsWXYn08XQWKnMzBIyEwZMkbSaU878oD6JtdRTSYeuxkkucM88qO9WZpq0+nt0PCCJdTN/FJ01VH3tLbO7swwVmta7Wbt4cDMmP7cVNZSa6vNTCBlcgDUP0w3Xe4edc55t3rs6uoAabzimRPgQWJdJuV2tQZTeTXqp567K7LeXAvtfktW9pSQkddrMeyoNfKQxUtiBy296VjOcFjtNpDRHlR27QOQ1qG92xzceXRfB90/Jn1/PoXBcNjvn/SJYY4caHYJOGCZ1wPSJOdUddCy7rRsK9FPkpf0k3v/fdAa7sZWMYynGOPlZHaoNcaz8iLWI851XehgibV3FpMyPe7O84p3De3OWtfBur3JgmE/Z/ZUXKVTdq3MHOHD1jdEuc+Qlyxa41UGsk6d772sdcrd5nEARMLr1TaqbmpYGvDxDgvmom46SWb2qs6Z0fq2njC33l+tDSgFwna4LmZuZzGh3t6aGz5z/tvN7Rn7TpepMb3W9aAVnxZFAXezYrArhEvU7m7ENZHNje266xxfwgGvZuTeK7/8YYJrfz6ZDnt7+042+ya2YISPrrlnyjKcDLQQS/2yI5ja9txuZYyViLu+tNPUWqqZQ5pmk/hS0oLuEO8GwpE5oHkazxbHvj2vhMyPgbtkrsNp5nIJrddrNEyJy6jU+iLdKbWOYbdmx3lfXo+B13JE5zDJ58AVz/1BvjoZXUGRNGbdfNXpCDtS+HtIu7NNp2bs1i27I5CsHbffa4rFqqpzN5u4PJDEOngPt/t0oG1wsU5Q5g7H+z3ItAgQtdlt8jpSOLOWxfRYGJ5ZMbno+qwVVK1dM+eOQDry944YBEzDcON5qNRZZKrVQ135sbu/C5NITKUstYf6MV6l1p15e/GYC8XSBcmspdnV+LyZNA9ip3XhVnWu54vDtNOWqvbZXu3iNO6q+3aSDDqj3SHtWIVpLfaLuUC3Ba9WVrE/0FJSZGEn7+wOzYB55OOIysfLzLOZSpHObn9SZTvjWOCkv+TauL+1NmU0WyLfjNLjwPDHk7q/Yra+tx8fp0N5JY3A8TDtdadwOsmWS7gQ5eW3Nu0uD/ddVtM0mBecR8yZhHj449zbZg9EZD2fcw0NzkfqFpOj3m6FZ9u7q5ytqkrk4Y0u9QW5vxMXlUjkecJ6lGfYvsjQ3WW2vuugtRkM55VdCPJ6XexPfISI1V53ubmZ9FcuZARlAcilpYflzPUwdxzmW+gn/iyb1tWAx99WS47Z+pTP4+EwK2fMudpcXPOcR93BJSdFlRxXu+WunU1LUdnHRHFLJlVOAZaImXXV7gEfI1RDFOyPne7eJl5pW+vWSep2MS3iub2UpU5SDmw+QOfOEO4X8uw4nUR8dMDZS9oC7ScOKnp8r9qVvfLSTmETm85mGgsrdVMyi6azA64HWs5aJCliprqcxVoaHSejKuO2m7F1ztYCbt5ZUzD0aspWNntNbdTd12m3SQ0qC6QIqvGycOuibeYDhUdtbP1O24NUuBv7IjOKeZzOYl4Nu42Zb7UlchDqCr0fb2N0T0c0mshDA4P9/lKI2iOas4jI7tGhkkLcwT25
NTw5PrcYzpbME5gK9SCymIdNJ0I8QtkRDXRHdwTmFw5QHFSbec9z6ll3e05lbzIzTkHrnq+3cm3sTPm+ZlNfcpuW3o3T2HRtrPQ0h9m7E+eNy15mxCflkIsryWWsp7yr766D8sIjGDvthHuR3VFn7XixVf35SEh57XAP8cipFafHJo9cLuJ2PDHsiardjfnoG+dLv3eRzcQBBiMCRTcXnfFYx0zPiBsv2c2609KEbjG3znqkBPMtDhSk5fv2eRnedW4cM+5nTOTqLO276G64RdyN0YB7Tgd3tg2zDJprAUbBuM+MSjk4ZwceZRN2k+MuJ9EO96es56GS6UpODepFrxXue5dlfkn9zr6C5hGSQV8oppnvRnKVzaTVoErHtZD1wpkpu6zJLxfSH7SS2mKsbSCuy8LQumZHVC+WEXmxFVuJmoJm0xuKUsDjLHywSi25fT7+pnUgs72wHoXLMh+yIxrnaTOd8hhNPhu2tCPj+/ViaTp76yzl47GrdTYK7bXUsBfFyc9eOXlHp7N5x7vmvQLz5I2ib43xZlTau2bzt3LVP05VI1FuUPgsKXddHfM/CBrg+WR34uuaPRHghvhWOa3UEN4rVY3+zgLhN5y40yb8s3yfuFP6xUj32Wurntwl7tIwENzVJBqvnKRFBYEYwlpb7gdQ07PxgNvQvMj2xUrP7JiPyRxpJjunetB121nvoqiteO5m6s7aC7uaMVO/bTlbFNe5uXFGG3NhzdGBGNBOzElYHncLWNFhdPaG+5weiHfaE1lvcQeJe4W0Szg/JXQ9WHsY02OVBFwfdobtdfukDC6TYd/woHZx5n1bAtNvn2eG6h4v+alfFpfcg6k18Y6xGYWM7A+ckMpE7tXWqsVZQYsy9vzgSkdMtw80JbQHd0FkKVBSU1lwgmvbsg60xZFpsBOzKoyOHtW4pEeS9oeHtL/O7Kk+6VjHJpjF+nlmN8f+EPH6f6t99+tqL6/jeDri/4so72cjsajLAQ2SNNTtOJySyuHOX3dZCGjFWL2c3Dv5s24hdB2LOdDauN/F3wIdqwyrzWBcB5lxhM3E6dtkO1nsTvnYaPKhv6qv+S2b6R5jUbXCkxZJdzHC4zBBUsuy7YkTtJjDzpNhsp0ftv2KXRdrAdOIrLF17oXjVmu8PatsVxyFrrnrmsXdoAFtY1lt1Wo3m2bK7SO/mmwqW+msFkzOxchw3b1I0FjORht02XVOeUcp59nZSl09UhVL7RqloGmMAfrqsZg2MzhPICmmXNYTy+wzJ1drb3vqgf1PKyNkutzIzVP7MGirk4kVTsJNWGzzU74bty+6NFTOHDCc3vJ8h7qOTc3d4TLakiqSt3m0qXchp7J7/XSipK9EWjEGC2nWMZyQqddWblw6aVh7w3p3hHU5PdezI1SjCe2NTXVzsEZxbx7PkbVtx8uYkQ9vw2/ZSvuM3B6YZ2aNvL16nmyXk3YS5SsT9O+5p1IPKogKt5hJSf+ymI6OwUjpj5lMhlav6bM3qrLRww37v58xt1I7HBc7e64zEnNRNvG0ayuJykOcs9l91Gm4yHeTbpMc06Me2xSdo4B50xOjWDXxuhutmO3rL5M+uxSVmb/DPCHyYFPxhrljEjyEP43aIt1eost8zwefZMMWz1jUdmsjtbZhbqSRbh2TqctarZcjRo3DzWVonXYS5A6cuujzLr8GxB945YC50XS462ueGJ6UZijpih0mrlAcmlwBz0txnC+SyWrnOfncqPmQ13PfvHjxYLxZ8oTjiQ+i4WTmXC6bo03YOc2Uc9/J+wrzz8k2mIrEt7bd+5ih2moep83k6GuxP9ib2tQ/3DksaOlmi2XnsOhqaKIKfrayp9ZU3Y/ZXW63Lve2lJj5DtPtMhjeu7rDCRP7ISK8lxg1GraSE469ve4w6gGzIdubuez9ZLBWaqXZc+KeazJX3TyVXU5HUtIMFdYrqdchflCG02MZAZ4utzmVLbo2YAI8t8eOHTJFe/Lmxthon0vnFOdOtL9gvC1Ca8jcZ92ZGvVAPdmdwyw3BGc6dSaWNVVSo1dPuR7nRxkGUA5aHIVAG/Ujy9bd7tR1Nds99VijzpTa0oDcCtaDvTfUT5YTpjzKojdDu8lDq8u0uRNjY55tT9HFLfNMd/hA9U6cpkj2KbuZZOIm86zKWstV+yxPRopdM6fSah0o1+stGBStOSPg1ZxKpjqyrOPxnCetXjKF/pyTwwXeT8jmwAfWd0Jryh77oXkI92MCmK/fDwZ6tAjnx+PuEC8HljC77LoKZyVizZvfTk3GD5MFnS+koBpZSJzfJ5EGkFNDqa9JoOcd+y1nMovhbINiPrxsP9BmcOuys66aq8xLOuzcxYVQb6A3T6RklC5kv2Qn0UP3WGTBZLNZkuASFd5k2lIp18neOF5Xe5MJf9+S8qxztjfVaQOjO6nFi11733RKsZuoJriM+0ptKpzqQ8OZeDozVaNNNBiI90Fp0O739Lo8HknZLUBXCDLBs7t3upJuPS1j/w9VPypWs2mBql3rEI15sJPtnjVtHhbRup2IIR6f9ERZaN2zyjRLwcsVQn96jKZ1yZPyRoWlemmqSTic7Lg0BqxphrmrMmOUoubs4hbWRgQDKIRkeeBjtfirmGwlmTkIARGWsTI3ZKwt8lmvtSKav9qtiT4/9Rl/bo9GJ788jBNuYQxUb3rn8Ur21/sTNXnxwlBl1uVc2TxxoFqyH6ST0znn7o3U7g+ZC5gVlLVynQWA9SakdGGgocqoi3J4F8U4WWdGhg9xZ3/guoF7PD1NqYfRYb2JLvviWKNFP1OE9ax5MdFACV2AcTDe5v2I0fuqHebydrGAhB0yn3d342lMWZ+cJ1TeQTRy3bYiz33RA/h8kSrbraZxyZSMefHLiDsO8x7Ggr+fnPf9EYNbattkOCc5R0ixjpYVnTN3YiPuNF32c+/SGw/HQDZVFKixww5jM2ew3kkC4k4tF/k8O6GQ72eu2uMd4KEao3ue9cXZkEzX03ITJLXnJvaKYdW0mFfEVFFfWmunSSeaTJrhVLciY9OLNjFTiWrTMmZHwPwwZdxbZsMwm3tI5qxj5dy1fcSTcFtloi63vSicdI0Nsxv8X2J0p8z5t5lmtI/VdHEfzeMUaZ4I1Nja1Xlm2KhNjbus8HrSpsGlAIMIADOEowkcRl62587PcHu5pMNNBZe2OBqtLyiwYiWyDtP2oquu7jIg3C3P3GbILIu7YZ5eszN0mp3tZFCWJWiPmuHxmGfW2F9t/HIV4157O0Belc2bYXrks0gY+uQ0vGjDabgzTWYnQ8DsMuhEIWNQ++5c1E+KPmW2zMSd0WannHeuanfMFQ/v2LauTKf+3Bb2AO185mg1F5mp94jP290zbOZIVw6fCkE+lf2HbNDqsmz3+s4koWfKWU45XI2M3bHa8ObpPbDFNajSNWqeIDKlUUxGknXnJ40m04HTwc2FZf31m8MLfzaO4NpVuq7p/Z+ArqsX3/CUKF/T4g1HCT+NUvzXHSXxV5aeuB94+K+PMfxxc8qUN+eLNgSv2kwAbxTrihC8W3v9wkDN6zLyn4y
veF0D/YMhCy/GRgCAQbB+a2wEAFBTmw91/G85qunTSA3h92rdHxD0g+U3XlTxvpbh4zSnV6B/Pwn+CzPO/uMy9Z801v9elIf/T3zdWj+qTv/1QnSJNq7LqxFtSDKiIpEQU2VY+IdV6U/Dep8W4nmfwnMogl+4/O9epfj2Vb5rMbn4uOT9fxt58MUgfOFxEeVbgRq74MYdxESIKQfdj+cL/CCs/cKcfn/s1BV9u8Y/fI1/6Q38v6OdemvGgr83ak98MeP+P1us5OVR/s7Yv/njyfJy9bgtfjYM8Nnmb3seHjcVV3seXu7513fXPfnSowt/huQX46beADJ6A8jo/YD81qwJf3P4qfQ76+X83rnuj7J4G8jN9i+D+fAHzf8EzUhovBxU+BrQ4hvzgIj03eYBEd9ch+f/iMn9ZGwxRqTxeHmPLSYzwgLfjc5R8NJRR+TFYX6Z0kHxldf/6mD/Fq0DstgQXp2rAcA3N+JhLPX7UrU3p1++NUM/2Xus87ushc//c2uGYfbzXuDu2cs4cL3op6uVrbLf13y/rtGygN32Q8iK668HsLDjYvUvzMOEblmkD9NSfF+psssbPZu64p9rwZ+EFkWCXgXDmF/ReD62GrxeQYrAN7r4y+707xVdwF+YHulDYokC4VrlWVNdzxn05Eg+n2CBPk1G/39C99HjbA63OXOL9MbMLT4OJB+95fVJwkok5Pc6DPzxqr8YNSQgShJG7D3F8PU8UDJfVYMhQ5aRKFAJvzFDyztK+29MuX8b0l5LXvD2PD0rJgT8m+rxJ9IWaAMBKCOBQihIFNHX8w1+rLh/IWtwW+IOIOve9C1xy4SK7vt2bgk0JHYWpsgxoQJEn03aNz3X5Juq/OdLu7+DtD+NPH9hUrUP4TVUeOUtQekNLoPExhsOskQa7zUNHYK/4CB/3i6A3+gCcOXCQHirCwBAdMX46/sx7B/NT/c73eYny89KDflqKsyXU/Q2XqtNChv0DaRg2BCvXI13w81jx7hN3FB8y7h5MQX2W9Oo/l+pD+FvlO9/Phh8CfVBhQal6BUCnk2LKv04+IDecJ8F1Hi3IR+PEPmDmQ/DjCw3kHAdrRe/xVivqrbeIiTkyfj8++hAv4CO/1Q+/CfVd9LrOPwblVv/pxlxhH55kYT/Yx7+Vtrif+JregXBW4se0PerVnyjvf5mSgC8SAl4T5j8tpGhVpZ5Vc/3EhCv0qL/9PT3R9F7ys+Tpq/OeRuJVGah/LucIf8Q8rzhw3vfzaOniXXzTVB40V+/m3t96FHfz73KuAEEzCw9wyjBsnxd0ywyFwPImHka98+v9TwvXYaYvSKBiDJ+nKv/qu6AVzfLIsGPz+SdOgMEH+jGf6+hn7cV5fYPYUgpIUh+yw8TmYmUmDxk8vD8bk31C+77N9zCn1vLa/R+Z2XufwLhB6m+btmnmnHcgAggiB+e35jAn7c8hoBhVMAM7NIbKOVQl2SBygTcPf9g4uffa3n4Cw7wO4H0u035GFBgVE7CArNhBIgifJE6p1IDPSoC/oxeWz/MJycHrPEIRBJ9w6WEROYxC3YK8f75vegghLfpYP5f5lweoPh9iggbSCYilCXWab4po/OTKwBFCr79vYIDL+zBBDHgsB/LrBO+YRtoQwDSQ0qHyhi9Hx5u03n8VHiQGliSAaaSIBEBSsIVHiCgDe72YCwxVvFmvOBT4eE287OfCQ8Q4AaRIBOTBKAMRPl66hAIUUOiIqCIEMj0CP7siLjNFO7fT9u+T+jop2iBd/FGZkkoxVAiL7JaAl+SkVfsIYoYT5M+OVZuMwH8mbQHERoIMrLNnpDELIp0HVa8NXpxmwsHfipAyA0MJEkUoChBgsF1KOLW6MVtLvz3mfDAfHYCIKaA+XIIUXjj7OI21+37+xWfH8MuJLFBZCQCCUiICrJ42+ziNmtrPpPyEIUGEQiWEaEUyFS6Vh43Ri5utGbmU+GBp+9kURQ4sZCeFly8TXJxo8Uz7yddRBoS6yUUEIEZIQG+GB97Y1ThUZo3Jt6/390/hipgsSEDxIgCZqJGSIQvxgrdFlcQvkQc813Lz39qGxoYEZFIskQIEzwVrutob40sfIkw5ocCAoIGEWUIiIz50s0CvM583Rpb+BKxyo8FBG1QEUoIcQYJsPg4odGtEowvEaz8BUR8DMEQGBoEhCVBBgRJFN90LEL4E8j87bw5u3S+YBIfmU8kUbgObBPSADIUsQSYGyO/Vb30ufDwJeKYH4sH2oAUYAgEXlWDr90PPkUslkXKgEIh46WvSy4/Fxz+hCp/n22ihkBEAUkC8z0QfQEICTJrQkWZiVwmkEqf3F6IXyJW+YmrKGBDxJi5qlACWHhRciPJDeazIIgQuRP3J+eh4m3GMT+T8iCgIctMZRBMAfuEXiiPGyMX4n8l8vmOgGCuqgwkwvQU0wEv5omjtIEoIE/1169nqEdiAzLtAimUISaPZO96GskGlBjQnsZ0vBsYvkRk82NLKDCTlQj4+H7CUEFvm1l8jcDmp62gEBoML5QAJAPGQ68Vx60Riz8hz99OikAuLMDMhSQgLvibjlqIXyLi+bEVFBKfIoZKPO9BBRG/YBa3FbYQ/0Q1f7vmAjdEWcQCoxfMYGB6rR9ujVv8V6KaH1RyITSQRPhstYAPxpWu86m3Ri6+RMjzQ8mF0IBMlgJrSihLiNw4uUBfIuT5oYCQG4LAeAWPKDDpv4xa3Ba3QF8iqvmx9RakIUJRkKkoMsMlyS8WPLkxcvErM1N9CUB8ULmF2EB8wgosQiiJ8Gl+mdtkF+hP1PO3I1lygy/vwZmFKGL8Yi0AUWwAhHk4FAoie/PZlceXiHp+bL0FaTDSwPQUEgkRXw4kRKDBKabM+CgvCpdfT2vzufDwJUKbH1xwITKBijJFCMgyHxV0DQipQXkCjGNFoI/N+2nx8CUim5+43gI0CCKUqQ9JFABE18oDkwafyU3E6OH59ZSQnwstf+Ke/0LFhSAx90OglDENSbhtdvFfiXu+Ix5oQ5QggLz6H0ovyOatkYsvEdr82JIL1EBUpEiiUGYeJ75lboG/RmDzPVcp+6kvChuYECQRJkzGN1/MoXhr9AH/CW3+dhodNJAsUSoKEoZQFIRrQnlj/AF/idDmx9ZV0AaWWc+CAmYSxeJNRyfwn+jlb9dVoAZfmxjJgggJpY897DYJxH8lePlBZRWwQSlh1kQASMYyue3YBP4Skc2PzZs2EGSQpHzeKy7X6zz6rXGLLxHZ/FA8SA3OKyTMhS5KmN42t/gSscuP1Q+4QQACArMGUOCjTV8sAHVb5OJLxC4/b1mF0LgbRMbXVcFAkMh1ZPPm2MWXCG2+66LcP7Um9G69WQwkIjHW+cmrMsmXCF5+qMD54mB8hhpJQlCWwIsSf9pgOABQkqCAiCg8mudPi4cvEbv8UDzwRYMh0/UMEhAL5IV7wcgFFQkQKeHBC4LhJ+eTj5N4fXlAfFDlBOBF3RIkiEp3YZLrskxBvKOiDA0iFSWZ0k/ONsmXiGx+aC4MS8xiyFTAkHuc8I3VJD+XxL9E8PJDJU5wQyacHT
AnAjONdJ39vDUC8TXikx+JByqyS8cCYOJCEni5BNSt8YevEZ/8tPNRsBtAVBZlwFwP6WWd3c3Rhy8RvPzQxKggN4DE57IhMk9jSJ993mXyJeKTH1sbQRqQCjK5G7fBuvn1MMBb4w9fIgL5sbURYkPgM90xb4wC7mG+WOjjxhgE/RIhys9bHgEaIpYQEzQEIhAF8bYjEPRLBDA/Jv/5aWX6JWKQH5rTpg0gM8eSyrykVngaH3mbHIF+jSjjh9Y4oMYPaqBujiJ8jSDkpy1ygI2riYhunCJIt0koH2WvxVngFXG6Z9sDNy9+T/RA+rEvAUEDQ8pnBoGA4qdywydFwWX/OMcxe34dbSC0QagsQ0mWZBGBR1XyXPRUaiBRYn6KcP+M3k3yv0AOg72vZFlac+Fv3TyPvWtZf2/FxsAPg3+p7Z+13uPgsufN9bgtC7ZuEVfXZ32r0R7OMExjdj3fTIBMG0y7s8d98kF60asB4eP/HwULpOvj52mZecHDIb/J5dVZ+BqzskwQYjpAYCfBf+ckhZuFQfHqJHfCf2qv38HDLxDL/wweRCw30F3oELF+TumLQbr/Eh5EmZ2Fa3uBCnxyib8FunfHwy8Qy/8MHpAkNihCsowoEhkwXkwAIgC5AYkkYGbR2TP6Z3DAvBqf/Z45JVTCwsv8xQ/P8e5o+AVW+Z9BA8FCQ5QBECVeliC/WOyRr/0H4OOCCUTE/wwNRGbkB4pcNTDOIYry3znJe8NB/gXa6JVZFfgPsv972BDvfsGucX7HAfHjx8XT0dgHjTcUePp0fvj0m5gSwH3b/fDu4Yeij9kFCTAnA2LE1MSL+WXYaRr4G30UpX+ojAi5n92fKT1eenN1Dkh4pvXZ49fQxwDgnp/tduA75D+40R9cwndu87t3w9pMRggydsdMKZKu1TcUcUN4Tbq/e7BHQT4u94t/rNZf7C79eHck/+jo7M19M/7L/flvRIrjncv7lspdrNhzt113FWyHaR4/+GGrtCjSHdthy79Qn7y/t/z0x2Mo2zjkvy1S7k26+YF5dezjOj5xDaLenVJ53Aoet7D3vlu4f4nK/UfBOOzDv4RmPFUHTg06Zpgq7NEfTSJ9ErJ3tsee1F5TWfDX0BY7Ensz0jdb3Z46aD8Q/GCqTrFTtVpn0i0VpRxpm5Gb0tA1Q2I542TXcaNkNy9L5RzrkVeWdRzv9PJggZbRUbvbTV6kybZYOO2FuTPBrnsczBalFZrmqmOlRY19emEXuhYjcThMghDXZVBiqYV3u7ZxEathIIsnJnLDp+Law3R/WCsLTVlYh+bWlNSw17Hbvdozw7PeuY1Xs3PoiRHu7cZNZdOu/fi0oIZIIVLq4TyD2LQ3ljLtwGUI3LbB7ryl98vVDscWGUTZXwLT6MaQ/V9VYi0pnSYqna0zMTqdJKVGWNudzcGdqeiSsF13edYZbRzDGel1T1VVe2KkK7PWT6KOvMDSrdiK89YFe/l5ya4uskZmmh2RsK+bPS2dQXm2bu82/WI1EmbdLegkZb9GPUU9m/YZD8Ybu0nmKTEjZT2HssOwpq61yJsbJ7QYpcdO3Jyfa10pxVURbupdzW8mOZ30sY6GjNyr+1SxzX5i0bNUdVUsCSu2MSfsTvierEcIhqwep+MmLBazo7s7jPRtuw6oX/JuTXb46KJBNw4LzZ5ovawOQmtm9tl322ONWxqWzKY/3Ie2sjSXuzBNQqbI1IEWpQtT0TmmgLyaGTsjWqxraDqbZacZpqB72AmZc5h0qulsP2+dvLl69nZttj/p6w7wNEVFUqAosX2Q+B3P4/kqJev9SQ7WQ8PYyHWVCv0kWYtLOlja86nQp4NTnhkCk8+m3fRjDniGenUyOs7CecsB/TY7om0pTQX0VacVYT7YxoBxxvcaGknmS/MupL3myZsZ4SKuD/x79hfv0sEZlaxT6h0YlGiQOOklYl+qia0gy1uvQ8WxRmE6ajX7s6Ng5rR3me9FOF3DUuxd0mMy1Hra4by+uKuBY8+Ncw8N1FgoskVoHYV1bzY00655mriVdJiUc3bkFr8mM5l0GCjLWdU79mY0qHp7djkCk2Oz7pmaqhTGSbGVZrtpT3Q1Tssu8McTVdEWZUtPYiW+v/7qmKF2uuoLRIlVZayTYdadlrpcZj1gse8HoC33VyHujyfQGutnfb8EfksFPlj51RyZC8ca9y76xSHn/NjZqckC9bDHLnDFIdTeb3EwCa34tMw3Y1B1SzpAoVUI1AccY+q6OiF/wPYM9vM1wuUlqJxCj6xei6llNfO0fT+Oe3RmxPGl3RYOQ1xNWp3j8Rgptq4oK/NU5WJ3N/bp6LLkYIgdJsjZzirn3cguLJycEFHjdtPZuKybnRDyFFVva9mw3FN5kIZWK0LBLBLEKkOBFqJBUwktJsrL+WAf8m4gOmBqRB4tempgEvPc///sfWt/osj29ad5Xh5/XAt4yV2Um6iAvgO5KyAXQfn0T5VJupN0ZqbnPyeTpE+nexxbDWDVqrXX3rX3Jt0s4EhWoGMrow7WR2EB4goO59VI5iJLDwSzN9csXM1EvMQzY69GVe4vu9MKAm8lrylkFSBOzAlwcS84Zn7b7waPda1qAwWnQFNqdJk6RhIQhqoNfPCU075EV7D2VFA+XoFQMR0GryAMIAZNteB5Z4yGsISf5+opPNEsCLzSQv/kaLr2IyYmi9ac6HqKbSGVhOPNmQuH1g2lG9vpK2pnyIdxcYSkstquqAj+IjhQeh1ZZMDnc6nGRJlP5ea0Ol0PlQGvxvFVzDwGPOGX59Ym9rQFDTjkQWTx1+XGrBxjOFwqT9cRXwYB5BslwwJznhaaqzgQFd0tsvCkmhh6okegibI1lzLvsMZ6PWUIzt6CyMq3SMwhyh2k26FakKAmFuMDwvhrt1NPe8/lF4zVkyZ837kcwXHMOsZTxXQyoQdrX4doLtLJkN72G36Xpw381SGkuog5LBVxTDXItPHgd8qIyyakVFoTFwhJl7KyqvsiEaoCZ1byamcS1IEdbMQJsVlNNFDgAgzxQjalFWYe5sI46s4ZwzZtJSF+sAdEo3GyqeUAcipQ1o0Pr/B2HM8d4KOVv5zkE5xITjWpgyJew1GqWnTuuSqBvsECx+6YGHcgKXTs5OnKjVXlQD2e+XXrNri4h6aKnms0nxoS1NCqFCnt+tjrVFzUhLqc62LdjE3m1LQIbZqo5XzKJna2gkZM6a6LLtyqoXhZwVkHh4kjgqLcKudALDpIl7yDxslv9/CzIhngrqsEdHQB+yneMQVlzbPa8Y1tc9paHTl4Q+/ha55wdEO+9rt9t6T5Mqxqxib3jRVA8Ev8BYwnur4FscvpI3ez0vyE9c3+2+cCK0hdsXU6IrL97pokQ6V4it8Wy/1kQwN2vpmZeCZ63RVK6ZTB6zIbjQ40rXd4h4zIOMoKX92jcVvzOanTHBSMwi0J1zRcJxYO0W7w8WA7xXoOKTaF5yDhj++ZfnPVEeELk9AREmIedsI4e6MU1/pKSHu1F1wb2m1CMCqbR8RkOZM1XxxvSY0HnZ9M5rzIxqvTbkGNrxDCN
klWnptgq3BJwTTAai1idG67dRm1Ir2GRwySitC2h1taLy1RORmn47E6GnikrvIOeNIZfqap4DlLY5eCQJRWznhAkJqumWsc1298CnIdND3X6EYdAuPalGtNE7xId7DamNw15Ky1qo6WdEUAyzTClNbIiNOHi19E8uVamNrKi8+CcTZyx8FOaa2WlFmKa8yhdlvfv2Sr+dZdrmWlalab5UY7bqPFbimcB6k+enosKWF8JNzdBpIePLCRCnuMLo6mudmRSsXVuz0P8D50x4MvCzJaLKs5yfTY5LYDll4MSuAP8x2ahF2XQFI6Lw5jJnDHfWvNHVcH9G170lYCPwZpwMUXTdRZru+H/UCy+2muzSVnn2qQVKBNEIrtUb+xtlhvRWS4hN6FdLvrE3uuKDc+3yxq7BaI4Kxp2ygGmgd1OlJWDCoOtQZfooBVNPoilXt9PSHZkLGcqcyF1rWR0VwRAsUgeWeJ2wZ0LbRHDbdsK8RupYQzJrQrGhYYhlEf9OlAKtj6eArRL3pn9TZl2KKJs4OHd7oHR2SzgKIx2bAPmkPKdfFS+S2nnpOYk0L4vVpAHbV03JnjAM3ZpDzw4TyVhTEqyPCM3wzGwo9VuthePNZ2a6xO9/ll31uVYd9IhmW3ERX7ZXa3kHEp9SQ0xMlAcGPl6h7hLrD9gnjQA/AvzcD5uZ9DsHgJSwzIbgQF6SX3DtdIJ41aX0P+gDpGxxa6PS33koR0nX3K6UMODVlxWxD9OWj984VkGHO4PVxwGHVEzY0r3i7Qx5cClKIgL3d7QzhEV2A+0jwXcg/PWqAOG7HoHbxPV5EfJ5DyYmm9KOYYdx3nJrJRB1w8uwso1B1kZRz6CKL1WS2EVFjc4srs0Pj6CgUFkeHPc0eEOiTdSFmKI34WmKjzkBS0Gzty5y5uequbGuyN0ltiVuVlTXBccENIpNXgcVCxIavTdBNDudP62CyPCygZ8vXxvBx33XapK4BVdWgRAjImp2/DyVu0rzEppUoafHJc3CIJWt2ChcbuMreh1R98L7Sty0UaGIqDUv28T+bxDZJHyZ2PEvQUNGlk5zL0Chfypi3x8rS9qDbyQGh+fayXULBAmytoWRugs27O0F9X/PVp30g6Q9NZt5LdxWZL4UF7o46L9XbBnNSkKBdWlfj7HltIO8Io8IaOckqWBKI+NSfv7AopvJRrneUpf4EzpfVTRPpzilWR/CSWp835ykMZn+bjEPYEPrjX6xnHa//uV7Q7guv1xVmK9cqiogv0pI7gkC+giZbCyobsUxwKe2JjVXK6lSKsZRxH6J+v74pFWJYbZPYt0xs8ym5xdFC8CRRohSL6YVxJ0qgKLNxCLZHD4znbhZjmkh6SV9aGPJDtPKWNwvnBuRJMf1tA94vP4ErXNnAw+UapT8Sg73rHOzXLuxdha1B2qdgmV/dhOJ5HACB0oTuFIU3lRREHhCNtbBBE21bHVO7Qng6mtZ+og6ZE1uBJ29NivVtMZM/E0Nj2NXFcBJxNe2uoPuqJh45YrfB1B52Vreee9jdj0HlaHo8K7blpoKxSFQKEOm9BoA1pVkJvE7oG1aXricOgjyyHdHKCRr86A7dd2ORiOlwhLpqFGw2bGvMF6J0cp/U2qwMxXctXC6ebGjc3HlQuaOVvdxqXjVQ0VrZtr5MdiykN0OG3XInHM4Bra36TTqBplvW4N7DdisXVFk8knNvel8mAy3eXx5gXWAClAR6cEnZUzlh4pImo2LG2dMM2Es/ZDrZZYNRWVXBpNdri2C1WW9yQiou9qrGD3Srl5DQdvKQVhmxFu1boAwvxcJfgZspgczNg/BiOJNsdoAfdFzvCkvqe4y4dRpHmFAk0vq0VXbx2zjzqlysON313Fxer2sOsAni8DRXJcuEuOYuJVMRRIpyelRgXnQM98mZ13KuC6BglZU80S+ltdCEDYa1fE+0WeIoSJsxQegvbUlvIn8dB20sLQWwJcCnOGBOd3aOO3KPlrdOHp6UuWBTUDgr6Rmur0Kbt5RQmmMuFa7M+BGXfGr6Ursw5x1cMAOuDMYfYEUbqQMFDCZE1l0VHc2x7KI84lD3URV+dMTZJV+XxHPgZhVodQ81TmJHv4rFtm+ZV3irq2sFVHWBnmlsy8KfokFS15tqdZeeFfGUPhVk8+O/W3C43Wm/nLk3T+613HiWos7o+lzOkdQPcoVk43m0OLnVT3vwBLQ3pbq3HOFkc29g2O2hPybB24bVCc0SfT5FCciZEXrGbb8IWa5djVx3a+bULxMbCSOBCzjkKsirRrKXvO2IHXdTjsmhVY1jeoKxfyAV066MwUXA2Jk520nvBcrdlDpXmKxF0/hfi6UZHjg5pTB5lOZ7n113f46XXW2VPTNlCP5IJ4NTjKgiSjp5Sy4G0gLxXdPl2QY+Rv3Vto1rd56YSaQ3P6CZQK0iW6daSUpo1L0tClU63xbyFqoQsTbaUhJEdoUNat9t8ujA9U1JoGRANdEC7Ovd2jbnyB6KNcizYQ9fh6gVohpIkoewaBFBKJRN7J7Wlspp7jW9yyQY7zB3SOW87HwvIm2l14VXRCZ6VoHWLBGdJlKCPKpzRveNyhOo2uOhrrEicwDREsg+HsQvabYACSXCBG/TCZCzpSPdJQbWBvakqrJUyDAiiLro4x+Getu/dplcO6gDnRZa9eTzMS7FDjkC9jiqHjGPIMMh4ngfBy1RhYB7Mb6NX9zAJCyllj3B9hf9t4Xu6d6jIpiP6fYAV3lxaDZvbcSueyeV8QlbVl7vd2V0u2GFxW4VlXue6UC2Kwlquy3O91HpV21U7n2BO/RV9no4sqQZ9my67ZXkWY+iRbzKGaybvsIKUwt4Cczu0pdL0dGHuERubJ+14ybw9FalZ3VhKtNmOZghtvVFtGtfmQnaNNWfX0F3c3bb7Pb4juf33MdX354clK0kS3vVIR/hwhlD442ac2kchkvg+Ymjs1jBBv23kxTy9rQ783a0bBhKFN3QmIjizJHcDCTi/gCvqoKwiEqJbh7Zed6hEjW1/WPUElyR9dT3eIltbJDoyN1szDHAoUHYYmOZyevGCA40cB/+ER0Xkte44WCj+Fu4RZsdr5+e23V486Fwdp0C9dZluTR3oh6IJy325aN0LyVc54IMLnFUkAfYUNOvCysAimqJGnIqrZqlXK3C4rLRkfmV1nWb63F/tOXZZXKwV9KSVuyGsdpzXuNsMLlvldigWBGMWDtUb6FNjwB0u06LGRVp1jm5OhMLxAi2aDA+YWFIHR6Toc0hdzi5h1812PbTIdkO1jtRajMw7HAqIXok/caohSmu9GmtdZBSGI0AfD5t9H4z9Qu0sHkVtVvKd7bGaYNA8cVBiNq6XX+DzUw59qxPg4lTmmf5h0vRrQ623nK3QWFVNPSk9obnNABc0DsPyILAexK1y4PLbHOJsDz0BlxitTIMeNrMl8jXVNpQlO7JDQL/QovJ4OPrGwRk9/XSrucPWU67x/EGvzztp47jos6oo75FNq9EpiaO+onlDcqC9uyLduDdaJb/y544M2QCqnssYREY5xpcJRTBK6Uob2Wop
eIt8dxIif03UOfwtg0KRUy2vGxFFLnf5It8qq60g5YjcTeU2mcbEqYWDKVAI9HpHOpuNTEOeIY+HovR096LiQe9rNznjY/9ExzwlddXitk/vi5vOofZTttxwYysdHXKAqzWnLOQ5nQPvlLYtriYEnsUbExqVi3VrzyjKRpTbpuxAVtKmC1cedYFciaTBuDPmaWVM+26qo2U14GqDA8skereNgowbWo8NraHejERrpJ7KBXDIpwmY1rwYNajaLDK59Iqkq0tSmDQlO+RarhVCjYniwSbZGMVZlmLXjmxgqsi8nYFXEck63V86Gx5Rubli7aIYTefZc6mBen/c7VWn0m5st9kE0vLIM8ZcSI0sL/7q/0i8U9ebetddvtHTaPOGl0/K5ri+rEpRfNfsPpKgZ6/Svv6DU7MfexYRDD57uufW68y9d8vW434iOwtts53/bIwOdVXFhz4In34D+y+NHU3CIaFJhsMwDgDi1f2ScIyaQc2HE4BDjz8m1OMEgfpMQueJpimaYhj2x9FF3cWIb8eAj++WEst9zYz6Lp/QYSjsjXqrJCHerreKQAjov6rYfp9k2wdE/zGk6BnF0CQF/5AsS5OvmskBiBjuezYU+VTX9zwF4w+O8BxVgJ0B8vnPu4Hqa2bl/2KgQrcR5jCcZOAf+n6HxxegorgZ9iIP5AdMMcwMoPwRjMUY6hvoniOKo2bMsxxwjHs3RH3N5iK/GKJwHPIQjQHU4JBhGOyp3uMpTQnMcApgJM48PP7IUn+NKJwiZ+z3Q1Dg/Ujqa/Yn+VUg9bxQBMxoBoNCB6V4oqS1N2DBoXuef0+Aw5h3g8VPdCL5ZZM9qYes/I9K9mRYZsag27+gvrr46/bL/528c4YEUFpT3Lefl0qLmZHE9/e4V+zzX8r1ZAmoxIjvKZqv2jSwBFwPOEWwj4/gz78QTc1IhgYkReMMA8CrgwFo5ik4nNzT46sl8i4Jlm+18gAnxCQhfJI+UMrDC0l9/zLfVxdoLvXTG/95YDsefoDgztfvb8IXkgca/v6aGrRBWVfR8089nGvdXyK0RB5PiTzu+1lfXgmKh31/7dWSh8TXv7Wun4i0qhG7v2Dkx5eCx2zPA7yCuH0jDbTMo+j0R+alfbApjwwA7VF0Z5p71wjENo/Po6DLvjFSd4z7Q/aCNN4jhsDQM+hDwJVEoEpi8o0KQAoik8UIhoJ/4NGeao1feLpva4b/Oq9DAP17vI4/Z/VnJP8HvP6DuRYIDP78aOBjGqewf8kSkB9adMLR0BIA1EGdgqT1ukidgoLhee3p/80SsNADgpRJohJmgHHYqzR5Ap/hEJ6PxMmCn6tI+7umgKOYGYbB7wEg1ePkS0GN49zs+eKgXliC9yFv6u/cMfkLZ8cvRpQV7z1kx/Pg9rDJmqC8eUV+niHPb53roPrwfYVnt05RKGqBOYLiBNZpu6IXW1vmFZldHQR5MFX3nK81J5cPWHVcS9dMKJw1Yawdx9weM5HVzrkmLpbm0itZPg1O254XZNXaxwlKPBjEULoM3bRZatySa5oqKGOyjYEPccRcKpI8LOYaMLYKFo7qaXcwlOte2+pFbrT+nD94sSpSbLpUrte52xappeuicIgdZU1RRXMdR185ZquLpd36w0G47qmdXtz42p/LkXexeKrrNOVKyy48oNXOJaHczSea5QqqWy9Qfnpb9hu/yilKbbR144sNeNjrEPOFONEUB7jmoFypy0Woc5FfqlJGLE85HqIdhqZhzDieZ2lhSMvzdrmBXmDjdRNI3fu1xJ4su/g5yIeBZIB169rlZRPPt1q1bVAmy1TOr1Tn1xg42DmfyZkVmiVh51et0qYD0a85g+q360gUKdpyysKp8VzrLYyLyVDBXdd1WrsFeb2SHVmw7zunulBcR7A90fHUazd4Wdfocg3QNkVLaeI4SOl4sDnzvPHccnHR03sO2cgmc4FQst0uN4uduzxHC7A5LkT+ElrVblDya1KA3muvp0jkMdcb9557WsupBuc5MwJTBSb8MkyNNZ7u9nJkV9fdbjrqyo2WUcZle3rYUiogfXrl3CglZdW4a62oGTXjj/BU+S5HiQHGLbKuUXQhQ5doZF/g1bmENlFV8DArinSKfJceqcNK3xPRkYoT7RiqeE/FOlxgcJSP1cNelTJN01XeqVr2mLV9nHdEYjuDXU4+RMCRmPDe1/PyJt7Ydjm2QjZigszaIeGUp/NyJ1Q+eWYT6RaYXlfK7lVba/mxsjdYpI17gwp7azrSKLE9NI83gV/JnbI/j9TObBcj+uba7SQ3p31KhlegicJj9ruuHk1/GCleRcdVHSya9+H6eBYdKvZPgsyv+HG5N0t4LZ5ulpsd2TainKKdLf7ih63stQuSrI5zxdm6x2K87/GBragyViWV62SpZnWUMxFa+iHDBb1KpVpU7YnYbrHlZrjQsa8sSlNfH9vtLeNlfmNWU7pydhFg4+3I3tP3L0xK5WMBOcywrEsIV4fFRBcCTQV72Szgau9IvXBY/nwezTWgVCYZUrwNiI7Q9Owxea1VkznEoaMLQVKvmXjqWKtwtFHgkwptnN3IAeKjxBPDV67bTFsrUktSpUZdmG2zt4I1ql+o0hFlJOyWmpyqg992cxd3/f6exJzzcI0eKgBYY3PBlm1z6ySbBJxn5WgX8sJC1ytq8B157PU14ZzOQXlq6HUTwrkeA6KixRUv6uISYYutW8+Nz+CONyGP4sHTxCXeFavrIV7IsqCivCuVMqui15vxMeEOwmMhPuTFRStqtGzOaahFeCF3vYxL1XTG8L7Y5XDMT4G5ruVdqoXMdBXgl5WHtTeQ+mmd8c75eNvvWsFs8HjYF/vTXt3Ukwp/czTytLaklA2xu1OnzLG17MJFpPoTlx1BL8YMXCyXsdedIyp0AGgBYnTO1ktDgke1rmj3uUJJojRIx8TOrnImYEyUZd0JEL2+7wls6+495xiYBGVo9TKpaPYIT3UAqMxAmOLwYadYCBMJV6cGX7TZ6UYHVx040BHbowwPSFneoRv0nC7TNrvRu7NA02vvVCxDtG8eWYFZUFwVo4xOhin6BcoIPuMAQ2mte8LsKpRhe2x6OBl1oxX4kUIlEIfBHDrckZWdh3chiRK5GjJvep/h4j3H0PfkTCk3xWw1HrbKNW7Pq1JM9SSO017O5iTDtfMCZairxyStbLTVrByUHnqWURg6e/VmUccFXLqCXG725yMroeoLF/LYagH5eF5kk1PmiFZtlFsHSS65kOJpscFoS8jUzNl13lKsm12XRKR5HWmWilCNBihqTyHGdemuq81Anol7rkEIF1yadSASkblUneM2iHAGzWpy5R0pG6la8u0827YoHQkCmb32UTse5sKxi25hSV2qxe1Q6MuT5i2IcY+xsbvsI/dChIuS1ik1r/PFzlPgsrAl3yfZfRQDgR8jzL7six1hCo4ukj0WZbtgrtqoCuOsLahATLUF0S8uh/0+Sw5putVxfkNeUaaJUtrdemNueoI5kJdhuxV4lC0JlHOwPeknwN402ndP+/xxyfeBuTyKXmhXDT6dxav
FUdnGZGNrfrDvKYOGWaV9d1tk7gGSo5xBJJZO3jEBgbdtXt9Clu26Aa48OaPiMNKpVNTyZd4toYIxxiDSjvf0JAolryiFWU6SWA/4ab1NNVtC++k7Ijpl0yK9oqqUFaoD2ZaEKW2MZQvpbOjN1veQkQ2sXe+vuKGYTtlDdhttFttLv4WH1U70Lszdo9ITm4Vc+HB1l9OVnpQDTomT2FAg78l8l9myzx9LGf/GBu7VhJwLuMndYvTNF0NND3uUR5iUF1FMKdt0N5tqcVh0aWb4tnYOd+Uyv+zacpDXO8cXlmd3AdfkBe833nbdbOn8ErD7RaYc5NNi7Td+1rK1I0cPFhuY84IN+XRlO73X+6R+xhvAmEEhwmOJWR0oTVqY2MUVIlnowpwRR4MfY1uASmOx6z1Sr4Jree5ivlneBufim9WiDRwNi4MR0AHKooHWcs3nBWSMvqHNwJW23lycSxm+Djh8DTFg7JwY6qDzvcZus6DYixt4Jnmi4rkwNFOTXmsNZYBulwvRfswsuQSjGEIj7y3twYRkBqy0NvgOssbWdb1ss++IPgyodGlFanq8GBvFaX03vPjayW3oQ574LX4zgJWdq4mmlX0ZlFsPTlwW5HCKida4QmCE7lVem75+peTtSobioiURyWzUvaG4uOLU8U5s1MvUhYqAknJRDthZirSN79otrhbOpcI4aenmWqrJCzUobqf5/Dh6pK2fbiO2LsG+H6EA8vQztkQ5KGfRO3W7QzkZk8Gd2Kqxih1k2KvAc2Dw6bOvGwVWihYJ7bBg29Pi+JBxka7mApId9IWmnJLpvWojdERoEvFw3V1xpz1NUPCgrI4x5Kh7et7A7NiLp0MuuNH2cXtr3Nbmyknl7zn8KL9X5EfjinTKlY/nNQMvmQ33gXFJBhJ3KSs3oHh0Y6jENmaQUOMVjvFqKaw0aYUr7inwvL5xl3g8xolZwteXutJgovmoYRjSEfHUbL2oV0WCQrrqXkq3QZURwbSi4AhshJGNr6juiBMLZ0Q5kc4myq98frArMljOL7eOMa47dmf00K5LWUeG3RoPuGik2J15rztAJYH77pY8oMY27T5BSd6VwR/2q+0hka+WiOM4I2UpFh4XTZB38MSLuzosrjUpne8lKsF5zZV1bhSyk+5E4QKluAmlK19F6u5q8XR5aiukileNAgcA4+YrypIuxbZxoGJnzHu+M+KMVaCsKBtqIiyyNimTU5dFuve2ijSydsrtRPU02EQhLA9i44qIbwd4sMVa3kXQrzruVO7CRPkEZ7OElnkxv9fBlIXbZgU0wp0kjCtZV3KoTML1rjw3ywKyY32KBz/Thvgor7r5QIb1uDO2OrLIaOVgiqxKOOjluX+Bq5LPx06fSw0ObZSHVGwIuXqn3zMqb0XoRAtUj3iiDz4KPghE6Fw0KNcLtVgcRS3T5GaRNO4mZ8Ik2R+HwgU4yvAVkfY3/LDzjI18lYulWkSXOVwXV4qKMKIslonXSYEZWnOrJ/D4DGmiASxEEAHnL/eg2zNifbrP53ptzTdNPfej6TAaDrU3bbuY11bHWFOWsrEuHWPI2WpimoO/OR/LI8nHIpQ+9CZnwyMSap2udbrXut0pDjpGQmntKKbdoIop0eVrRb0mtSvyQDkehvIBOvCvGxa9c+i9xtmQmnTmgpup3EK1neSt4vjZtEkSXGwOeV0vaYTlhrkwVpFf3PUlSKv9ers3BRxaVqJvg/WcjU11itKdpwrpypiXl8k+2H7jhu4uwu4ZgaVPBH7NLSWUOBmQWuy12aj5DwwoeDERElEl3PRIXoXYxgehhxLhQLOIRRFOIn+dC2O3EAOscG1/IHammjuHwprKcA4XM7VTEKkyEeSp28QXbUyaGNRrzRbVIysJXAIGcvoitPDmFHlGGcPavPYmOdqAhxHJsuvdzZWgGk6dBXGBEOeO3Al+107id0S6V9Nc/FY5xBqGrQ7W7vE657GI4hH6UUysZctRQDWWUHsOUGkqC5C5SKtttJbpIhIKdq83J8PbLl2Td6A3uat9NZIUaoM50H+kb0WGICpBNd/gQd/LdOcjXSTMt2vZqbEg8G+bLW0C/OhxMjdBpZTsTzj8lUiOjqurDG5Dm+b4oYLjs88ueyYxNU829/68QN9xwlCRdsm70IauTkyy9MrzpYZsc0M5HUJYngvPtuwjB+XxWMRHvPGiCJcGId8oSm5v5hHoLbjSTxB2DVoPF9+uCp2+xFUNUBZ8hk7UlIoWGR4WmbsrNDm232KYDW2+P88Ftc+4NX7pw5jEuW59T8ZsUD3K0Blwds59MNmXJqJUOAepl+9RrmVY+OptczwtdnG8aNkLKpnChstg3v18HwwDoAxZ4qzWFaFX0DIMHIY8xDRpZ9NokhLogXFQFR2ME7aplGu3XhnE2ag2eKY3KolVUAFOGajw+FvlDXSgGd1Cq8OO85gqJgaI8lp+eEclvfhiA8Q9VcNBuNnIrajW5/2SvVJskgEPDjB0RdbexK0XeaKn93r4ybkIwyotsrs2xq+o7mh3ah7qIxo8zOgH/SQcT2hglKJ0zv0Kb657/rYUc6rTLXwf+zUtpGNwTaB59hz4MXxT8dBuyanRjSYbAS6mU8ho5+0y80ooUpfaEaphIdN9Rkj5WM+pfFPlMRfo67HrJdpChIEt6chijL7yF3DiVg1lxPacQemLyiajWJFnUPa/bJTHjkORhDtOR2xe94dEunJRlHQEVGFXWgnUtCamGK8G12FGx0fxFbLFmZqoNueuWNwiNdBvWC/50CvpvL1B3DOjhUPqpTdaZxkjoOuYTXw8C6yi5sJz2kJjrmShR9hhX2L4AOCiUjSMEefFGVsvNAJN3L7Dz+yaZOx4mB83FBvbKNiychG/o5UaqkPY54SWS7jlrmsX+RD4Es3cZolXSbRixtUcQbGxWijKCNb28dNqYoxJu3Uq1L9tzlzAYF4RK6J87VQbguTcQieK2wWSVJ+XqxN0Xhjle1XHEiuZxD7e4iG5FCG0eDScZtryTuleX2M9yqu+rbam6zquuMIMRxE2MilJdjWxNmUejYS8soNS7x/85gZrosBpUZrtCKlIvvNrvU2hwrclLqFCLihx3bjTkp+matwqxcjZnjKEiP6UVYoAbE8sZW1OGapSVyuuR/G61VaBIjroVbC3qxEFKxMSsWbNENuT4BhlsVq0DbiwdkJmj+LVpm9RFc9Lugmvxvq2Pp0DFWlHYBJiWmtH+novechGaNAZ+fKAbES0qGEENO4mnzvHvbwS+DnyKZPTGPnQ59hhplNThzx0WxxrLf02do7WoSLRlFpK0PsQIdk81nUn54yzCgd5znh7hA5eBpkx5pT9TdP4Ak7bWVxtVWHlE1rXard6XBwafNWgwnptOpA4kG73UOGtmz9MWdISYdu4DqoB8XIDKM5qC7TxACdou1ckfTmm4gECR1KYa6iOXADsWKTozYC+1381OnvwNqeEXVDYlt1WXqz+q/nDgMBm2LOflzv1/yFnNE0wOAkYlkW7G3j8H/zHbKBvPUWfbyA9ZSX/13cAH3e0fvmNDfXe7s
d/2NiQELFthAY+tefbb9sahxjnNy6yVfOCVnnlRnsL7bzUNHXB79Wrpm0d2nS9Yal4VaR7Rp1rituce1AWpwK6nnJwdftzw+xufl2Y69E5zUNVkR1wIbTUl7Vsp0RBrnmdqejyen20NpASlE4eaPqA9yz0j9qptBjUzGUP2UjpiYQ8VnPSIhKjS12MWCaLQSVE/wCVH7Esup163UYF//CUhk81cil1e3W/ioqagU/d+X6M8ppRJYBCGZ4jLqB84uLCr6zLNY1Ppa+fMFHbreI2IPrKaq6ooc3pVG1OU3ZcLiAZyG4znFyNjTYip9ecnEIiTNCIh93K75GpEyLAqAI82W40m8bnj0HvNlbX8rd9iSopdicBxfpRIPIIxWG2XOb1OcpWLbepKP3MWFc5qnYEEmrXRNj6xUiE/bQ7ESGG6uW3C3XprnNk9so5BkylTOM0QcaCAQ0LXQyvxj3IopO1R/w2rM5VgRxNMrehV9AxqAnLXtmioiyTvoTlvPXaFqfmqLY/ufjbG6TUUpZtgbLl/LRFLTLgZYwHpRzX111nCN0QPQqMG2r2MqYKX2dyfRrZi7Mo+9DBdnCumF2miTxfB2rGQu/83phDZaJL5wcMaklUTbfbOhnZYSnCabiPyAC9bQOVB0KoHTr3eEmgqG/W5bnTffx6EPgVitMows6TM0DuzcZNBEXYonp++OJqlcurPL32G9fdKsdtdlS0W1IhK7SIH4ytExmVlDpMTPaYmmLIqRdQDeV8BK5+4qQdoRUpBecOFRfOHSziNaG4UoyAccZm21RbqODy296o7kGKVZqVktsqJXO4yPMNaoNBXezC8CSeX16DflUHxW7vuMgNFutGPMPZj4TaVERU0goPuU5XO3N8KEALCDZZMZhKXwqnJuFw83wmyLS1afJLWwQBWhotDrBQQKVqZTMxxSaJR7OckLithmzV+WkmisDKalTOz28epAOyKKa5vFGXfb9hGPM4oVi067l4cm+7kgkCyx62vlAcYrAAPtp4SaERKjToT0IUednZg/YfnjUqJ+icrc5iVHnEzd+EjbKR5kWGF6Q2cmgFBLvJg1IMRVsWhTNIW9xMoDMvC757PXDKNd2tjR593UtZFfbtvtWCHIkg+OZWtueFCqAwv0gNSM+iqASdp6oE+o7kvhtjWb4H3NJbuRp5zJwOBXTEnEnnbbIYUz3kSD9JyhCbn3qnMCz/RDspFgg8DpYESRY6mDj7HiSQDPEe7Q5CZ3iq+68q1FyqDlm0Tq4HS+BlWRujy3WXjjG3EfdSAgF4PC/XmCmtdvrGOje78iGAmgq8mKCOlAqcblRYrOw8JduvLSiAFIpaHfiVui8X593gs4zWkzZUsc1wbkI1XBAeLhrjoS0Pfg5nfX53MtHvcdw+UQ5n6I5nR+l2kYQUV9bbUsNCwoFim44HGYnoE9R1GKnBxTWl+3kkrtJlj0QThO41nB5KIsmlOHeOjlxDwXiyQidot/dmMqKortbi+oCzUHNl6g4FCw2PHneQqBpycT34qlGVehOvVjzaIisEsKv2uHmjA/fknvoDAb3A1c5QeQgaObDmfIl7JXHdzM/AKrHJ2hz3q3k8oFxMhXYBfaj9E8FE51PvYkNeWDtUcJUifq8MVrrPSZy0RnOB/l6w3p4WS+i/93aT9J4kwNERKTg507y4dIq7XKcNzgHLtsuFuyh2xOm8p7HVfBUk1Y1ei/JozAUeVYzGtj4P2ixR1MM0v4+tkUqLgWzHWCXMhVpjasuQhJqiNX2aFk1yQ22ZcCu8NzMQpmVzCGv4LirINLBiAFbOowYUyO067jxSFjFOX6e5ImWFxPuQHxr3YBh5xLnwcg3ObtiaTWwi3PkKl4SMYvguFk1o2RxpzXl0F6u5JnMoGGZucQEizI2u3WqxQci5GOIQEvd1dht3wBqGoVn25elilvQSzUwPSbbeGn1cs3yn2nPpVJd6zljBAqDaPUiP/EoU0rg6BkUXF9dyr7u7hbNcbKC4d04p3nDJBE7XYHIfrqa6gi1zOrBL6JCBwkRMrgzIO661bKsHHnWlq3kykI2Lt3BKT2cVmR317oeSHoFVIz83UEMSKd6lyPWohIZ0ygZXO0uVYptNeBX47hk1zgFBKSxQY4C6j+8NqqJLTVaYHQZNk5+B39jOSky3NTTLp7PVu7UFPQwM8NR5p8+p88jlKNRZlxN0oy7OYRoPSYIQiWpCjam8kMkwr7aYnGnrjcIUvVptVhBMW4VfoZUynTFwuO5Z6jBV+0AIAriwoSSo/PmNbHwo1qDZHUu0IVj3NnTOULiHvuzUVZpDnOKNifD7uGtxuKKWYrK8XJXnc4PnmC2Ol+I21WC5d5cP4WaKgarvYnZn3E0l6BcHkmj4Ic2Wvl3gKC/huE0zQ3LxvY94XHYTeIFR2lzaJXDWS49JhiNpIycETvNNEKh4nkFbZUC3OJQptYQekSRBMfdgA+YoNeLoBt6psUN9tZyW1313zVbVgogsv4vwroUeIvygLIvXeNAzH5R9HRg8wzA3Cc2gdtNyvocW0YFn4dOl2OlWtRmunHXauMMWNZWAfLThb4fjAgN7cwT3lk/uKfFvp9IJPHw4dy1zKe9NqZKaDXooEqBXvbuQQY97rKdvyxYPllm9ksmQozDrGGUNHXjKyYELxcq29nrpB5NAW2GpptcivqsVxtkWjKRtg0nSTzk28eibnpGc7S3CVfJu9BrC27QOlmHpMc5PF7bpGrcUmoDozLi7NG4wAKLd+14z8YwJ3ZFRMlA4Ch2iIVy86UYUpJXE9KLn1Gp52nKt27fMqksvVcdIzo5CURiz8LF935LrLi1KBlploTN3SYV83Bsj9af9XjGswWs9k5dXjWK71BqsyAXtL6Cx1BQ4aKhCV49M4Fh7ITC3yxQjMjtRndqOMdR+BJO1dH8JiYcpPRIE5U1ahwJx0RSR0dzD+311rRyomKCerBeuf+IS/yqIhuovTxKY7lGhrbeBeItlw1Cm5Hoq1xsxlG7nbUvFm6OgKfxOl+cmFEJbqOMv9bltJm3hVFxRxbSyVVjWY8HB2tloqYacT8+HW9IyBBoun14sDPWbJd/spMlcntaoSHun4IALs9tjG56rdtwscMFzjyHJVgvoJm8V0r/ORU2FpJvsNzEPWje/8vVqBb1+qpMPQqPvuvrJ978cK/6a7ElRddYLEXWgYQfp1q6sI8Fc5+v5tp9Avzq7FxdxNzOh9h+7nvOTPS+Wl/C437Gx1a9aI8ciVMoMFU4poQBcSa48Tj+s5e1ydaRt6QYBO3gq1236uX5OCqT97ETvLm0DWBvaV5yOJE/lMS/FSsmspBZH3OCSrM+zcUEl/ChL8vWwzVCeC0lOk5SEA4k9DBG3pi56V5jTxm0quTmlx1U+RmK2QqIoC2mrqG9GoW+027F3FZ+BrHJA21Q29AzsDAXv4a/r/Chqroha/5SmV8ZLgVme1c2RssZOQqt1KWc7XxhZQ0JdOe9pK9szaioF/YfJYm+jWvJYaPaizg2QBrjHTamRUFa1cg8bMxnVebo139TyYtd5kV7Mmy2zO
pqSvt3Hpz00Bf0NNcRT1jvDM9QkpLbQIl0xNEzd5KHkDz2saX269wNBWpxlV5G5vaVbT+8J3YcsLU5qHIVbXOQpS8igbGC0qQPlplenPcuO+0O9RRu12m0Fcbpabe4d8kjvGp2PGwFY3K2SUC5JaWKo2UTl4lFSfAsWIdAx7TAYw07qpvK4zY/w67RygNYPr7etTlwiHaUEZOm4T2tZEujzYdsvalNIp306or6AaXcG62O2hJOW8yit5xBKO3cIS4A0IJ0uNDdHvbDc6HKWU4um+fuOLyLWzeKIK1BKTXi8HJxgLhXXFD/uiYe2KJD/TR6ZofmO5Xd2NVIK6mOlLUX+tA+3FL5YnM097YbtaRH7zl0LP2yDE+H0qF2Z3aXyWizcNKCsgx1zvou+YBOtV+XipDGERx8SXJaBDq6nthn6aKm4kCBwYPVtgPunI25VxXWh3/fhMrdESWOs7WD7OWFUJnFvbnOADl1SQDnKmtj5HpbDaEky3LyLqrg99QltK1SlRMjK9dtqTa6ytEJqtsJ2dUmbXgMVxWJ1RBlNdlVVGFkiP/uawJU0kiJHbtaMwxaZKk10h5cE5yloqSRyFXP+bu4IvXfjULibGAsH6v5CQgOPcr6CvG4MEW21Q/dAyS96Se+iCxFu9xEJT2G256GE3lE5RUNZ3qY+9FzpjHZoCJTYlaNjIkOLqRulD1UGT5Gnze0qf9ViGjR7IyRNspeWgiPqwh4lJ+ZGoa2RPrXmE9Upp4pmRYFn1jFq6VOq+CaBMuMWD2SSWiWkZM0fREW8RrvTHrUIq3u0XqR0daB7+GXJggs21Zm+mChiD2lzOB/Ei3+bSpJAnY/40LxR8s7BruCERVVwW17sisaiIVCh/o5jgou4hZcsb2dFg26TurPnxxtyfQM+SZ3ynmSmOSno12ent0Oirw0UIBJAMY60BRWNnzNncDnnur4OxJG7b4uL2dpPYmIIGcHJaV+mirMedYS3XLW92xhetV5oBr/ssS65704d91PoNqetaW9kIqr2jHA1bJChwcWQQNpAh1AY5OnUr4ZRuaGuVXFnM3N1vllOLDcP1Lwenek0ohQ15bwww748DyEWsYl0LS4LR1vMV4vbforvbYG54BIXDoLhvY8ryXCEvTjmXheuJCA6BQeda39YxyhKf6XY3XU4LbPjxKyu3V5FGTY4BI6Toka/hC5kqKuizcYPhMEJmKahhmGBDtke6oHGTGq0hQnfW9gs/dhcrds0wQVtDfCWP1HGAl+AkT15Deat6V1nJEMGULie0lZpc+t03ex9yHdQ+84LNoFeENQgPTDVKXlY/jyaMctMjjF9Xq7ke/BCQJkKvCjLSOOZSwK1m7orUIG0ilvhU6Jz86AD5mmyiGSLglgvxnp9lULh4qA+RUbbsVuoBiSCG8IpRSECI8XxyN88JnNCZ2CigcnawvjQNpQsVxxaK0I+N/Rpez5tz/e+Qr5/lNNjs7y06AJ8/cTCL7IRGlzMsMh3hZpT0nGnKZUEHBwtIZ43UFuhQ3bcFumDxVI3ZrkSR2/HxRc/GD1H3d7U/XKDMpHmJV2HqDta57UrDbU0QS1fc61coK0FPrjgA/T5fS2TtRr+16Htc75TcgM3976To3ZkE+pxkkOh7/C6sCusTbnHpnM2QEMc8Vz0lFrkEHivB7GpQTXGt1A8BXnXGmvUcInKbtVyLy0OA8lEtH4gEYCC8tx4Eulvlqck9Ut3z7+OKT5G/O5PL/DpzbyljOqU2zn01uBTxSEO811jXdMQPjWkh6e4zwpX4srRmMwta/ken19vXctZ0uJO01CU+F3LcwA3g3ROMQxO34scXoboUbcP7FlJJfdj+Q7DzCiWIEiSxgj6e83li5YCYIaxNMuyJEGxDPkndR7/sHrnJzoKPHQE+aPx/D+3A6Ee6ub/+EZpgJgxFI7jgKFRr45XdSQkAWbPOjc8lZG8GERm9qw+jwHEu43iT1TR/72yJ+YfFCM9Tuq/XmL0NDGo0PxZSfGrA/xlFdHtpybvne9NQ2A/Ua3zbEofaxJflAv+ipOLbnv7ouqV+Zuz+y/N37c+LW+Uyv5sZSyJ/d8rY//puR6Osq0OkHADOEa3v6ixDds3qm5ffvAvC29/vqC2jeGVPzI+2lB9LMaDx6WF/0cj8xtc+vqxo8If1vS2dR88bvty2A8lueji1w9HIP5Zve1fGRoWGlvi+X19fsbOPG/lhb2x3kjsnYzNt7LMN5Ad5cP/HWz/zvKAeuFpJ//7i6RI3Qtzf7imTRwcMohFAltfznE75B3Cy1+Wm9/H4Yvi/h1xzuCzb1qKpl/dfZCkuNn3hkPw8cc7VJPkG0An3gvo1FPyy2fSpk89njB69qKTxI/t6FiWmD3QyYPSf6P5ColRs2+HgI/v1ZKHwH6i98p3FOJ/0DLhT2v00Up8K0nnVaU9AwDGBvD1+lubnnvHHbi4c/hl4xadM6/Sx/P8g8WA/fXt9dCCuJeo4ySD0TT7xhQRHDHDn000815ZVAT2E1lU74X2Pxqrp6aW5Oz7GKGOKC+Ig4HvPkfxj7yB09SMIgDJ0RzJYSx4qw8Rzc6If2ctvNVD5Q+y1T5Re6rXLanYQ/x2S6qQpSn6nxkS4i8iIBw249AtQ+FfFjLcyyYScMGQb2Qwvrx1PQlRgEG9BScaPDUlfXHrenL2vCkVR7/bssO/5q3rPxUc2BmGUxycTMgDUFq81M8cNwPP5TPxyfHwN+5h+RsPb3czhOIIA4AEGIMxFE6xrxv5AjiZz9rM/GgwPhciiF8BETEe0THzFiI4wJDBX/UwfEe04EhhMQxHkTgO9cErtwSKrxnxPOD0o//9udDyNfs+fyb+QF2ZaQLgUAZyLP6qid9Xkxdfs2Pzp4IDN8MxCoNyE84byXFfW158zX7LnwkPDJhBUqBYSAUsCyjqi6uLr9kt+RUgoLZgI+otQLBESIKPUxcsOSMwwOGAJlH/fu5VE9evJi5+Io73+cHyoexBPkRlOYpmMRYnAPW15cXfKL38DYi3AcHNKJbFCBZnIUeg2tovrS++ZnTz/aYXtbQlOQrgDMuxAH9KI/miaoH4X4lWfoxaoMkZzWFQJUDEkBAtL6vvv5paIH6JUGYUxGzyJljAgY3D9wUExANBMziA9oEAaO5eJnt+MbXwNP+/AfF/j1ZiMxol/qLW5gwLmJfByi8mFohfIlb5sXhg4IyzNEthOE7i+NMAflV58UtEK38CEB8jLwhqBi0JhkP1QLMktCtfW178DmX+4+gUN6NpksDQTd8ogL2+ZxI+o5/fVZL47OzxS4QyPzaVAswARgEKI1mcg6d7BQh2Bk0NQ2L44+Mnx8PvaOU/l5vkDNAAY0iORbcB5V5udqCtEPbZ7Yy5Tw6IXyJa+YkzKbAZgzFQadIcBM7rnTEWOrNv1GV9WrB8zUjmZ2IPgM0ARZBQZnAsRAT9teUF+b8S+3xHQDAzSFLfc7lfplIwxAwnGPZ7EvZbeOCwHyqfX5Q0MDMKp7j3T+Mmf4ng5scmUlAzhqJxDhAYw9Ec/aW1xdMd/r42Hj5vHgU+YzgA
CI7FCYqlsJebpF9MWpC/457/eGMEn2EEIDGGpTkOY5iXWRRfTlr8EnHPj02jYGcYimLBuaQoEv77JUN8sdAF+Tu2+Y8TL2ioJjkcuqOAhTPFEF9aXfyvhDY/KO8Ceh4sBjC0oUpBhfFyY+SrqYtfIuz5oeoCVQTRFIlhLE5CT5MCr4zJV5MXv0Tc80MRwc1wAFhAAtQUDWe/uLr4JUKbH5t3AWYEzhIcQ3AoFYckXvUk+FrygvolQpufN++CnEEMQGJgOYqDj19aXlC/A5//OJgFzQUF9SZJ4QzUGK8qTElsxoDnP8QnB8QvEfn82LQLesZykBsYjMChsnx11zUU2uAIBJfHx08uL6hfIrz5wXkXBErchMYCEACjGfpl3jdFzyB1UAAjHx4/O0P8EuHNT513gWFQ5VDQdtBPSd+PUKFRE+Nnja1+7Ab9uaDyO/D5j4NZ3IwjGZylAclSNIu/arP41dTF/0rk8/0AAcCMY1mAc3BmOZYAL23Jl1MXv0R486MTLzAKw2iCpVC23qsyw68mLn6N4OZfJ168Hx5YHPXXpFhA0QSc9Vc1yl9NQfwObv7jvXRsRkJVCSgKOqU0TXAvo91fTULQv0R082OzK5gZyUBlyeIYyTEc86pF/ReTEPTvEOY/zq5AHZUpJBEwlsYo4mXu5heTEPT/SgTzg7Ir8BnUmRSgSZzECJZ5iZUvJi/oXyK4+bF7p5AcIA4AQ2Coyphjvvb+B/1LRDc/FBHsDDVIgr4HHEyCI9lXevOryYtfIob5sRRBz2icxGmSIziOJtlXFPHV9MUvEcP8vOkVxIwGFA1BgmpOX+dmfTV98UuEN39ir+wdrQkzo0iGgSKCA9Bf/exNb+hfIoD5oTPOQW+U5VC3AgwDBPt0x4SnZLwZ+aIp1meXD79EAPND8YBj+IymMECRHEbjNPHqjlHoFsnPu2J9ckCAXyJ++XnTJzB2RkPPgmFoDF4K82rDHLqr3Isbzn7ytljglwhufuh+GA0BQQAWNa1gUH+sz84Pv0T48kNnHEB/E3oOcKoJ4l5G+pUFBPg1QpQfiQeGmAEC0j7ACQAwgvja+uHXCFB+1tYUDDcDLI5zHIMasIJX3PHl5MMvEbz80L1Rgp1xUFRCM8KwNOrU/MkDEOCXCE9+bH4EPeMAAdc/yZIYQ7EvO519Nf3wS4QgPzY9gpxhOMlQFMcyNMe9anT21fTDLxGg/LzpEfBrARpHnYwIHE4l+OIC4pcIX37M7udnnVPml4hAfuiONgOvHKdI1LWSJqjX96T+YgqB+TVijB+a4UDNXiZBvWT9LyYRnrpr/fKA+KAMB3z2sh/Rl1YIzE/EJ9FMn/9wQA91VcWHPgifPo795EBTD6X1f7wqOXaGbu1LkCSNQS5+leiOk+TsWX9i6sdcEjhNM4oAJEdzJAdHnPxxoDl89iwdhXpqSv8O4/w1435dPqHDsNi/HRN+wOUf63R6BgBNEiwDWIbCnvqgPJt8CB64AjHA4AxGooSkj5z8rxnH+5PJf1+H/i8mHyo0liQAoEiWIhmMeCOm96lm/2uG9P5s6b+rWPuj2f+s0/s1I3R/Mr3vmi70V1afYLEZYOHiZmlAA+6VvPpro/+poPE1g3VPulrKWyjs8rqCr8dB17/rvFMYO0Pd+TASZXlw3GsXDAVxn6UO/zDvgLlPKotuz4qR2BNuns87A4/xfOKpd5v3nwi7xVXEt209oqk/BV2XH17O9I/T8nCIOErj/9LIPxu9p7Lc58P19Fobn4I+H16e9a1BezyDXefwep7JeDAjKbhycY7hAPeqexJ+v23uH09KV1/aQ/x4yO/z8sNZUBP670qf4J4aof3kafqgTeP+h9Pcp//biP0DRLA/EbT7n0EEdJtn5LeFSgP2XRBBcswM4wBFAQbD0H2wwOdCxE+E7f5nEEFBo89B2sRZhsUZ4tXNqXCOerNs5O8igsbZGUXiFENgqK84+5oi/vws7w6InwjbfVAkBgB8xhIMheM4YGiceLVgaXZGEP84EoO6Gvxo4f/7xpn9mhl5j3qdfkOv42GAx8Rbeh3DgMwr6Dfqt8Ok7xRCfcDyH2s9fMY93jUMu9+B7BWeZsRzIOA/uvcMFHvfBQX51OP7BTMxM/b5UYh3w9OXjuwx9FfG03OKwbHZg5qgHw3JR3LMlw74/RIcw7Az1D2LIsHDHsJLY0+wM4D9uc2iqNldJHwzez/iCW0VvwgkvNs2DfulY4i/BKBwDMy4u7V5FJAvEYVudkn8CIVngCKIGfbsfpvf+gi/CFFAWP47DPUTUcv/GfcDUKjq7CFuQCHSeMkVGJw46geq+LveB2AB9Ecfjn8npb9zknd3Pn4iUvngfPzJEns3/4OGdE3SLMuS0FNknu72evtTo/5cGhD47HEPmYWrCwNvyEUCre5/ZeF9zZS8Ryan3toLTBLi7b3ACISA/qtikHeJNj8C+s/2j3EaqoBvTukLSP2B7/CcjN7+/eeIAuSMey8QcV8zB/AXA9EfGfTvjXHAP3ZjWXZGPD/IuylM7mumEf5ikMLxGfbCi32ZmkpQaCvlH0LqD/ZQ3wFSPxHR/A2pd4PU881RdAu0ZwHcNzZH4fuz567s+6Uccm8FzMAJjUIIn6QPw/HwAnIBXwAGNJf66Y3/PMwUDz9AsOfr9zfhC8kDhL6/pgZtUNZV9PxTD+faxMEhi9unU8Iv9XDWl1cCX352da9QDCetf8s3egJBVSNkvkDT40vBKU8RDA5wkuE1kAKCQH4ITvzjG2UeRac/Whrtw3pAYEbnzILo7q3d62eQx/b4PAq67P6p+5Ud4/6QPb6Dvun6Ae/kP0va+ss9BGr2LDaHg5ciniBnOPEcfm+kzf5EPOZtenwHCP9EfO/vec/U62lC/7CDHoKiur9CYPjXdbBJFPF/5voSL1sI/kVixk972Bw1+7aFCL1F6u+c5L09bO4nIngftb3HcjMGztFTqJT8mXH7pLt73N/IxhuD2/k+369JEdL0+vHjXX1CJ3wSBcjC338nbuUBjnr3uFrvr92/DC38P1p6m+vbGB7lcfqwO3v3Qf/s3+e4zeEwIDPwaA/s768IXRWcN/UDQB9O+pPs/0aAFf38QEHolfqbZvqnObwPeP8Rc08uNlQkOEegkAuNErh/jNpQb9DR02vvgJu/kar3GzcfgJsnSzKjUSNagiYBYDHyVZoXgEofdYmCPxjHPL35caD6G7G+36D6OFAxMxxjWIrGGFQTCl4JB5qY4QA65RxNYDT2rafMR6EKjthvVH0FVOHMjEFdb6G+IqEEY7Af7rXwuWD1NwKAv2H1gbCiZlBxAw7DKcAQOPkqffZzmUAS+xsxwN+o+jhUEffUWJLFCI6kKOj0/dC4/XOR1d9I4vwNqw+EFTVjGYaiGYYGDCBe77N+LieQxP5GKudvVH2gCcRmAOAMQIV+GLp/Ef25YfU3skF/w+oDYwv4jGNpdFs0KIbhJTGfG1V/IyX0N6o+EFX07EXECn8ZXfhsev13/PxroIqbYRxKReYgpH7MPfp0ev13eP1LwIriZhBKHE7TOEk
T1KuWS58OVb/j618DVWBGYBjHoS7+LEVRn9oC4r/D618CVBA2FA4lFQmlOg1oFvvcFhD/HV7/ErCCbh4LCAZgNAtYlqY+txP4dHe936j63KiiiBmgGIzBKArDAYV/blD9Dq5/CVD9RWSBomYEjUHTyOAYBj/HvNG3+N/F1e/w+tfA1Z/HFigWqnkIOAbgNMF8yxv4OFj9Dq9/CVj9qQ2ksBnBUSTqdk2xGPQGf6yf/5dR9Tu8/iVQRZOojRsLcUNj0NN7dbvpzwer3/H1LwGrPw+EfjoT+Du8/iVQhXoCMBiAohxgBAVeC/ZPR1a/4+tfAlZ/TlYotY+gaY7FWA7HKJL+aD+Q+B1h/xKwYtgZjnoi0SwGuO8Vt08Nr8CMhEzF4izH0QzOEh/NVsTvCPuXgBVgZpCkaILFOcB9u8fxZw1aPYHoN6o+OaqIGc6RgKJIDucwaO1eNXGjZhTgSBrg0FWEEox4o+Hjv4ur30H2r4Erbgb9O4rlKEATAICXdRE4M2MxFH+gGair8Lfu9vbvoup3iP1LoIqlkauHUxA5GEm86kT/6fxA4neE/f+3d2VdiiLd9tf0Y7uYh0dmcQQURF/uQkAGGQRBkV9/I8zJNM2szKrKKu1Pu9NShADi7DPEOTuC20AV2qEJBAMujgCekGbOQqsrS1ph9wz7TaCKJTssCZfHJVgEBxH76+Vhrs9Y3TPstwErrEMgKMuQLMAMwzBnGfarGwfeU+w3AauPk1bXB6t7iv0mYPXxMPDaGHz4PcN+E6iisQ5DkgSKIcATnlGNr2xaBH7Prt8GpCgQVrE4TpA45LGfLeZIER0ER2maxHAKQ2j2b7u/p8fy3FF15ahiOyyFA+cGTBKO0GdrDl2d+7vn1m8CVSzSoUA4jhEUQ+IEc91TbfB7av0mQMXQIG5CUQQD0TqcHngWVV2bA7yn1m8CVSiCdjCGolmYD8UZln4dV12dsbrn1m8CVizdoXGAK5YmGRqE7Ve9liN+z6zfBKgYvEOiNEqzFLBJzPnE+GsD1T2vfhOg+jivcG21ZfyeVr8NVH2cV7iyNTyIe1r9JlD1sQO8uhogcU+t3wSsWKKD0gxL0MfHNbNn64NeH6zuufXbgNWHWdBrc4H31PpNgApFiA5DIDRKMDiG0MjTUolXa6zu2fUbwdWHedBrGwcS9/T6TcDq46LN1aHqnl2/CVShCNPBWYLCaZxiaBI5e1rg1cHqnl+/DVih8GnGDEmyLEISOEJf98qzxD3DfiOwIjtwGMiA6Bxi7OpD9nuO/SZwRSAdhkYAnFC4SgfOXjcjlLzn2G8CVSTaIXGUoSmaQRmKYF/PNb2yIjN5z7DfBKhwuoOjFMATBURG4CR71aC659dvAlQk06FwlsZQELFDph5+1aC659dvAlQ/WG7o6qKqe3r9NmD14TTTa7NV9+T6TYDqo1mmGNmhUZKkMZyBqzFgxN9eF42859ZvAlQfrzl7beuikffU+m2g6sNc1fUZq3tq/SZg9YMnJV3dUsbkPbV+G7j6wZOSrq1kQ92T6zeBK5LsICB0YgG2GJIgX3tBHO9QGANgBYJ5gqH+9lRT6p5bvwlMfbw46LWB6p5bvwlQfbiW/7Vh6hOp9bBKk0ex7MOo8iegI48YK53Ng3COkIu2uVAvfdAx/NJx18FRhOO6SiKIkOPhYBcuC47touQXpHyK4NeCJeCupeNFQGRiVPpuFeUZ2O472+qtzFd5Vp2AZXV8XQb0BWQJAvLtRRkQhmMsiaIohZDkhUfWMMBWISzFMCzBvM0fvPOc5lMogV1YliZpGsXAoBJ7IpV+A7IuZReopHoUwyuIUUWdP/3w74M54sAOGLVpXn4En4LHf4+tbDdOdr7tZ1tGVg+QftmmOKWT5pn3/vl/+S6cFKpPttzCf6bwIvJy75TeEUrbrb99OhMQwMPJXl/A+5svdc07e/8eWSz/piCW5ed75s2Fvtn1zPgBJYfbT2zga8vwaC4uWBAniYLs2d2BDdBkRK6TcI8/pJHnwdNctKo/YxpfrB31a4aKeKD9fpA6IDsoRoEBHkniGHvGc8GpDokCUwVGeA/vb+wUzL6TCAJGgBTJMizzxB0/NVQYAYzhSSPEtxmqS/mqL6IY+YFy/LgJlP2oic9eyHkrX1Gn+bNhrTdP28o8OjUrLz+87PusUW6y/T9nF7za/8fq9js18/Madx6uvo1xnbrKHwPjd5X++xSMQukODSMAnCYpnHhi1TwqGEt3cPpUw94oGIpdiCLx74oiCeQTRU8YD27e7TE3zzIQvD2KBGxDPtuTD5Pk3icQw5X6QThFk48G63XeAMXwDginMBJ/eGfYt33JsB2CJBgYu5MI2Id427UoRnYQloQL/j++f1tXf6EU6IJw/B33cqI1ywdPM1g+b3gneveccj1+djFIByFfb8SOW1+05SlGhyOCR1VinsagJ4G1R/qMR1wKuRlsiVPULyoa+bGiER2ANYpk4MoINMO+JteBwRpzKta3noxlOjQF3Bg4HAF6ylyIuFk4OZA80ddvg8YX6nm3AY0V4/quewkaS5gC/NUg52NowAIIQ7MsgeM4hTBnxdwbg8YXaie3AQ0fBXaDvgQNlqJx53utBlydgAWumUQpgqJZgrhlaHyhXHsb0PAcn1ldtBqUy/jL1bdCg+mQD2MiggWiJ2/ZaHzmuXl/KaqjKLqDIQwKgjOMJtCz0jVKsR2EAP3IPr2/DepIokNgFM6SLM4izFMa9FU/X46yv6Gfv0AZvD4NJC9oILp00GMi+EIWlZI4+Z/3U7FPydxLv/2C1mIf44lAwSCBpIGfBwE2wqJngwSyg6EXchEneKIBHlEcZWmWYkkCR8m3eCLpDnPaCvZteLrpIQJN3jKeTk0MinRwhCAphsRoFn0OIP+OjbnpscF/wsbQDJzHhRE49ZBMeJ3TAYEDhZyamAs0PqLz7PDQl6d4vUqaAjy9ig2obwPUTYeN/wlAgTCxwx69DUEfg6HXiMJpEOG8hcJpGh7rPHi7p1TjhcwWDWD5RyzUZx4l4WceV5bHQki+8bNpGGX/XKjNnBaHPWcbHsso6OMXzakqv8yOWzAEbgWtyhG8WvEloeB7gf9Fab0Xsp7WZy9kZZ+2lX7iVNHu9VkvdfPjGR7YEieLBaGdlyQnebYUMUliHez09br5bV6Xrv/Y4osg356EwTrP9gsnkbMwiQEOD6UJCnl8f32SyikDv3pzEiBQ53Cy22Mq/t0bZQm6gyAkXKCGACEX9on7fO92Pmzq8t28QPzhul8A/yzxX9GBT4wEfqgDn0D8WzUB2C4P9uNBxy/zoxUln76KzaN+PHw7/PPCJzrNypAocUK/OfmFxx6JOb9V2zDkAVUf7Pm4ZMjf00uy8zwqAHb69aM3SYbqICco+zm1RBFgyR+LE8yxqvr6JFQHJ1gMoR7fyW/Ryx9dxKU7/ckbQo8cYvykbvMndPMToyq3LnfPqneiqI/1wncdFf5f8UIfWlQMgXPuToKJMwfxWbizKPKqXeZ7AH0+KxAl/wDMcPYTmfrvSrpdRs4TOQ1hO5Cy//
w6o/Ujj6d5RWW8gLTvIzN+gnP9wqp5j814qqNPpBv45ZN8RPyfi1ygjwYIn6IvEsi5Z39X/j+U8yPGPrQRf1h0OINdEN6vcsuInyfDqCBiejCEoEO3fu3l4EPiLH14K4GfHX/Nsy/RWt7FIvIFLH6BG4u9jc+gbfZ/ALr3Brm/jV32Lv6ekrF4h6QJlMFZAibOziemoSjTgbOMnl9vebBgpw5Cg/EzguMsBkbGF/hlIB45aYN9srvfAG38jeihX39i4edlFeZBnjmJ9LL1jP73ss8gzzePkIj9qjpMHvIZkK30GjB+E1X2SwgPvs1PfnmJ5uGXp2D+eRAADqLo04HAvzDLQv1gLHD8dkLrfweSPxvaPLvGhzjhwy5/x5X9aiyPd1gUh6tQYCSLkSh6AZfEs398Im39ICz5bZED84kEytf83yvRoT/vxr7PVIDh/tnQgO1gb8wBTXcuZbaQzneVTnHmCyW9KHWgKjyTFQfQy2j5NnrsxGVeVXkKdji6H/45KXpJCOeExwpaC97ZboBIwNdV1EDh88dTck9bkact4LPnVA7wng9fMXmTBf9gQmTxY2OP9JUg58BrNDFDyQzAJwm+8UOBm4N/xfFKkw3woVsjiaRbBpGNsaWDBCbe7LotVayLWb0ecI4zYAfWcrDdWL0iHFD6NpZ7keoUlchtpwO7CVW5JyW9VLY9a3KghIE/TyVDnG6cIkzy9ToIzI1qhVEyk8uK3rQ7cKGMuQrp5Uyb1u2WoNkUym0L31yGxGufIaXWZYh2u5/0l2qpZi4/TPZ9VdVNXd/nQRganCiknCKr3NDRufk64JseJ0SCJEvSEOw4N3Vuvwn4cMKJ0rp/4Pv9aU6P/8F4axhMkgVnW4g3FD3cQNI1ko1HStO31jzp26kazzEL1bRuDMIRmWBrbLnoUgcuKfBliIy7Lb3dSqxtNR5Ge7mcmVS3HgREKactnxSVHDgiZ6PsSifJw36pOuJUODDbQeviI3GXupq9nWUbyu8L603gJVuyBGfJxhxRDSaYGquNtAIeCMYiThe8LbjBDK3c7qh1sZD0xDnGhYKgbAqnOGDiHhnNV6sad+P+IVTZMD1w0w3BRNastPKsXrbOMve0jcTPZ/y63NigYcw0a3AbIS9su1o2rbrxgZvn6kTiwz3hSRXUpZreLkOelwzEWy4d39SLnaUXmkGM0nZBDbprfaYjs0iqaBen6l1p2DsTrRxB4lQcXCjjayLn5G1vMezvbJqV4n1X5fsRuzOQItHzWD2MI80GnbDmVGLKGJIhEf5Oz7O4IYM87Qluckh0S19lwBDwaqRGXW252D32C+9Mey3VLLak4GUTvLdaBzOd32LLocgHe9dja4kQN25tu32B1+ttkBdqNBRxmmU8JdUcbpEMe1lIA43htRpf1hPf6vURD0aFMVUU/WKSbNyuuCfGXW9cmv25z2waXI8ad6dzMc9owt7h2BL4BdlesT7tjfe6xKucHtTT9WLUjTlR5Q7brKebITjzrEZRajEVljlrl8MsRkCQya8ycpRqLTBp8ogXhoqv2TtpE5TcYS4qDeMZJjoWs97e7QlzAEluj4x7Qp2JoV5M0k3RD3NdVnKyu7FQ1GtX3mpnF2jrxYME2ENelNTBBEn6XbLmTL2PVaUZjqZzyynnzg5aTBmjK2xA1L1I6+KUzBN+t4i2pWRMzUY7MDtVlw1T1oNQ0Nc9fWaBDgEHrU05PEp51W0YZDS1RorpbppViVKN5HbH7X5gsRNdg4+uk82wPwnUkJeBThSzZGMEqy688aVNk4xgebZFHEbxqrsyzY1ho4y2Oh4mSf390ttiUQtchVzQGNsFohwAC80fVpaKL1lKmhSWMMfG8R4fT3he7LrT8AEaYuziHsoDM+DMNqU1MXRTVsTksLFm2qoEDfKFaDjgBnS7YXYjMzEOiWys54rIzcUwdxToP/KV0lTiIp2nqJKjBXh1e8y8x3GIFhPjnUVooF9c20Kr3saBwtnS4/0G6A2/K6hdWLaynrlSvukfO9S1Q8weJGm3Jbf4vM7Mcp6O4u1s2pNSI1tADMYNhPl4mte2TvCKgfjNfGv2QU8EZpHsKdap+jNttIhHDOj4bVYy+WYeDeOxEcIeW2JY2K4WYi90uxwwe0OxVczMKlRemaNKCUSd7OBpshaEQemSKVB/R4wHUVB1DXNRNQ1BeESLTk1UkzeRvzC4UX+C7OJeo06l/XhMYK2StUnLbxRjbbYreKMbCs9W/q6bDG3MG8h9MSVTZwAuRm/Kvi5yTY4saafceraBzHqHrCuk9iTq8oHaKGozr2ZbQ1MFMwm9rLsUF3G72ZNFvy7nDFQOwY8hFYv3BJM0zCBQrIT0ltO+QNfkbrGhx610ABBIjkLnV0pXGcXAIGnA+8n+zlAdtZhMYiHARuFc26V5IUQ02DHUZ3I62CCFhTULx0pWGD3Bmr074iLw6wQobhVbS3acTt142A70chSBK1x5uGvLTa1Mi4LT+TrTsjiYBQ3BrIbgfBWwbT4JtIP3o6pczspYEQJiBPAq0xExFrqbsTkTON40ZTngtoWf4iPbWgDDQTIM5aLJIl2ISCb0+IAj3LHCNz7mWmUOAgVZg67BpGwDgsvxs83DNh76DyjREi3Ix2O7vfaooT2e0yVsZGb9ybpHgUPaQWSsDQDkGQjeUFNPD0SPSwzTGhraHt4AgF+muUbDrGy8GidJtlCme7xQwnwFpED5Otrbp/qc93c4WblzNMBnUkqgaUYxSatEkkTXdO0oaCUv+jPzYKpS3zaJUAYGqB60q7gF9xkPhHa42LYAgoyhhGXfRxe2ta96B3892ckK49WIo5HpMOYOy62b9Q5eBDQd3KNJG8OpRETjuF6y7Qahe63ubZDtXh1oi7gXzRWNHLO7ZTWjqqY7XPEtw+4OYWmEewYiVcbIhHRtYO3AkRPLWSg5Am663R2QfgY+8d3pBpYc5MZsa8eDNgccJrM2xoy7UAZrhx0P5IjtTpHDMI1nqUF1S53RuL0UqhOdC3lT5nS6ejBAS1ra1cCNqXlfhGhMwV/fsfKlAvx6d88IQld3RD3vT9IFMmH3XEDNbDEd7wlkOWq4mRwsBGE4EPckwPx81o+CUmoYNx7HudNDKHffA7rTcKY8nMC7wLVwH1jpwlbSyaCfKSlaLd141JpWNI8Gwp6BvqULVWmlkXID2gMB71DkgsYWp/3CBffYtmSAqOqKWa/h7SayxAPTb6pCUBwoag/TkQC7erdguyCy2RvZDDnQPc6S+wQMOXcxVdklsqkG8+2aZ6dLrN3GwD5PJjbYiSJdfcYHBIxkoI4FQ4LzRBA31c2cccfFDE3W1ITr900V2AwJRF8kCKgPkwRYW2DHwFFkxraUvubtKURDNjs+X1OGzUW67Gk7bGkBLEvhpr+sTXga87Dh+GjCdaU1dyFi1PLVAZhHPTfMkj7Gx9zEtMZGnxTmqgrj8+8cAMHVD8+mCZOdC09XAPsxF/hHcPrDd1FEcPYTC6t8LQn27oD0UhrrnczXBXYHKvLCpxJf2PcmvnAM75yV1HG8c2H+J
EV1Lk3tJjtPy19+w4gW/ZtZrKfPJ0XpT2SxvlbK/juZqkeB/bCM/ahNv5DT+kVlvlRd+WI+nvyDk+7PzvVmiutzXh54EaH38aTVk3mvm+rj+bCfnap/pkrXMcv8Gy0bXMOQJFkcGDSUen4S+XM9H+3g+Ek2/u2EVhztkAyG4ASOw8oAgl8gWINzoPS3eTPmE/X/byrM/qh3IVUJQTAKpwjQUQj7pjQLfkWx58T0hZX8qAvVPuL7sqOfICP/bwUGzDsLwzxK8F/0bXH9ksjI7xPZb1gkgfmD5p+5ZP7ty+b/k6b/bvm/BN2ndrAORp6WWc8IcB0WPfnxaY3PU8sPolr2LdTB5u+KdYknmsO9YvtrcfD3VGFpDD4b/KUM+xpPZ0XYs3DgnRrshXMwwGe+oRg8OVT2jAfzDnPt95HCLrFjzhH6Qjd0IT8kcj/ykfgZeDD2HDwo+zPg+SWofIbNexlSf4bdSGPo82j8aZIF+TkovGUXYux5Wyh11tY3MwaI37DyEY7/jmWLzlv5Smhwd+q/36n/YN7RE+afzCF9ifp5YUDxbesP0b8/OXERyF+A5fTBkmHIkVHxEyvVfTPAEn9VXSe8nsfFT/Ai38ILu0RPpb8PX5dI2Q+S8qIdPOVj973g4Ni/35TG+nlUPtJa/R0s6j/xWIFEnz3vOdv1J9egvAj0Y1f9TwIa7RDn8/EuYBr/o5j+ykP1bpiiJprwO/tAUeNG1UTyIEVtbx4panngY87SDKdLI+7SGEbJ2EpcZGNUlMrp2PwHgzDMqqVU6lEDS/qiAuvkG0nSjVUahj3OddaWM+vN9d2M182kayREtN8qa2OyVuaqTWwoGdVjxQzqIp2jO1oLD66WbekN5TYZxUqwwghFyNMDChLZKHo1XrkkSRe72Ida09d6q17DjVtTG8kuKraGPdIZxHr5PpN5p2i3zaiu2q2L5qrpFpESBss1OTYCOPdsuFeXXUg+69JejW9nUGu1UDflscgF+pgABoBnfC5XuLkKPuo6KyHivIhMuXHjXuGkBR/VdEHlI22Tj1albklmXy0ESNk6zHRh6mlHWpvaOx69y9hVFFkT+FeF21E5wat9Pab9OA8kPQosamfgDaMNFhVGLFuSYWNYi6W92aDCBoY6kdbidAQZOvLD/+SyJHnBn+79Ui5QITZGOpWY87TZTmXBbSGxQ/S6OiII8nzXDRoxhBXqli+0mGC6qq6bht+FxCxYiNe3m2S0A9cFekCRQ3WqHobBtNudJgce7EqGgaOkA6WClfaVuFbVHq/2BEHJId0HXk1psWo+22OQ22NQ2mrQMqi8mE3WRJv2m22fq+3yoOaOFPKcyg/kGhNFqRl6fFFZQonaMUpVFpS1qSqj6Z4Re8mQ34C/fCcHG2cmb5Wmtxn3eNNUAzNYB1WybbbptNdKjMZBrhWcKy0biCUDg6iITkrWYULtQjbZhut9GsCCdiY1DU2NuFCspsicmfVlI1goRncrt/082LvaPi3QfW0d9rMC5UOTHk4Vfr1RJ9HQjmxCTXrRIlp1G3cNb1nNR4MJshHMRJ24GX8wk4XZi8PtUkFGsJtahtuPdyhROdWMiUft3JoU/oQbTYFMNHUoeOADz62lYA3+slkpp+KmmKe6La20rOXWkTWARDjSre3ZDAgNclHm29QcCFSRF1Kk08Qh9LMNZcrVUiBXg3AZEWNIYAKXwjuzZGPHFDte72DH7CA1h5InhRX2cDppw9bODothVmrdCVIN9np3zXSTYCvnoaQWseWAXhtuicMa9HWrqU3KoDD3zc9HUOVXCOWN3Xq8s0taS0eiiwcNJwSMxOlB7GDVRoAG2K7x6XYem8vUNba2vPebmXoYKa0bjafrRR/162a8HKWQP3KoF8JkHfSbGXcYGdsRZ5colfgWTnlqMpAfKEhdobCEuPDEyBtnimh5GyjUFbFwyvmiv5kPRMLkMa9c6pZq8dkU3GWSOFEzr7hIkmZWsvC1I1p5zQRi34QU1BJ5T0AWKhm5mWSwqylaamDveI6N4vGK23PC0IiGjtJsDaWpB6hT2TklBBzQStrtU55fB1ZRh5DAkerpIu1VAx0BAjz0poYeAEPbuLY8lXuH2miirRFmLUkmVaBq3ZaQDEvej/lG729mWxkzLMswVdXicnm4t6Adh61oPsr6XYhCRJyMQlWXnIHlOKLuNEPQqTNur4oI2/VX48zVpEY1ghwyACUh0IXDkB4vciHFI9kukyZsnRqy5Sg/LvpRnhOsR4ObiU0ntstd2V0sewfWnHD6fiwGandzJG92Ub/q2ijh72yaUfiJqhbFYRt3h7GFejakZM3JzKTWm8KJin6gWuCVacomyKYU4tnWyB9L4TywiyLdRIuxis3adMCNocbuYffruZIXQjyn7Tnj7yYqgdsQaF1ljLJKbDAjkUGGbjHqGuYsQmdrIuoaiJeNxRmaOOCsS2FZunEfnLtqKdodS0JD1UQtBuBIyHQMnKIqfXO9XlB+G1auaXV5GtpkdxqtdpkChJ91mW3ZP+jrXbNGw6PUonnaywSjxgcxryDtdMTtFW6fDwRUNkxXAq5qsg7HY3wBr9VHeqOhtK+LgqoHFTLA/BJz9cHRVtKJK5bgb7MbhdVyZlXELu1uwmkLeZdUXQo6JKeKg34IEE82UszNxcGBB5YFuOWkF3hWEVr7+shB3JHMfqHwcaCZKZTGGHSNtnV44IxyQpi1TqWucWSMYgG12JDjVQb/xeOEYS1s71PYIuJsmSXF+XY27C4p0VumK0qym9GByXqTSePVm2kMPYxM7NfDw3TJequsoZUeJLDxwLscdnoM7pdXWc/PzeawxUetx/RGmhTUZUWDXt6XPgK0ieAGqC8SO3lf1doD+1I9qJG6ifrZBtoGyOQaitxeCzerddhmVbEn5qOSw1YzoVWIMRc4CEn602Q7CncHZtcLtmwyn6MUaHJrD9KpFdFAJ+2YZlOUmDhOj2NtD3cR8tAyO93ZWRGkCSutV4d2Qvr2kCQxLzMP2WgC4JbrOqXZ1BYipFqFix1tr1pyjaeixHpbtx1OtSnCKjzh85EBmtEnhblPGYyAtDUocrtsiADupyx70xQZQ0s0OMxG+EyjrJVVr/147zqxvgRYVdRongBTNGJWYmP2Q9MUAktSQ3k9DNcRMIm8oMqzArEkg5sOF6UWlLZLsDDqWBrHvg9BnDFOOJNfJMMwMAfyGvgN+F8sD6y+rOvAMkL6+Hz3YN+g8Y0xWk703WEm60SPlntHfprZo/22QsYhgigBOjFRLXTLbA/PkbRtrq136ELHJ5NVS/hqxIXqxurNB/zyyLqDlL7SEQLgWZx1L5oLfc0Q+ok5rusa6U2EoCi2pTr1lmuvXkbksJeMCXdX2gLkhh+GoiyZjdaKmhWkigL8ZIAAv4z0wwBEUNnAxqWGkyzgyxSyP1mn3CF1eL2vLCEDXNclzrI8W8cyhEi9Mb4S5qUiDSkP9rsr6xTr7wxgslW2qUfYQ0yybBe94cgwY/pAwyin1pYTOS12a9g9w8docYXs8hUhNCihMJOImjDqL9DVPl7A
+l+KfT2KJ5m3z/kisQ56YeG98zTobxzzXKrh/90R+TD3jnNKq9KJsuNIHM4qfWIOvU4ffWWw/d9IH/0IZGepIgp5C7KLqSLs+1JFl2bhfTEb/hmIfbmVe079j+XUP0YtSkM+zsnrdWqIIv56Nv2/wPW4U/2+3/qeuXj6AnIv8fRwukN+E3Zx6v1M/e2YX+2Ygv/3abkJ5bPLTfyOQOBPP0MFXt4jJYb4XcAEKOi8WtrhNR+SPPv1wjP4LpEIfiIshYOIHIr2pWwPlDF8iPlw6f8B</diagram></mxfile>
2109.14651/paper_text/intro_method.md ADDED
@@ -0,0 +1,16 @@
+ # Method
+
+ In this section, we detail the training procedures and algorithms of our two-stage framework for source-free unsupervised domain-adaptive 3D object detection. Algorithm [\[alg:1\]](#alg:1){reference-type="ref" reference="alg:1"} details the iterative pseudo-label generation procedure illustrated in Figure 3 of the main paper.
+
+ Algorithm [\[alg:cap\]](#alg:cap){reference-type="ref" reference="alg:cap"} explains the steps involved in training the uncertainty-aware mean teacher with the pseudo-labels generated in the previous stage.
+
+ :::: algorithm
+ **Input:** source-trained model $\phi^s$, unannotated target data\
+ **Output:** $\{Y^{pt}_{i,J}\}_{i=1}^M$
+
+ ::: algorithmic
+ - Perform inference with $\phi^s$ to obtain $\{Y^{pt}_{i,0}\}_{i=1}^M$
+ - Threshold with $\delta[0]$
+ - For each round $j = 1, \dots, J$:
+   - Train $\phi$ on the target data of $M$ samples annotated with $\{Y^{pt}_{i,j-1}\}_{i=1}^M$, obtaining the trained model $\phi_j^{pt}$
+   - Perform inference with $\phi_j^{pt}$ to obtain pseudo-labels $\{Y^{pt}_{i,j}\}_{i=1}^M$
+   - Threshold with $\delta[j]$
+ - Output the final pseudo-labels $\{Y^{pt}_{i,J}\}_{i=1}^M$
+ :::
+ ::::
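+
+ As a rough illustration, the iterative pseudo-label generation above can be written as a short Python loop. This is a minimal sketch, not the authors' implementation: `infer` and `train_detector` are hypothetical callables standing in for the detector's inference and training routines, and `deltas` holds the per-round confidence thresholds $\delta[0], \dots, \delta[J]$.
+
+ ```python
+ def generate_pseudo_labels(source_model, target_data, deltas, train_detector, infer):
+     """Iteratively refine pseudo-labels on unannotated target data.
+
+     source_model   -- detector phi^s trained on the source domain
+     target_data    -- list of M unannotated target samples
+     deltas         -- confidence thresholds delta[0..J], one per round
+     train_detector -- (hypothetical) callable(data, labels) -> trained model phi_j^pt
+     infer          -- (hypothetical) callable(model, sample) -> list of (box, score)
+     """
+     # Round 0: seed pseudo-labels from the source-trained model, then threshold.
+     labels = [
+         [(box, s) for box, s in infer(source_model, x) if s >= deltas[0]]
+         for x in target_data
+     ]
+     # Rounds 1..J: retrain on the current pseudo-labels, re-infer, re-threshold.
+     for j in range(1, len(deltas)):
+         model_j = train_detector(target_data, labels)
+         labels = [
+             [(box, s) for box, s in infer(model_j, x) if s >= deltas[j]]
+             for x in target_data
+         ]
+     return labels  # the final pseudo-labels {Y^pt_{i,J}}, i = 1..M
+ ```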
2111.00162/record.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "arxiv_id": "2111.00162",
+   "month": "2021_11",
+   "year": 2021,
+   "conference": "NEURIPS",
+   "title": "You are caught stealing my winning lottery ticket! Making a lottery ticket claim its ownership",
+   "arxiv_url": "https://arxiv.org/abs/2111.00162",
+   "source": {
+     "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_11/main_diagram_database/2111.00162",
+     "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_11/tex_files_extracted/2111.00162",
+     "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_11/main_diagram_database/2111.00162/paper_text/paper.md",
+     "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_11/main_diagram_database/2111.00162/metadata.json",
+     "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_11/main_diagram_database/2111.00162/paper_text/paper.md",
+     "intro_method_from_kind": "markdown"
+   },
+   "files": {
+     "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2111.00162/main_diagram/main_diagram.drawio",
+     "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2111.00162/main_diagram/main_diagram.png",
+     "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2111.00162/main_diagram/main_diagram.pdf",
+     "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2111.00162/paper_text/intro_method.md",
+     "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2111.00162/paper.pdf",
+     "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2111.00162/latex_source"
+   },
+   "status": {
+     "copy_drawio": "exists",
+     "copy_png": "exists",
+     "diagram_pdf": "pdf_exists",
+     "intro_method": "exists",
+     "paper_pdf": "exists",
+     "latex": "exists"
+   }
+ }
2111.06636/record.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "arxiv_id": "2111.06636",
+   "month": "2021_11",
+   "year": 2022,
+   "conference": "ICLR",
+   "title": "Closed-Loop Data Transcription to an LDR via Minimaxing Rate Reduction",
+   "arxiv_url": "https://arxiv.org/abs/2111.06636",
+   "source": {
+     "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_11/main_diagram_database/2111.06636",
+     "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_11/tex_files_extracted/2111.06636",
+     "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_11/main_diagram_database/2111.06636/paper_text/paper.md",
+     "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_11/main_diagram_database/2111.06636/metadata.json",
+     "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_11/main_diagram_database/2111.06636/paper_text/paper.md",
+     "intro_method_from_kind": "markdown"
+   },
+   "files": {
+     "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2111.06636/main_diagram/main_diagram.drawio",
+     "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2111.06636/main_diagram/main_diagram.png",
+     "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2111.06636/main_diagram/main_diagram.pdf",
+     "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2111.06636/paper_text/intro_method.md",
+     "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2111.06636/paper.pdf",
+     "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2111.06636/latex_source"
+   },
+   "status": {
+     "copy_drawio": "exists",
+     "copy_png": "exists",
+     "diagram_pdf": "pdf_exists",
+     "intro_method": "exists",
+     "paper_pdf": "exists",
+     "latex": "exists"
+   }
+ }
2112.00735/record.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "arxiv_id": "2112.00735",
+   "month": "2021_12",
+   "year": 2022,
+   "conference": "AAAI",
+   "title": "Reference-Guided Pseudo-Label Generation for Medical Semantic Segmentation",
+   "arxiv_url": "https://arxiv.org/abs/2112.00735",
+   "source": {
+     "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.00735",
+     "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/tex_files_extracted/2112.00735",
+     "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.00735/paper_text/paper.md",
+     "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.00735/metadata.json",
+     "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.00735/paper_text/paper.md",
+     "intro_method_from_kind": "markdown"
+   },
+   "files": {
+     "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.00735/main_diagram/main_diagram.drawio",
+     "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.00735/main_diagram/main_diagram.png",
+     "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.00735/main_diagram/main_diagram.pdf",
+     "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.00735/paper_text/intro_method.md",
+     "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.00735/paper.pdf",
+     "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.00735/latex_source"
+   },
+   "status": {
+     "copy_drawio": "exists",
+     "copy_png": "exists",
+     "diagram_pdf": "pdf_exists",
+     "intro_method": "exists",
+     "paper_pdf": "exists",
+     "latex": "exists"
+   }
+ }
2112.02306/record.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "arxiv_id": "2112.02306",
+   "month": "2021_12",
+   "year": 2022,
+   "conference": "CVPR",
+   "title": "Toward Practical Monocular Indoor Depth Estimation",
+   "arxiv_url": "https://arxiv.org/abs/2112.02306",
+   "source": {
+     "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.02306",
+     "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/tex_files_extracted/2112.02306",
+     "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.02306/paper_text/paper.md",
+     "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.02306/metadata.json",
+     "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.02306/paper_text/paper.md",
+     "intro_method_from_kind": "markdown"
+   },
+   "files": {
+     "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.02306/main_diagram/main_diagram.drawio",
+     "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.02306/main_diagram/main_diagram.png",
+     "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.02306/main_diagram/main_diagram.pdf",
+     "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.02306/paper_text/intro_method.md",
+     "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.02306/paper.pdf",
+     "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.02306/latex_source"
+   },
+   "status": {
+     "copy_drawio": "exists",
+     "copy_png": "exists",
+     "diagram_pdf": "pdf_exists",
+     "intro_method": "exists",
+     "paper_pdf": "exists",
+     "latex": "exists"
+   }
+ }
2112.07194/record.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "arxiv_id": "2112.07194",
+   "month": "2021_12",
+   "year": 2022,
+   "conference": "AAAI",
+   "title": "MDD-Eval: Self-Training on Augmented Data for Multi-Domain Dialogue Evaluation",
+   "arxiv_url": "https://arxiv.org/abs/2112.07194",
+   "source": {
+     "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.07194",
+     "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/tex_files_extracted/2112.07194",
+     "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.07194/paper_text/paper.md",
+     "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.07194/metadata.json",
+     "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.07194/paper_text/paper.md",
+     "intro_method_from_kind": "markdown"
+   },
+   "files": {
+     "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.07194/main_diagram/main_diagram.drawio",
+     "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.07194/main_diagram/main_diagram.png",
+     "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.07194/main_diagram/main_diagram.pdf",
+     "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.07194/paper_text/intro_method.md",
+     "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.07194/paper.pdf",
+     "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.07194/latex_source"
+   },
+   "status": {
+     "copy_drawio": "exists",
+     "copy_png": "exists",
+     "diagram_pdf": "pdf_exists",
+     "intro_method": "exists",
+     "paper_pdf": "exists",
+     "latex": "exists"
+   }
+ }
2112.07954/record.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "arxiv_id": "2112.07954",
+   "month": "2021_12",
+   "year": 2022,
+   "conference": "ICLR",
+   "title": "Object Pursuit: Building a Space of Objects via Discriminative Weight Generation",
+   "arxiv_url": "https://arxiv.org/abs/2112.07954",
+   "source": {
+     "paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.07954",
+     "tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/tex_files_extracted/2112.07954",
+     "paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.07954/paper_text/paper.md",
+     "metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.07954/metadata.json",
+     "intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.07954/paper_text/paper.md",
+     "intro_method_from_kind": "markdown"
+   },
+   "files": {
+     "main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.07954/main_diagram/main_diagram.drawio",
+     "main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.07954/main_diagram/main_diagram.png",
+     "main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.07954/main_diagram/main_diagram.pdf",
+     "intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.07954/paper_text/intro_method.md",
+     "paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.07954/paper.pdf",
+     "latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.07954/latex_source"
+   },
+   "status": {
+     "copy_drawio": "exists",
+     "copy_png": "exists",
+     "diagram_pdf": "pdf_exists",
+     "intro_method": "exists",
+     "paper_pdf": "exists",
+     "latex": "exists"
+   }
+ }