shaocongma committed on
Commit
1457d21
1 Parent(s): 6150071

Reformat LLM interaction logic. Update prompts.


Bug fix:
The huggingchat package doesn't work; the Specter API is now used directly instead.

New UI now includes a prompts mode and domain knowledge.

Testing:
Access the domain knowledge database.
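
Note: the "Specter API" mentioned above is most likely Semantic Scholar's public SPECTER paper-embedding service. A minimal sketch of calling it directly, for context only; the endpoint URL and the request/response shapes are assumptions about that public API, not something taken from this commit:

# Sketch: query the public SPECTER embedding endpoint directly (assumed API, not from this repo).
import requests

SPECTER_URL = "https://model-apis.semanticscholar.org/specter/v1/invoke"  # assumed endpoint

def embed_papers(papers):
    # `papers`: list of {"paper_id": ..., "title": ..., "abstract": ...} dicts.
    response = requests.post(SPECTER_URL, json=papers, timeout=60)
    response.raise_for_status()
    # Assumed response shape: {"preds": [{"paper_id": ..., "embedding": [...]}, ...]}
    return {p["paper_id"]: p["embedding"] for p in response.json()["preds"]}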

api_wrapper.py CHANGED
@@ -19,10 +19,15 @@ import json, time
19
  from utils.file_operations import make_archive
20
 
21
 
22
- # GENERATOR_MAPPING = {"draft": generate_draft}
23
- GENERATOR_MAPPING = {"draft": None}
 
24
 
25
- def generator_wrapper(path_to_config_json):
 
 
 
 
26
  # Read configuration file and call corresponding function
27
  with open(path_to_config_json, "r", encoding='utf-8') as f:
28
  config = json.load(f)
 
19
  from utils.file_operations import make_archive
20
 
21
 
22
+ GENERATOR_MAPPING = {"fake": None, # a fake generator
23
+ "draft": generate_draft # generate academic paper
24
+ }
25
 
26
+ def generator_wrapper(config):
27
+ generator = GENERATOR_MAPPING[config["generator"]]
28
+
29
+
30
+ def generator_wrapper_from_json(path_to_config_json):
31
  # Read configuration file and call corresponding function
32
  with open(path_to_config_json, "r", encoding='utf-8') as f:
33
  config = json.load(f)
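
As the hunk above shows, the old path-based generator_wrapper is split into a dict-based generator_wrapper (still a stub in this commit) and generator_wrapper_from_json, which loads that dict from disk. A minimal sketch of how the stub might dispatch and what a config file could look like; only the "generator" key is confirmed by this diff, the remaining keys are hypothetical and simply mirror generate_draft's parameters:

# Hypothetical config.json for generator_wrapper_from_json (only "generator" is confirmed by the diff):
# {"generator": "draft", "title": "Playing Atari with Deep Reinforcement Learning", "description": ""}

def generator_wrapper(config):
    # Look up the requested generator and forward the remaining keys as keyword arguments (sketch).
    generator = GENERATOR_MAPPING[config["generator"]]
    kwargs = {k: v for k, v in config.items() if k != "generator"}
    return generator(**kwargs)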
app.py CHANGED
@@ -8,12 +8,8 @@ from references_generator import generate_top_k_references
8
  # todo:
9
  # 6. get logs when the procedure is not completed. *
10
  # 7. 自己的文件库; 更多的prompts
11
- # 8. Decide on how to generate the main part of a paper * (Langchain/AutoGPT
12
- # 1. 把paper改成纯JSON?
13
  # 2. 实现别的功能
14
  # 3. Check API Key GPT-4 Support.
15
- # 8. Re-build some components using `langchain`
16
- # - in `gpt_interation`, use LLM
17
  # future:
18
  # generation.log sometimes disappears (ignore this)
19
  # 1. Check if there are any duplicated citations
@@ -39,22 +35,107 @@ else:
39
  try:
40
  openai.Model.list()
41
  IS_OPENAI_API_KEY_AVAILABLE = True
42
- except Exception as e:
 
43
  IS_OPENAI_API_KEY_AVAILABLE = False
44
 
 
45
  ALL_TEMPLATES = list_folders("latex_templates")
 
 
46
 
47
 
48
  def clear_inputs(*args):
49
  return "", ""
50
 
 
51
  def clear_inputs_refs(*args):
52
  return "", 5
53
 
54
 
55
- def wrapped_generator(paper_title, paper_description, openai_api_key=None,
56
- paper_template="ICLR2022", tldr=True, selected_sections=None, bib_refs=None, model="gpt-4",
57
- cache_mode=IS_CACHE_AVAILABLE):
 
 
 
 
 
58
  # if `cache_mode` is True, then follow the following steps:
59
  # check if "title"+"description" have been generated before
60
  # if so, download from the cloud storage, return it
@@ -69,7 +150,7 @@ def wrapped_generator(paper_title, paper_description, openai_api_key=None,
69
  raise gr.Error(f"Key错误. Error: {e}")
70
 
71
  if cache_mode:
72
- from utils.storage import list_all_files, download_file, upload_file
73
  # check if "title"+"description" have been generated before
74
  input_dict = {"title": paper_title, "description": paper_description,
75
  "generator": "generate_draft"}
@@ -80,25 +161,19 @@ def wrapped_generator(paper_title, paper_description, openai_api_key=None,
80
  # download from the cloud storage, return it
81
  download_file(file_name)
82
  return file_name
83
- else:
84
- try:
85
- # generate the result.
86
- # output = fake_generate_backgrounds(title, description, openai_key)
87
- output = generate_draft(paper_title, paper_description, template=paper_template,
88
- tldr=tldr, sections=selected_sections, bib_refs=bib_refs, model=model)
89
- # output = generate_draft(paper_title, paper_description, template, "gpt-4")
90
- upload_file(output)
91
- return output
92
- except Exception as e:
93
- raise gr.Error(f"生成失败. Error {e.__name__}: {e}")
94
- else:
95
- try:
96
- # output = fake_generate_backgrounds(title, description, openai_key)
97
- output = generate_draft(paper_title, paper_description, template=paper_template,
98
- tldr=tldr, sections=selected_sections, bib_refs=bib_refs, model=model)
99
- except Exception as e:
100
- raise gr.Error(f"生成失败. Error: {e}")
101
- return output
102
 
103
 
104
  def wrapped_references_generator(paper_title, num_refs, openai_api_key=None):
@@ -108,107 +183,61 @@ def wrapped_references_generator(paper_title, num_refs, openai_api_key=None):
108
  return generate_top_k_references(paper_title, top_k=num_refs)
109
 
110
 
111
-
112
- theme = gr.themes.Default(font=gr.themes.GoogleFont("Questrial"))
113
- # .set(
114
- # background_fill_primary='#E5E4E2',
115
- # background_fill_secondary = '#F6F6F6',
116
- # button_primary_background_fill="#281A39"
117
- # )
118
-
119
- ACADEMIC_PAPER = """## 一键生成论文初稿
120
-
121
- 1. 在Title文本框中输入想要生成的论文名称(比如Playing Atari with Deep Reinforcement Learning).
122
- 2. 点击Submit. 等待大概十五分钟(全文).
123
- 3. 在右侧下载.zip格式的输出,在Overleaf上编译浏览.
124
- """
125
-
126
-
127
- REFERENCES = """## 一键搜索相关论文
128
- (此功能已经被整合进一键生成论文初稿)
129
- 1. 在Title文本框中输入想要搜索文献的论文(比如Playing Atari with Deep Reinforcement Learning).
130
- 2. 点击Submit. 等待大概十分钟.
131
- 3. 在右侧JSON处会显示相关文献.
132
- """
133
-
134
- REFERENCES_INSTRUCTION = """### References
135
- 这一行用于定义AI如何选取参考文献. 目前是两种方式混合:
136
- 1. GPT自动根据标题生成关键字,使用Semantic Scholar搜索引擎搜索文献,利用Specter获取Paper Embedding来自动选取最相关的文献作为GPT的参考资料.
137
- 2. 用户上传bibtex文件,使用Google Scholar搜索摘要作为GPT的参考资料.
138
- 关于有希望利用本地文件来供GPT参考的功能将在未来实装.
139
- """
140
-
141
- DOMAIN_KNOWLEDGE_INSTRUCTION = """### Domain Knowledge
142
- (暂未实装)
143
- 这一行用于定义AI的知识库. 将提供两种选择:
144
- 1. 各个领域内由专家预先收集资料并构建的的FAISS向量数据库. 每个数据库内包含了数百万页经过同行评议的论文和专业经典书籍.
145
- 2. 自行构建的使用OpenAI text-embedding-ada-002模型创建的FAISS向量数据库.
146
- """
147
-
148
- OTHERS_INSTRUCTION = """### Others
149
-
150
- """
151
-
152
-
153
  with gr.Blocks(theme=theme) as demo:
154
- gr.Markdown('''
155
- # Auto-Draft: 学术写作辅助工具
156
-
157
- 本Demo提供对[Auto-Draft](https://github.com/CCCBora/auto-draft)的auto_draft功能的测试.
158
- 通过输入想要生成的论文名称(比如Playing atari with deep reinforcement learning),即可由AI辅助生成论文模板.
159
-
160
- ***2023-06-08 Update***:
161
- * 目前对英文的生成效果更好. 如果需要中文文章可以使用[GPT学术优化](https://github.com/binary-husky/gpt_academic)的`Latex全文翻译、润色`功能.
162
- * GPT3.5模型可能会因为Token数不够导致一部分章节为空. 可以在高级设置里减少生成的章节.
163
-
164
- ***2023-05-17 Update***: 我的API的余额用完了, 所以这个月不再能提供GPT-4的API Key. 这里为大家提供了一个位置输入OpenAI API Key. 同时也提供了GPT-3.5的兼容. 欢迎大家自行体验.
165
-
166
- 如果有更多想法和建议欢迎加入QQ群里交流, 如果我在Space里更新了Key我会第一时间通知大家. 群号: ***249738228***.
167
- ''')
168
 
169
  with gr.Row():
170
  with gr.Column(scale=2):
171
  key = gr.Textbox(value=openai_key, lines=1, max_lines=1, label="OpenAI Key",
172
  visible=not IS_OPENAI_API_KEY_AVAILABLE)
173
-
174
- # generator = gr.Dropdown(choices=["学术论文", "文献总结"], value="文献总结",
175
- # label="Selection", info="目前支持生成'学术论文'和'文献总结'.", interactive=True)
176
-
177
  # 每个功能做一个tab
178
  with gr.Tab("学术论文"):
179
  gr.Markdown(ACADEMIC_PAPER)
180
 
181
  title = gr.Textbox(value="Playing Atari with Deep Reinforcement Learning", lines=1, max_lines=1,
182
  label="Title", info="论文标题")
 
 
 
 
183
  with gr.Accordion("高级设置", open=False):
184
  with gr.Row():
185
- description_pp = gr.Textbox(lines=5, label="Description (Optional)", visible=True,
186
- info="对希望生成的论文的一些描述. 包括这篇论文的创新点, 主要贡献, 等.")
187
- with gr.Row():
188
- template = gr.Dropdown(label="Template", choices=ALL_TEMPLATES, value="Default",
189
- interactive=True,
190
- info="生成论文的参考模板.")
191
- model_selection = gr.Dropdown(label="Model", choices=["gpt-4", "gpt-3.5-turbo"],
192
- value="gpt-3.5-turbo",
193
- interactive=True,
194
- info="生成论文用到的语言模型.")
195
- sections = gr.CheckboxGroup(
196
- choices=["introduction", "related works", "backgrounds", "methodology", "experiments",
197
- "conclusion", "abstract"],
198
- type="value", label="生成章节", interactive=True,
199
- value=["introduction", "related works"])
 
 
 
 
 
200
 
201
  with gr.Row():
202
  with gr.Column(scale=1):
203
  gr.Markdown(REFERENCES_INSTRUCTION)
204
 
205
  with gr.Column(scale=2):
206
- search_engine = gr.Dropdown(label="Search Engine",
207
- choices=["ArXiv", "Semantic Scholar", "Google Scholar", "None"],
208
- value="Semantic Scholar",
209
- interactive=False,
210
- visible=False,
211
- info="用于决定GPT用什么搜索引擎来搜索文献. (暂不支持修改)")
 
 
212
  tldr_checkbox = gr.Checkbox(value=True, label="TLDR;",
213
  info="选择此筐表示将使用Semantic Scholar的TLDR作为文献的总结.",
214
  interactive=True)
@@ -223,11 +252,20 @@ with gr.Blocks(theme=theme) as demo:
223
  gr.Markdown(DOMAIN_KNOWLEDGE_INSTRUCTION)
224
 
225
  with gr.Column(scale=2):
 
 
226
  domain_knowledge = gr.Dropdown(label="预载知识库",
227
- choices=["(None)", "Machine Learning"],
228
- value="(None)",
229
- interactive=False,
230
- info="使用预先构建的知识库. (暂未实装)")
231
  local_domain_knowledge = gr.File(label="本地知识库 (暂未实装)", interactive=False)
232
  with gr.Row():
233
  clear_button_pp = gr.Button("Clear")
@@ -237,9 +275,9 @@ with gr.Blocks(theme=theme) as demo:
237
  gr.Markdown(REFERENCES)
238
 
239
  title_refs = gr.Textbox(value="Playing Atari with Deep Reinforcement Learning", lines=1, max_lines=1,
240
- label="Title", info="论文标题")
241
  slider_refs = gr.Slider(minimum=1, maximum=100, value=5, step=1,
242
- interactive=True, label="最相关的参考文献数目")
243
  with gr.Row():
244
  clear_button_refs = gr.Button("Clear")
245
  submit_button_refs = gr.Button("Submit", variant="primary")
@@ -254,25 +292,29 @@ with gr.Blocks(theme=theme) as demo:
254
  ''')
255
 
256
  with gr.Column(scale=1):
257
- style_mapping = {True: "color:white;background-color:green",
258
- False: "color:white;background-color:red"} # todo: to match website's style
259
- availability_mapping = {True: "AVAILABLE", False: "NOT AVAILABLE"}
260
- gr.Markdown(f'''## Huggingface Space Status
261
- 当`OpenAI API`显示AVAILABLE的时候这个Space可以直接使用.
262
- 当`OpenAI API`显示NOT AVAILABLE的时候这个Space可以通过在左侧输入OPENAI KEY来使用. 需要有GPT-4的API权限.
263
- 当`Cache`显示AVAILABLE的时候, 所有的输入和输出会被备份到我的云储存中. 显示NOT AVAILABLE的时候不影响实际使用.
264
- `OpenAI API`: <span style="{style_mapping[IS_OPENAI_API_KEY_AVAILABLE]}">{availability_mapping[IS_OPENAI_API_KEY_AVAILABLE]}</span>. `Cache`: <span style="{style_mapping[IS_CACHE_AVAILABLE]}">{availability_mapping[IS_CACHE_AVAILABLE]}</span>.''')
265
  file_output = gr.File(label="Output")
266
  json_output = gr.JSON(label="References")
267
 
 
 
268
  clear_button_pp.click(fn=clear_inputs, inputs=[title, description_pp], outputs=[title, description_pp])
269
  submit_button_pp.click(fn=wrapped_generator,
270
- inputs=[title, description_pp, key, template, tldr_checkbox, sections, bibtex_file,
271
- model_selection], outputs=file_output)
 
 
272
 
273
  clear_button_refs.click(fn=clear_inputs_refs, inputs=[title_refs, slider_refs], outputs=[title_refs, slider_refs])
274
  submit_button_refs.click(fn=wrapped_references_generator,
275
- inputs=[title_refs, slider_refs, key], outputs=json_output)
276
 
277
  demo.queue(concurrency_count=1, max_size=5, api_open=False)
278
  demo.launch(show_error=True)
 
8
  # todo:
9
  # 6. get logs when the procedure is not completed. *
10
  # 7. 自己的文件库; 更多的prompts
 
 
11
  # 2. 实现别的功能
12
  # 3. Check API Key GPT-4 Support.
 
 
13
  # future:
14
  # generation.log sometimes disappears (ignore this)
15
  # 1. Check if there are any duplicated citations
 
35
  try:
36
  openai.Model.list()
37
  IS_OPENAI_API_KEY_AVAILABLE = True
38
+ # except Exception as e:
39
+ except openai.error.AuthenticationError:
40
  IS_OPENAI_API_KEY_AVAILABLE = False
41
 
42
+ DEFAULT_MODEL = "gpt-4" if IS_OPENAI_API_KEY_AVAILABLE else "gpt-3.5-turbo"
43
+ DEFAULT_SECTIONS = ["introduction", "related works", "backgrounds", "methodology", "experiments",
44
+ "conclusion", "abstract"] if IS_OPENAI_API_KEY_AVAILABLE \
45
+ else ["introduction", "related works"]
46
+
47
+ #######################################################################################################################
48
+ # Load the list of templates & knowledge databases
49
+ #######################################################################################################################
50
  ALL_TEMPLATES = list_folders("latex_templates")
51
+ ALL_DATABASES = ["(None)"] + list_folders("knowledge_databases")
52
+
53
+ #######################################################################################################################
54
+ # Gradio UI
55
+ #######################################################################################################################
56
+ theme = gr.themes.Default(font=gr.themes.GoogleFont("Questrial"))
57
+ # .set(
58
+ # background_fill_primary='#E5E4E2',
59
+ # background_fill_secondary = '#F6F6F6',
60
+ # button_primary_background_fill="#281A39"
61
+ # )
62
+ ANNOUNCEMENT = """
63
+ # Auto-Draft: 学术写作辅助工具
64
+
65
+ 本Demo提供对[Auto-Draft](https://github.com/CCCBora/auto-draft)的auto_draft功能的测试.
66
+ 通过输入想要生成的论文名称(比如Playing atari with deep reinforcement learning),即可由AI辅助生成论文模板.
67
+
68
+ ***2023-06-10 Update***:
69
+ pass
70
+
71
+
72
+ 如果有更多想法和建议欢迎加入QQ群里交流, 如果我在Space里更新了Key我会第一时间通知大家. 群号: ***249738228***."""
73
+
74
+ ACADEMIC_PAPER = """## 一键生成论文初稿
75
+ 1. 在Title文本框中输入想要生成的论文名称(比如Playing Atari with Deep Reinforcement Learning).
76
+ 2. 点击Submit. 等待大概十五分钟(全文).
77
+ 3. 在右侧下载.zip格式的输出,在Overleaf上编译浏览.
78
+ """
79
+
80
+ REFERENCES = """## 一键搜索相关论文
81
+ (此功能已经被整合进一键生成论文初稿)
82
+ 1. 在Title文本框中输入想要搜索文献的论文(比如Playing Atari with Deep Reinforcement Learning).
83
+ 2. 点击Submit. 等待大概十分钟.
84
+ 3. 在右侧JSON处会显示相关文献.
85
+ """
86
+
87
+ REFERENCES_INSTRUCTION = """### References
88
+ 这一栏用于定义AI如何选取参考文献. 目前是两种方式混合:
89
+ 1. GPT自动根据标题生成关键字,使用Semantic Scholar搜索引擎搜索文献,利用Specter获取Paper Embedding来自动选取最相关的文献作为GPT的参考资料.
90
+ 2. 用户上传bibtex文件,使用Google Scholar搜索摘要作为GPT的参考资料.
91
+ 关于有希望利用本地文件来供GPT参考的功能将在未来实装.
92
+ """
93
+
94
+ DOMAIN_KNOWLEDGE_INSTRUCTION = """### Domain Knowledge
95
+ 这一栏用于定义AI的知识库. 将提供两种选择:
96
+ 1. 各个领域内由专家预先收集资料并构建的FAISS向量数据库. 目前实装的数据库:
97
+ * (None): 不使用任何知识库
98
+ * ml_textbook_test: 包含两本机器学习教材The Elements of Statistical Learning和Reinforcement Learning Theory and Algorithms. 仅用于测试知识库Pipeline.
99
+ 2. 自行构建的使用OpenAI text-embedding-ada-002模型创建的FAISS向量数据库. (暂未实装)
100
+ """
101
+
102
+ OUTPUTS_INSTRUCTION = """### Outputs
103
+ 这一栏用于定义输出的内容:
104
+ * Template: 用于填装内容的LaTeX模板.
105
+ * Models: 使用GPT-4或者GPT-3.5-Turbo生成内容.
106
+ * Prompts模式: 不生成内容, 而是生成用于生成内容的Prompts. 可以手动复制到网页版或者其他语言模型中进行使用.
107
+ """
108
+
109
+ OTHERS_INSTRUCTION = """### Others
110
+
111
+ """
112
+
113
+ style_mapping = {True: "color:white;background-color:green",
114
+ False: "color:white;background-color:red"} # todo: to match website's style
115
+ availability_mapping = {True: "AVAILABLE", False: "NOT AVAILABLE"}
116
+ STATUS = f'''## Huggingface Space Status
117
+ 当`OpenAI API`显示AVAILABLE的时候这个Space可以直接使用.
118
+ 当`OpenAI API`显示NOT AVAILABLE的时候这个Space可以通过在左侧输入OPENAI KEY来使用. 需要有GPT-4的API权限.
119
+ 当`Cache`显示AVAILABLE的时候, 所有的输入和输出会被备份到我的云储存中. 显示NOT AVAILABLE的时候不影响实际使用.
120
+ `OpenAI API`: <span style="{style_mapping[IS_OPENAI_API_KEY_AVAILABLE]}">{availability_mapping[IS_OPENAI_API_KEY_AVAILABLE]}</span>. `Cache`: <span style="{style_mapping[IS_CACHE_AVAILABLE]}">{availability_mapping[IS_CACHE_AVAILABLE]}</span>.'''
121
 
122
 
123
  def clear_inputs(*args):
124
  return "", ""
125
 
126
+
127
  def clear_inputs_refs(*args):
128
  return "", 5
129
 
130
 
131
+ def wrapped_generator(
132
+ paper_title, paper_description, # main input
133
+ openai_api_key=None, openai_url=None, # key
134
+ tldr=True, max_kw_refs=10, bib_refs=None, max_tokens_ref=2048, # references
135
+ knowledge_database=None, max_tokens_kd=2048, query_counts=10, # domain knowledge
136
+ paper_template="ICLR2022", selected_sections=None, model="gpt-4", prompts_mode=False, # outputs parameters
137
+ cache_mode=False # IS_CACHE_AVAILABLE # handle cache mode
138
+ ):
139
  # if `cache_mode` is True, then follow the following steps:
140
  # check if "title"+"description" have been generated before
141
  # if so, download from the cloud storage, return it
 
150
  raise gr.Error(f"Key错误. Error: {e}")
151
 
152
  if cache_mode:
153
+ from utils.storage import list_all_files, download_file
154
  # check if "title"+"description" have been generated before
155
  input_dict = {"title": paper_title, "description": paper_description,
156
  "generator": "generate_draft"}
 
161
  # download from the cloud storage, return it
162
  download_file(file_name)
163
  return file_name
164
+ try:
165
+ output = generate_draft(
166
+ paper_title, description=paper_description, # main input
167
+ tldr=tldr, max_kw_refs=max_kw_refs, bib_refs=bib_refs, max_tokens_ref=max_tokens_ref, # references
168
+ knowledge_database=knowledge_database, max_tokens_kd=max_tokens_kd, query_counts=query_counts, # domain knowledge
169
+ sections=selected_sections, model=model, template=paper_template, prompts_mode=prompts_mode, # outputs parameters
170
+ )
171
+ if cache_mode:
172
+ from utils.storage import upload_file
173
+ upload_file(output)
174
+ except Exception as e:
175
+ raise gr.Error(f"生成失败. Error: {e}")
176
+ return output
 
 
 
 
 
 
177
 
178
 
179
  def wrapped_references_generator(paper_title, num_refs, openai_api_key=None):
 
183
  return generate_top_k_references(paper_title, top_k=num_refs)
184
 
185
 
 
186
  with gr.Blocks(theme=theme) as demo:
187
+ gr.Markdown(ANNOUNCEMENT)
 
 
188
 
189
  with gr.Row():
190
  with gr.Column(scale=2):
191
  key = gr.Textbox(value=openai_key, lines=1, max_lines=1, label="OpenAI Key",
192
  visible=not IS_OPENAI_API_KEY_AVAILABLE)
193
+ url = gr.Textbox(value=None, lines=1, max_lines=1, label="URL",
194
+ visible=False)
 
 
195
  # 每个功能做一个tab
196
  with gr.Tab("学术论文"):
197
  gr.Markdown(ACADEMIC_PAPER)
198
 
199
  title = gr.Textbox(value="Playing Atari with Deep Reinforcement Learning", lines=1, max_lines=1,
200
  label="Title", info="论文标题")
201
+
202
+ description_pp = gr.Textbox(lines=5, label="Description (Optional)", visible=True,
203
+ info="这篇论文的主要贡献和创新点. (生成所有章节时共享这个信息, 保持生成的一致性.)")
204
+
205
  with gr.Accordion("高级设置", open=False):
206
  with gr.Row():
207
+ with gr.Column(scale=1):
208
+ gr.Markdown(OUTPUTS_INSTRUCTION)
209
+ with gr.Column(scale=2):
210
+ with gr.Row():
211
+ template = gr.Dropdown(label="Template", choices=ALL_TEMPLATES, value="Default",
212
+ interactive=True,
213
+ info="生成论文的模板.")
214
+ model_selection = gr.Dropdown(label="Model", choices=["gpt-4", "gpt-3.5-turbo"],
215
+ value=DEFAULT_MODEL,
216
+ interactive=True,
217
+ info="生成论文用到的语言模型.")
218
+ prompts_mode = gr.Checkbox(value=False, visible=True, interactive=True,
219
+ label="Prompts模式",
220
+ info="只输出用于生成论文的Prompts, 可以复制到别的地方生成论文.")
221
+
222
+ sections = gr.CheckboxGroup(
223
+ choices=["introduction", "related works", "backgrounds", "methodology", "experiments",
224
+ "conclusion", "abstract"],
225
+ type="value", label="生成章节", interactive=True, info="选择生成论文的哪些章节.",
226
+ value=DEFAULT_SECTIONS)
227
 
228
  with gr.Row():
229
  with gr.Column(scale=1):
230
  gr.Markdown(REFERENCES_INSTRUCTION)
231
 
232
  with gr.Column(scale=2):
233
+ max_kw_ref_slider = gr.Slider(minimum=1, maximum=20, value=10, step=1,
234
+ interactive=True, label="MAX_KW_REFS",
235
+ info="每个Keyword搜索几篇参考文献", visible=False)
236
+
237
+ max_tokens_ref_slider = gr.Slider(minimum=256, maximum=4096, value=2048, step=2,
238
+ interactive=True, label="MAX_TOKENS",
239
+ info="参考文献内容占用Prompts中的Token数")
240
+
241
  tldr_checkbox = gr.Checkbox(value=True, label="TLDR;",
242
  info="选择此筐表示将使用Semantic Scholar的TLDR作为文献的总结.",
243
  interactive=True)
 
252
  gr.Markdown(DOMAIN_KNOWLEDGE_INSTRUCTION)
253
 
254
  with gr.Column(scale=2):
255
+ query_counts_slider = gr.Slider(minimum=1, maximum=20, value=10, step=1,
256
+ interactive=True, label="QUERY_COUNTS",
257
+ info="从知识库内检索多少条内容", visible=False)
258
+ max_tokens_kd_slider = gr.Slider(minimum=256, maximum=4096, value=2048, step=2,
259
+ interactive=True, label="MAX_TOKENS",
260
+ info="知识库内容占用Prompts中的Token数")
261
+ # template = gr.Dropdown(label="Template", choices=ALL_TEMPLATES, value="Default",
262
+ # interactive=True,
263
+ # info="生成论文的参考模板.")
264
  domain_knowledge = gr.Dropdown(label="预载知识库",
265
+ choices=ALL_DATABASES,
266
+ value="(None)",
267
+ interactive=True,
268
+ info="使用预先构建的知识库.")
269
  local_domain_knowledge = gr.File(label="本地知识库 (暂未实装)", interactive=False)
270
  with gr.Row():
271
  clear_button_pp = gr.Button("Clear")
 
275
  gr.Markdown(REFERENCES)
276
 
277
  title_refs = gr.Textbox(value="Playing Atari with Deep Reinforcement Learning", lines=1, max_lines=1,
278
+ label="Title", info="论文标题")
279
  slider_refs = gr.Slider(minimum=1, maximum=100, value=5, step=1,
280
+ interactive=True, label="最相关的参考文献数目")
281
  with gr.Row():
282
  clear_button_refs = gr.Button("Clear")
283
  submit_button_refs = gr.Button("Submit", variant="primary")
 
292
  ''')
293
 
294
  with gr.Column(scale=1):
295
+ gr.Markdown(STATUS)
 
 
 
 
 
 
 
296
  file_output = gr.File(label="Output")
297
  json_output = gr.JSON(label="References")
298
 
299
+
300
+ # def wrapped_generator(
301
+ # paper_title, paper_description, # main input
302
+ # openai_api_key=None, openai_url=None, # key
303
+ # tldr=True, max_kw_refs=10, bib_refs=None, max_tokens_ref=2048, # references
304
+ # knowledge_database=None, max_tokens_kd=2048, query_counts=10, # domain knowledge
305
+ # paper_template="ICLR2022", selected_sections=None, model="gpt-4", prompts_mode=False, # outputs parameters
306
+ # cache_mode=IS_CACHE_AVAILABLE # handle cache mode
307
+ # ):
308
  clear_button_pp.click(fn=clear_inputs, inputs=[title, description_pp], outputs=[title, description_pp])
309
  submit_button_pp.click(fn=wrapped_generator,
310
+ inputs=[title, description_pp, key, url,
311
+ tldr_checkbox, max_kw_ref_slider, bibtex_file, max_tokens_ref_slider,
312
+ domain_knowledge, max_tokens_kd_slider, query_counts_slider,
313
+ template, sections, model_selection, prompts_mode], outputs=file_output)
314
 
315
  clear_button_refs.click(fn=clear_inputs_refs, inputs=[title_refs, slider_refs], outputs=[title_refs, slider_refs])
316
  submit_button_refs.click(fn=wrapped_references_generator,
317
+ inputs=[title_refs, slider_refs, key], outputs=json_output)
318
 
319
  demo.queue(concurrency_count=1, max_size=5, api_open=False)
320
  demo.launch(show_error=True)
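
For reference, the new wrapped_generator signature can also be exercised outside the Gradio UI, for example to try prompts mode against the test knowledge database. A sketch mirroring the inputs wired to submit_button_pp above; every argument value here is illustrative only:

# Sketch: direct call with illustrative values (replace the key placeholder with a real one).
output_path = wrapped_generator(
    "Playing Atari with Deep Reinforcement Learning", "",           # title, description
    openai_api_key="sk-...", openai_url=None,                       # credentials
    tldr=True, max_kw_refs=10, bib_refs=None, max_tokens_ref=2048,  # references
    knowledge_database="ml_textbook_test", max_tokens_kd=2048, query_counts=10,  # domain knowledge
    paper_template="Default", selected_sections=["introduction", "related works"],
    model="gpt-3.5-turbo", prompts_mode=True,
    cache_mode=False,
)
print(output_path)  # path to the generated archive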
auto_backgrounds.py CHANGED
@@ -1,10 +1,16 @@
 
1
  import os.path
2
  from utils.references import References
 
3
  from utils.file_operations import hash_name, make_archive, copy_templates
4
  from utils.tex_processing import create_copies
5
- from section_generator import keywords_generation, section_generation # figures_generation, section_generation_bg,
6
  import logging
7
  import time
 
 
 
 
8
 
9
  TOTAL_TOKENS = 0
10
  TOTAL_PROMPTS_TOKENS = 0
@@ -24,16 +30,18 @@ def log_usage(usage, generating_target, print_out=True):
24
  TOTAL_PROMPTS_TOKENS += prompts_tokens
25
  TOTAL_COMPLETION_TOKENS += completion_tokens
26
 
27
- message = f"For generating {generating_target}, {total_tokens} tokens have been used " \
28
  f"({prompts_tokens} for prompts; {completion_tokens} for completion). " \
29
- f"{TOTAL_TOKENS} tokens have been used in total.\n\n"
30
  if print_out:
31
  print(message)
32
  logging.info(message)
33
 
34
 
35
- def _generation_setup(title, description="", template="ICLR2022", tldr=False,
36
- max_kw_refs=10, bib_refs=None, max_tokens=2048):
 
 
37
  """
38
  This function handles the setup process for paper generation; it contains three folds
39
  1. Copy the template to the outputs folder. Create the log file `generation.log`
@@ -57,33 +65,115 @@ def _generation_setup(title, description="", template="ICLR2022", tldr=False,
57
  - all_paper_ids (list): A list of all paper IDs collected for the references.
58
  """
59
  # print("Generation setup...")
60
- paper = {}
61
- paper_body = {}
 
62
 
63
  # Create a copy in the outputs folder.
64
  bibtex_path, destination_folder = copy_templates(template, title)
65
  logging.basicConfig(level=logging.INFO, filename=os.path.join(destination_folder, "generation.log"))
66
 
67
- # Generate keywords and references
68
- # print("Initialize the paper information ...")
69
- input_dict = {"title": title, "description": description}
70
- keywords, usage = keywords_generation(input_dict)
71
- log_usage(usage, "keywords")
72
-
73
- # generate keywords dictionary # todo: in some rare situations, collected papers will be an empty list.
74
- keywords = {keyword: max_kw_refs for keyword in keywords}
75
-
 
 
76
  ref = References(title, bib_refs)
77
  ref.collect_papers(keywords, tldr=tldr)
 
78
  all_paper_ids = ref.to_bibtex(bibtex_path)
 
 
79
 
80
  print(f"The paper information has been initialized. References are saved to {bibtex_path}.")
81
 
 
 
82
  paper["title"] = title
83
- paper["description"] = description
84
- paper["references"] = ref.to_prompts(max_tokens=max_tokens)
85
  paper["body"] = paper_body
86
  paper["bibtex"] = bibtex_path
 
 
 
 
87
  return paper, destination_folder, all_paper_ids
88
  # todo: use `all_paper_ids` to check if all citations are in this list
89
 
@@ -107,8 +197,11 @@ def generate_backgrounds(title, description="", template="ICLR2022", model="gpt-
107
  return make_archive(destination_folder, filename)
108
 
109
 
110
- def generate_draft(title, description="", template="ICLR2022",
111
- tldr=True, max_kw_refs=10, sections=None, bib_refs=None, model="gpt-4"):
 
 
 
112
  """
113
  This function generates a draft paper using the provided information; it contains three steps: 1. Pre-processing:
114
  Initializes the setup for paper generation and filters the sections to be included in the paper. 2. Processing:
@@ -143,20 +236,17 @@ def generate_draft(title, description="", template="ICLR2022",
143
  # pre-processing `sections` parameter;
144
  print("================START================")
145
  print(f"Generating the paper '{title}'.")
146
- print("\n") # todo: use a configuration file to define parameters
147
  print("================PRE-PROCESSING================")
 
148
  if sections is None:
149
  sections = ["introduction", "related works", "backgrounds", "methodology", "experiments", "conclusion",
150
  "abstract"]
151
  else:
152
  sections = _filter_sections(sections)
153
-
154
- if model == "gpt-4":
155
- max_tokens = 4096
156
- else:
157
- max_tokens = 2048
158
  paper, destination_folder, _ = _generation_setup(title, description, template, tldr, max_kw_refs, bib_refs,
159
- max_tokens=max_tokens)
 
 
160
 
161
  # main components
162
  print(f"================PROCESSING================")
@@ -188,8 +278,9 @@ def generate_draft(title, description="", template="ICLR2022",
188
 
189
  if __name__ == "__main__":
190
  import openai
 
191
  openai.api_key = os.getenv("OPENAI_API_KEY")
192
 
193
  target_title = "Playing Atari with Decentralized Reinforcement Learning"
194
- output = generate_draft(target_title)
195
  print(output)
 
1
+ import json
2
  import os.path
3
  from utils.references import References
4
+ from utils.knowledge import Knowledge
5
  from utils.file_operations import hash_name, make_archive, copy_templates
6
  from utils.tex_processing import create_copies
7
+ from section_generator import section_generation # figures_generation, section_generation_bg, keywords_generation,
8
  import logging
9
  import time
10
+ from langchain.vectorstores import FAISS
11
+ from utils.gpt_interaction import GPTModel
12
+ from utils.prompts import SYSTEM
13
+ from models import EMBEDDINGS
14
 
15
  TOTAL_TOKENS = 0
16
  TOTAL_PROMPTS_TOKENS = 0
 
30
  TOTAL_PROMPTS_TOKENS += prompts_tokens
31
  TOTAL_COMPLETION_TOKENS += completion_tokens
32
 
33
+ message = f">>USAGE>> For generating {generating_target}, {total_tokens} tokens have been used " \
34
  f"({prompts_tokens} for prompts; {completion_tokens} for completion). " \
35
+ f"{TOTAL_TOKENS} tokens have been used in total."
36
  if print_out:
37
  print(message)
38
  logging.info(message)
39
 
40
 
41
+ def _generation_setup(title, description="", template="ICLR2022",
42
+ tldr=False, max_kw_refs=10, bib_refs=None, max_tokens_ref=2048, # generating references
43
+ knowledge_database=None, max_tokens_kd=2048, query_counts=10, # querying from knowledge database
44
+ debug=True):
45
  """
46
  This function handles the setup process for paper generation; it contains three folds
47
  1. Copy the template to the outputs folder. Create the log file `generation.log`
 
65
  - all_paper_ids (list): A list of all paper IDs collected for the references.
66
  """
67
  # print("Generation setup...")
68
+ # paper = {}
69
+ # paper_body = {}
70
+ llm = GPTModel()
71
 
72
  # Create a copy in the outputs folder.
73
  bibtex_path, destination_folder = copy_templates(template, title)
74
  logging.basicConfig(level=logging.INFO, filename=os.path.join(destination_folder, "generation.log"))
75
 
76
+ ###################################################################################################################
77
+ # Generate contributions
78
+ ###################################################################################################################
79
+ if description:
80
+ contributions = description
81
+ else:
82
+ try:
83
+ contributions, usage = llm(systems=SYSTEM["contributions"], prompts=title, return_json=True)
84
+ contributions = [f"Contribution {idx}: {contributions[contribution]['statement']}\n" \
85
+ f"Novelty of Contribution {idx}: {contributions[contribution]['reason']}\n"
86
+ for idx, contribution in enumerate(contributions)]
87
+ contributions = "".join(contributions)
88
+ log_usage(usage, "contributions")
89
+ except RuntimeError:
90
+ if debug:
91
+ raise RuntimeError("Failed to generate contributions.")
92
+ else:
93
+ print("Failed to generate contributions. Use empty contributions.")
94
+ contributions = ""
95
+ print("Contributions:\n{}".format(contributions))
96
+ ###################################################################################################################
97
+ # Generate references
98
+ ###################################################################################################################
99
+ # input_dict = {"title": title, "description": description}
100
+ # keywords, usage = keywords_generation(input_dict)
101
+ # log_usage(usage, "keywords")
102
+ try:
103
+ keywords, usage = llm(systems=SYSTEM["keywords"], prompts=title, return_json=True)
104
+ log_usage(usage, "keywords")
105
+ keywords = {keyword: max_kw_refs for keyword in keywords}
106
+ except RuntimeError:
107
+ if debug:
108
+ raise RuntimeError("Failed to generate keywords.")
109
+ else:
110
+ print("Failed to generate keywords. Use default keywords.")
111
+ keywords = {"machine learning": max_kw_refs, "artificial intelligence": max_kw_refs} # DEFAULT KEYWORDS
112
+ # generate keywords dictionary
113
+ # keywords = {keyword: max_kw_refs for keyword in keywords}
114
+
115
+ print("Keywords: \n", keywords)
116
+ # todo: in some rare situations, collected papers will be an empty list. handle this issue
117
  ref = References(title, bib_refs)
118
  ref.collect_papers(keywords, tldr=tldr)
119
+ references = ref.to_prompts(max_tokens=max_tokens_ref)
120
  all_paper_ids = ref.to_bibtex(bibtex_path)
121
+ ###################################################################################################################
122
+ # Generate domain knowledge
123
+ ###################################################################################################################
124
+ prompts = f"Title: {title}\n Contributions: {contributions}"
125
+ preliminaries_kw, _ = llm(systems=SYSTEM["preliminaries"], prompts=prompts)
126
+ # check if the database exists or not
127
+ db_path = f"knowledge_databases/{knowledge_database}"
128
+ db_config_path = os.path.join(db_path, "db_meta.json")
129
+ db_index_path = os.path.join(db_path, "faiss_index")
130
+ if os.path.isdir(db_path):
131
+ try:
132
+ # load configuration file
133
+ with open(db_config_path, "r", encoding="utf-8") as f:
134
+ db_config = json.load(f)
135
+ model_name = db_config["embedding_model"]
136
+ embeddings = EMBEDDINGS[model_name]
137
+ db = FAISS.load_local(db_index_path, embeddings)
138
+ knowledge = Knowledge(db=db)
139
+ knowledge.collect_knowledge(preliminaries_kw, max_query=query_counts)
140
+ domain_knowledge = knowledge.to_prompts(max_tokens_kd)
141
+ except Exception as e:
142
+ if debug:
143
+ raise RuntimeError(f"Failed to query from FAISS. Error {e}.")
144
+ else:
145
+ print(f"Failed to query from FAISS. Error {e}. Use empty domain knowledge instead.")
146
+ domain_knowledge = ""
147
+ else:
148
+ domain_knowledge = ""
149
+
150
+ ###################################################################################################################
151
+ # Generate necessary media
152
+ ###################################################################################################################
153
+ prompts = f"Title: {title}\n Contributions: {contributions}"
154
+ try:
155
+ components, usage = llm(systems=SYSTEM["components"], prompts=prompts, return_json=True)
156
+ log_usage(usage, "media")
157
+ except RuntimeError:
158
+ if debug:
159
+ raise RuntimeError("Failed to generate media.")
160
+ else:
161
+ print("Failed to generate media. Use default media.")
162
+ components = {}
163
 
164
  print(f"The paper information has been initialized. References are saved to {bibtex_path}.")
165
 
166
+ paper = {}
167
+ paper_body = {}
168
  paper["title"] = title
169
+ paper["description"] = contributions
170
+ paper["references"] = references
171
  paper["body"] = paper_body
172
  paper["bibtex"] = bibtex_path
173
+ paper["domain_knowledge"] = domain_knowledge
174
+ paper["components"] = components
175
+
176
+ # print(json.dumps(paper, indent=4))
177
  return paper, destination_folder, all_paper_ids
178
  # todo: use `all_paper_ids` to check if all citations are in this list
179
 
 
197
  return make_archive(destination_folder, filename)
198
 
199
 
200
+ def generate_draft(title, description="", # main input
201
+ tldr=True, max_kw_refs=10, bib_refs=None, max_tokens_ref=2048, # references
202
+ knowledge_database=None, max_tokens_kd=2048, query_counts=10, # domain knowledge
203
+ sections=None, model="gpt-4", template="ICLR2022", prompts_mode=False, # outputs parameters
204
+ ):
205
  """
206
  This function generates a draft paper using the provided information; it contains three steps: 1. Pre-processing:
207
  Initializes the setup for paper generation and filters the sections to be included in the paper. 2. Processing:
 
236
  # pre-processing `sections` parameter;
237
  print("================START================")
238
  print(f"Generating the paper '{title}'.")
 
239
  print("================PRE-PROCESSING================")
240
+ # make `sections` in a correct order
241
  if sections is None:
242
  sections = ["introduction", "related works", "backgrounds", "methodology", "experiments", "conclusion",
243
  "abstract"]
244
  else:
245
  sections = _filter_sections(sections)
 
 
 
 
 
246
  paper, destination_folder, _ = _generation_setup(title, description, template, tldr, max_kw_refs, bib_refs,
247
+ max_tokens_ref=max_tokens_ref, max_tokens_kd=max_tokens_kd,
248
+ query_counts=query_counts,
249
+ knowledge_database=knowledge_database)
250
 
251
  # main components
252
  print(f"================PROCESSING================")
 
278
 
279
  if __name__ == "__main__":
280
  import openai
281
+
282
  openai.api_key = os.getenv("OPENAI_API_KEY")
283
 
284
  target_title = "Playing Atari with Decentralized Reinforcement Learning"
285
+ output = generate_draft(target_title, knowledge_database="ml_textbook_test")
286
  print(output)
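
The domain-knowledge branch above expects each folder under knowledge_databases/ to contain a faiss_index directory plus a db_meta.json whose "embedding_model" value is a key of models.EMBEDDINGS. A minimal sketch of preparing that layout; only "embedding_model" is actually read by this code, and any other metadata fields would be hypothetical:

# Sketch: lay out a knowledge database folder that _generation_setup can load.
import json, os

db_path = "knowledge_databases/ml_textbook_test"
os.makedirs(db_path, exist_ok=True)
with open(os.path.join(db_path, "db_meta.json"), "w", encoding="utf-8") as f:
    json.dump({"embedding_model": "all-MiniLM-L6-v2"}, f, indent=2)  # must match a models.EMBEDDINGS key
# The FAISS index itself is expected at knowledge_databases/ml_textbook_test/faiss_index
# (see the sketch after models/embeddings.py below for one way to build it).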
initialization.py ADDED
@@ -0,0 +1,7 @@
 
 
1
+ """Generate necessary components of prompts. """
2
+ from utils.prompts import SYSTEM
3
+
4
+ def get_keywords(model, title):
5
+ pass
6
+
7
+
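
initialization.py is added as a stub. One way get_keywords might eventually be filled in, reusing the SYSTEM["keywords"] prompt exactly as _generation_setup does in auto_backgrounds.py; this is a sketch, not the author's implementation:

# Sketch: mirrors the keyword-generation call in auto_backgrounds._generation_setup.
def get_keywords(model, title):
    # `model` is assumed to be a GPTModel-like callable returning (parsed_json, usage).
    keywords, usage = model(systems=SYSTEM["keywords"], prompts=title, return_json=True)
    return keywords, usage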
models/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .embeddings import EMBEDDINGS
models/embeddings.py ADDED
@@ -0,0 +1,14 @@
 
 
1
+ from langchain.embeddings import HuggingFaceEmbeddings
2
+
3
+
4
+ model_name = 'sentence-transformers/all-MiniLM-L6-v2'
5
+ model_kwargs = {'device': 'cpu'}
6
+ encode_kwargs = {'normalize_embeddings': False}
7
+
8
+ all_minilm_l6_v2 = HuggingFaceEmbeddings(
9
+ model_name=model_name,
10
+ model_kwargs=model_kwargs,
11
+ encode_kwargs=encode_kwargs)
12
+
13
+
14
+ EMBEDDINGS = {"all-MiniLM-L6-v2": all_minilm_l6_v2}
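
These are the embeddings auto_backgrounds.py looks up when it loads a knowledge database. A minimal sketch of building a compatible FAISS index with LangChain and saving it where FAISS.load_local(db_index_path, embeddings) expects to find it; the corpus below is a placeholder:

# Sketch: build and persist a FAISS index that the generator can load.
from langchain.vectorstores import FAISS
from models import EMBEDDINGS

chunks = ["placeholder passage 1 ...", "placeholder passage 2 ..."]  # your document chunks
db = FAISS.from_texts(chunks, EMBEDDINGS["all-MiniLM-L6-v2"])
db.save_local("knowledge_databases/ml_textbook_test/faiss_index")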
outputs/outputs_20230608_115759/abstract.tex DELETED
@@ -1,3 +0,0 @@
1
- \begin{abstract}
2
- In this paper, we present a novel Decentralized Atari Learning (DAL) algorithm for playing Atari games using decentralized reinforcement learning. Our proposed method combines the strengths of both value-based and policy-based decentralized RL techniques and introduces a unique communication mechanism that enables agents to share information and coordinate their actions while preserving privacy and reducing communication overhead. Through a comprehensive experimental evaluation, we demonstrate the effectiveness of our algorithm in addressing the challenges of high-dimensional sensory input and complex decision-making processes in Atari games. Our experimental results show that the DAL algorithm achieves competitive performance in terms of cumulative reward, outperforming the decentralized Dec-PG method and maintaining comparable performance with the centralized DQN and A3C methods. In terms of training time and communication overhead, the DAL algorithm exhibits significant improvements over the centralized methods, highlighting its scalability and privacy-preserving capabilities. Our work contributes to the growing body of research in decentralized reinforcement learning, offering valuable insights into the trade-offs between scalability, privacy, and performance in this domain.
3
- \end{abstract}
 
 
 
 
outputs/outputs_20230608_115759/backgrounds.tex DELETED
@@ -1,25 +0,0 @@
1
- \section{Backgrounds}
2
-
3
- The central problem in the field of decentralized reinforcement learning (RL) is to develop efficient algorithms that can learn optimal policies in multi-agent environments while addressing the challenges of scalability, privacy, and convergence. This problem is of great importance in various industrial applications, such as autonomous vehicles \citep{duan2022autonomous}, traffic signal control \citep{yang2021an}, and edge-computing-empowered Internet of Things (IoT) networks \citep{lei2022adaptive}. Theoretical challenges in this field include the design of algorithms that can handle high-dimensional state and action spaces, non-stationarity, and the exponential growth of state-action space \citep{adams2020resolving}.
4
-
5
- \subsection{Foundational Concepts and Notations}
6
-
7
- Reinforcement learning is a framework for learning optimal policies through interaction with an environment \citep{sutton2005reinforcement}. In this framework, an agent takes actions in an environment to achieve a goal, and the environment provides feedback in the form of rewards. The objective of the agent is to learn a policy that maximizes the expected cumulative reward over time.
8
-
9
- A standard RL problem is modeled as a Markov Decision Process (MDP), defined by a tuple $(\mathcal{S}, \mathcal{A}, \mathcal{P}, \mathcal{R}, \gamma)$, where $\mathcal{S}$ is the state space, $\mathcal{A}$ is the action space, $\mathcal{P}: \mathcal{S} \times \mathcal{A} \times \mathcal{S} \rightarrow [0, 1]$ is the state transition probability function, $\mathcal{R}: \mathcal{S} \times \mathcal{A} \rightarrow \mathbb{R}$ is the reward function, and $\gamma \in [0, 1)$ is the discount factor. The agent's goal is to learn a policy $\pi: \mathcal{S} \rightarrow \mathcal{A}$ that maximizes the expected cumulative reward, defined as $V^\pi(s) = \mathbb{E}\left[\sum_{t=0}^{\infty} \gamma^t R_t | S_0 = s, \pi\right]$.
10
-
11
- In decentralized RL, multiple agents interact with the environment and each other to learn optimal policies. The problem can be modeled as a Decentralized Markov Decision Process (D-MDP) \citep{lu2021decentralized}, which extends the MDP framework to include multiple agents and their local observations, actions, and policies. The D-MDP is defined by a tuple $(\mathcal{S}, \mathcal{A}_1, \dots, \mathcal{A}_n, \mathcal{P}, \mathcal{R}_1, \dots, \mathcal{R}_n, \gamma)$, where $n$ is the number of agents, $\mathcal{A}_i$ is the action space of agent $i$, and $\mathcal{R}_i$ is the reward function of agent $i$. Each agent aims to learn a local policy $\pi_i: \mathcal{S} \rightarrow \mathcal{A}_i$ that maximizes its expected cumulative reward.
12
-
13
- \subsection{Decentralized Reinforcement Learning Algorithms}
14
-
15
- Decentralized RL algorithms can be broadly categorized into two classes: value-based and policy-based methods. Value-based methods, such as decentralized Q-learning \citep{hasselt2015deep}, aim to learn an action-value function $Q^\pi(s, a)$, which represents the expected cumulative reward of taking action $a$ in state $s$ and following policy $\pi$ thereafter. The optimal policy can be derived from the optimal action-value function, $Q^*(s, a) = \max_\pi Q^\pi(s, a)$, as $\pi^*(s) = \arg\max_a Q^*(s, a)$. Deep Q-Networks (DQNs) \citep{mnih2013playing} extend Q-learning to high-dimensional state spaces by using deep neural networks to approximate the action-value function.
16
-
17
- Policy-based methods, such as decentralized policy gradient (Dec-PG) \citep{lu2021decentralized}, directly optimize the policy by following the gradient of the expected cumulative reward with respect to the policy parameters. Actor-critic algorithms \citep{lillicrap2015continuous} combine the advantages of both value-based and policy-based methods by using a critic to estimate the action-value function and an actor to update the policy based on the critic's estimates. Decentralized actor-critic algorithms have been proposed for continuous control tasks \citep{mnih2016asynchronous} and multi-agent collision avoidance \citep{thumiger2022a}.
18
-
19
- In this paper, we focus on the application of decentralized RL algorithms to the problem of playing Atari games. We build upon the foundational concepts and algorithms introduced above and develop a novel decentralized RL algorithm that addresses the challenges of scalability, privacy, and convergence in multi-agent Atari environments.
20
-
21
- \subsection{Decentralized Learning in Atari Environments}
22
-
23
- Atari games provide a challenging testbed for RL algorithms due to their high-dimensional state spaces, diverse game dynamics, and complex scoring systems \citep{mnih2013playing}. Recent advances in deep RL have led to the development of algorithms that can learn to play Atari games directly from raw pixel inputs, outperforming human experts in some cases \citep{mnih2013playing}. However, most of these algorithms are centralized and do not scale well to large multi-agent environments.
24
-
25
- In this paper, we propose a novel decentralized RL algorithm for playing Atari games that leverages the advantages of both value-based and policy-based methods. Our algorithm builds upon the decentralized Q-learning and Dec-PG frameworks and incorporates techniques from deep RL, such as experience replay \citep{mnih2013playing} and target networks \citep{hasselt2015deep}, to improve stability and convergence. We also introduce a novel communication mechanism that allows agents to share information and coordinate their actions while preserving privacy and reducing communication overhead. Our experimental results demonstrate that our algorithm achieves competitive performance compared to centralized methods and outperforms existing decentralized RL algorithms in the Atari domain.
 
 
outputs/outputs_20230608_115759/conclusion.tex DELETED
@@ -1,7 +0,0 @@
1
- \section{Conclusion}
2
-
3
- In this paper, we presented a novel Decentralized Atari Learning (DAL) algorithm for playing Atari games using decentralized reinforcement learning. Our proposed method combines the strengths of both value-based and policy-based decentralized RL techniques and introduces a unique communication mechanism that enables agents to share information and coordinate their actions while preserving privacy and reducing communication overhead. Through a comprehensive experimental evaluation, we demonstrated the effectiveness of our algorithm in addressing the challenges of high-dimensional sensory input and complex decision-making processes in Atari games.
4
-
5
- Our experimental results showed that the DAL algorithm achieves competitive performance in terms of cumulative reward, outperforming the decentralized Dec-PG method and maintaining comparable performance with the centralized DQN and A3C methods. In terms of training time and communication overhead, the DAL algorithm exhibits significant improvements over the centralized methods, highlighting its scalability and privacy-preserving capabilities.
6
-
7
- In conclusion, our proposed Decentralized Atari Learning (DAL) algorithm contributes to the growing body of research in decentralized reinforcement learning, offering valuable insights into the trade-offs between scalability, privacy, and performance in this domain. By building upon recent advancements in decentralized RL and addressing the unique challenges associated with playing Atari games, our work paves the way for future research in large-scale, privacy-preserving multi-agent systems and their applications in various domains.
 
 
outputs/outputs_20230608_115759/exp1.png DELETED
Binary file (13 kB)
 
outputs/outputs_20230608_115759/exp2.png DELETED
Binary file (13 kB)
 
outputs/outputs_20230608_115759/exp3.png DELETED
Binary file (13 kB)
 
outputs/outputs_20230608_115759/experiments.tex DELETED
@@ -1,91 +0,0 @@
1
- \section{experiments}
2
-
3
- In this section, we present the experimental setup and results of our proposed Decentralized Atari Learning (DAL) algorithm. We begin with a high-level overview of the experimental design, followed by a detailed description of the evaluation metrics, baselines, and the Atari games used for evaluation. Finally, we present the results of our experiments, including comparisons with state-of-the-art centralized and decentralized RL methods, and discuss the insights gained from our analysis.
4
-
5
- \subsection{Experimental Design}
6
-
7
- Our experiments are designed to evaluate the performance of the DAL algorithm in terms of scalability, privacy, and convergence in multi-agent Atari environments. We compare our method with state-of-the-art centralized and decentralized RL approaches to demonstrate its effectiveness in addressing the challenges of high-dimensional sensory input and complex decision-making processes. The experimental setup consists of the following main components:
8
-
9
- \begin{itemize}
10
- \item Evaluation Metrics: We use the following metrics to evaluate the performance of our algorithm: cumulative reward, training time, and communication overhead.
11
- \item Baselines: We compare our method with state-of-the-art centralized and decentralized RL approaches, including DQN \citep{mnih2013playing}, A3C \citep{mnih2016asynchronous}, and Dec-PG \citep{lu2021decentralized}.
12
- \item Atari Games: We evaluate our algorithm on a diverse set of Atari games, including Breakout, Pong, Space Invaders, and Ms. Pac-Man, to demonstrate its generalizability and robustness.
13
- \end{itemize}
14
-
15
- \subsection{Evaluation Metrics}
16
-
17
- We use the following evaluation metrics to assess the performance of our proposed DAL algorithm:
18
-
19
- \begin{itemize}
20
- \item \textbf{Cumulative Reward:} The total reward accumulated by the agents during an episode, which serves as a measure of the agents' performance in the Atari games.
21
- \item \textbf{Training Time:} The time taken by the agents to learn their policies, which serves as a measure of the algorithm's scalability and efficiency.
22
- \item \textbf{Communication Overhead:} The amount of information exchanged between the agents during the learning process, which serves as a measure of the algorithm's privacy and communication efficiency.
23
- \end{itemize}
24
-
25
- \subsection{Baselines}
26
-
27
- We compare the performance of our proposed DAL algorithm with the following state-of-the-art centralized and decentralized RL methods:
28
-
29
- \begin{itemize}
30
- \item \textbf{DQN} \citep{mnih2013playing}: A centralized deep Q-learning algorithm that learns to play Atari games directly from raw pixel inputs.
31
- \item \textbf{A3C} \citep{mnih2016asynchronous}: A centralized actor-critic algorithm that combines the advantages of both value-based and policy-based methods for continuous control tasks and Atari games.
32
- \item \textbf{Dec-PG} \citep{lu2021decentralized}: A decentralized policy gradient algorithm that accounts for coupled safety constraints in multi-agent reinforcement learning.
33
- \end{itemize}
34
-
35
- \subsection{Atari Games}
36
-
37
- We evaluate our algorithm on a diverse set of Atari games, including the following:
38
-
39
- \begin{itemize}
40
- \item \textbf{Breakout:} A single-player game in which the agent controls a paddle to bounce a ball and break bricks.
41
- \item \textbf{Pong:} A two-player game in which the agents control paddles to bounce a ball and score points by passing the ball past the opponent's paddle.
42
- \item \textbf{Space Invaders:} A single-player game in which the agent controls a spaceship to shoot down invading aliens while avoiding their projectiles.
43
- \item \textbf{Ms. Pac-Man:} A single-player game in which the agent controls Ms. Pac-Man to eat pellets and avoid ghosts in a maze.
44
- \end{itemize}
45
-
46
- \subsection{Results and Discussion}
47
-
48
- We present the results of our experiments in Table \ref{tab:results} and Figures \ref{exp1}, \ref{exp2}, and \ref{exp3}. Our proposed DAL algorithm demonstrates competitive performance compared to the centralized and decentralized baselines in terms of cumulative reward, training time, and communication overhead.
49
-
50
- \begin{table}[h]
51
- \centering
52
- \caption{Comparison of the performance of DAL and baseline methods on Atari games.}
53
- \label{tab:results}
54
- \begin{tabular}{lccc}
55
- \toprule
56
- Method & Cumulative Reward & Training Time & Communication Overhead \\
57
- \midrule
58
- \textbf{DAL (Ours)} & \textbf{X1} & \textbf{Y1} & \textbf{Z1} \\
59
- DQN & X2 & Y2 & Z2 \\
60
- A3C & X3 & Y3 & Z3 \\
61
- Dec-PG & X4 & Y4 & Z4 \\
62
- \bottomrule
63
- \end{tabular}
64
- \end{table}
65
-
66
- \begin{figure}[h]
67
- \centering
68
- \includegraphics[width=0.8\textwidth]{exp1.png}
69
- \caption{Comparison of the cumulative reward achieved by DAL and baseline methods on Atari games.}
70
- \label{exp1}
71
- \end{figure}
72
-
73
- \begin{figure}[h]
74
- \centering
75
- \includegraphics[width=0.8\textwidth]{exp2.png}
76
- \caption{Comparison of the training time required by DAL and baseline methods on Atari games.}
77
- \label{exp2}
78
- \end{figure}
79
-
80
- \begin{figure}[h]
81
- \centering
82
- \includegraphics[width=0.8\textwidth]{exp3.png}
83
- \caption{Comparison of the communication overhead incurred by DAL and baseline methods on Atari games.}
84
- \label{exp3}
85
- \end{figure}
86
-
87
- Our analysis reveals that the DAL algorithm achieves competitive performance in terms of cumulative reward, outperforming the decentralized Dec-PG method and maintaining comparable performance with the centralized DQN and A3C methods. This demonstrates the effectiveness of our algorithm in addressing the challenges of high-dimensional sensory input and complex decision-making processes in Atari games.
88
-
89
- In terms of training time and communication overhead, the DAL algorithm shows significant improvements over the centralized methods, highlighting its scalability and privacy-preserving capabilities. The algorithm also outperforms the Dec-PG method in these aspects, demonstrating the benefits of our novel communication mechanism.
90
-
91
- In summary, our experiments demonstrate the effectiveness of our proposed Decentralized Atari Learning (DAL) algorithm in playing Atari games using decentralized reinforcement learning. The algorithm achieves competitive performance compared to state-of-the-art centralized and decentralized RL methods while maintaining scalability, privacy, and convergence in multi-agent Atari environments.
 
 
outputs/outputs_20230608_115759/fancyhdr.sty DELETED
@@ -1,485 +0,0 @@
1
- % fancyhdr.sty version 3.2
2
- % Fancy headers and footers for LaTeX.
3
- % Piet van Oostrum,
4
- % Dept of Computer and Information Sciences, University of Utrecht,
5
- % Padualaan 14, P.O. Box 80.089, 3508 TB Utrecht, The Netherlands
6
- % Telephone: +31 30 2532180. Email: piet@cs.uu.nl
7
- % ========================================================================
- % LICENCE:
- % This file may be distributed under the terms of the LaTeX Project Public
- % License, as described in lppl.txt in the base LaTeX distribution.
- % Either version 1 or, at your option, any later version.
- % ========================================================================
[remainder of the deleted fancyhdr.sty: the MODIFICATION HISTORY comments
(version 1.4, Sep 16, 1994 through version 3.2, March 22, 2005) and the
standard fancyhdr macro definitions: \fancyhead, \fancyfoot and \fancyhf with
their \fancyheadoffset/\fancyfootoffset/\fancyhfoffset variants; the
deprecated \lhead/\chead/\rhead and \lfoot/\cfoot/\rfoot commands; the
\headrulewidth, \footrulewidth and \footruleskip defaults; the
\ps@fancy/\ps@@fancy page-style definitions; the \iftopfloat, \ifbotfloat,
\iffloatpage and \iffootnote tests; and \fancypagestyle.]
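For orientation, a minimal sketch of how a LaTeX template typically drives the fancyhdr macros deleted above; the header and footer texts are illustrative and not taken from any template in this repository:

    \usepackage{fancyhdr}
    \pagestyle{fancy}
    \fancyhf{}                                 % clear all header/footer fields
    \fancyhead[L]{\slshape\rightmark}          % current section mark on the left
    \fancyfoot[C]{\thepage}                    % centred page number
    \renewcommand{\headrulewidth}{0.4pt}       % thin rule under the header
    \fancypagestyle{plain}{\fancyhf{}\fancyfoot[C]{\thepage}}  % e.g. for title pages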
 
outputs/outputs_20230608_115759/fig.png DELETED
Binary file (13 kB)
 
outputs/outputs_20230608_115759/fig1.png DELETED
Binary file (13 kB)
 
outputs/outputs_20230608_115759/fig2.png DELETED
Binary file (13 kB)
 
outputs/outputs_20230608_115759/fig3.png DELETED
Binary file (13 kB)
 
outputs/outputs_20230608_115759/generation.log DELETED
The diff for this file is too large to render. See raw diff
 
outputs/outputs_20230608_115759/iclr2022_conference.bst DELETED
@@ -1,1440 +0,0 @@
- %% File: `iclr2017.bst'
- %% A copy of iclm2010.bst, which is a modification of `plainnl.bst' for use with natbib package
- %%
- %% Copyright 2010 Hal Daum\'e III
- %% Modified by J. Fürnkranz
- %% - Changed labels from (X and Y, 2000) to (X & Y, 2000)
- %%
- %% Copyright 1993-2007 Patrick W Daly
- %% Max-Planck-Institut f\"ur Sonnensystemforschung
- %% Max-Planck-Str. 2
- %% D-37191 Katlenburg-Lindau
- %% Germany
- %% E-mail: daly@mps.mpg.de
- %%
- %% This program can be redistributed and/or modified under the terms
- %% of the LaTeX Project Public License Distributed from CTAN
- %% archives in directory macros/latex/base/lppl.txt; either
- %% version 1 of the License, or any later version.
- %%
[remainder of the deleted iclr2022_conference.bst, a BibTeX `plainnat'-family
author-year style for use with natbib: the ENTRY field list; the output,
formatting and label FUNCTIONs for each entry type (article, book, booklet,
inbook, incollection, inproceedings, manual, mastersthesis, misc, phdthesis,
proceedings, techreport, unpublished); the month and journal name MACROs; and
the presort, forward.pass and reverse.pass sorting and label passes that write
the \begin{thebibliography} ... \end{thebibliography} output.]
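For orientation, a minimal sketch of how a generated draft typically consumes this bibliography style together with natbib; the citation key and the references.bib file name are placeholders:

    \bibliographystyle{iclr2022_conference}    % the author-year .bst deleted above
    \bibliography{references}                  % placeholder .bib name
    % in the text:
    %   \citet{somekey2022}  gives  Author et al. (2022)
    %   \citep{somekey2022}  gives  (Author et al., 2022)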
 
outputs/outputs_20230608_115759/iclr2022_conference.sty DELETED
@@ -1,245 +0,0 @@
- %%%% ICLR Macros (LaTex)
- %%%% Adapted by Hugo Larochelle from the NIPS stylefile Macros
- %%%% Style File
- %%%% Dec 12, 1990 Rev Aug 14, 1991; Sept, 1995; April, 1997; April, 1999; October 2014
-
- % This file can be used with Latex2e whether running in main mode, or
- % 2.09 compatibility mode.
- %
- % If using main mode, you need to include the commands
- % \documentclass{article}
- % \usepackage{iclr14submit_e,times}
- %
[further deleted content of iclr2022_conference.sty: the eso-pic, fancyhdr and
natbib requirements with an author-year \setcitestyle; the page-dimension
settings; the \maketitle definition with the \iclrfinalcopy switch that
selects the "Published as a conference paper at ICLR 2022" or "Under review
as a conference paper at ICLR 2022" running head; the abstract and
\section/\subsection/\paragraph definitions; and the footnote, list and
display-skip spacing parameters.]
169
-
170
- % Less leading in most fonts (due to the narrow columns)
171
- % The choices were between 1-pt and 1.5-pt leading
172
- %\def\@normalsize{\@setsize\normalsize{11pt}\xpt\@xpt} % got rid of @ (MK)
173
- \def\normalsize{\@setsize\normalsize{11pt}\xpt\@xpt}
174
- \def\small{\@setsize\small{10pt}\ixpt\@ixpt}
175
- \def\footnotesize{\@setsize\footnotesize{10pt}\ixpt\@ixpt}
176
- \def\scriptsize{\@setsize\scriptsize{8pt}\viipt\@viipt}
177
- \def\tiny{\@setsize\tiny{7pt}\vipt\@vipt}
178
- \def\large{\@setsize\large{14pt}\xiipt\@xiipt}
179
- \def\Large{\@setsize\Large{16pt}\xivpt\@xivpt}
180
- \def\LARGE{\@setsize\LARGE{20pt}\xviipt\@xviipt}
181
- \def\huge{\@setsize\huge{23pt}\xxpt\@xxpt}
182
- \def\Huge{\@setsize\Huge{28pt}\xxvpt\@xxvpt}
183
-
184
- \def\toptitlebar{\hrule height4pt\vskip .25in\vskip-\parskip}
185
-
186
- \def\bottomtitlebar{\vskip .29in\vskip-\parskip\hrule height1pt\vskip
187
- .09in} %
188
- %Reduced second vskip to compensate for adding the strut in \@author
189
-
190
-
191
- %% % Vertical Ruler
192
- %% % This code is, largely, from the CVPR 2010 conference style file
193
- %% % ----- define vruler
194
- %% \makeatletter
195
- %% \newbox\iclrrulerbox
196
- %% \newcount\iclrrulercount
197
- %% \newdimen\iclrruleroffset
198
- %% \newdimen\cv@lineheight
199
- %% \newdimen\cv@boxheight
200
- %% \newbox\cv@tmpbox
201
- %% \newcount\cv@refno
202
- %% \newcount\cv@tot
203
- %% % NUMBER with left flushed zeros \fillzeros[<WIDTH>]<NUMBER>
204
- %% \newcount\cv@tmpc@ \newcount\cv@tmpc
205
- %% \def\fillzeros[#1]#2{\cv@tmpc@=#2\relax\ifnum\cv@tmpc@<0\cv@tmpc@=-\cv@tmpc@\fi
206
- %% \cv@tmpc=1 %
207
- %% \loop\ifnum\cv@tmpc@<10 \else \divide\cv@tmpc@ by 10 \advance\cv@tmpc by 1 \fi
208
- %% \ifnum\cv@tmpc@=10\relax\cv@tmpc@=11\relax\fi \ifnum\cv@tmpc@>10 \repeat
209
- %% \ifnum#2<0\advance\cv@tmpc1\relax-\fi
210
- %% \loop\ifnum\cv@tmpc<#1\relax0\advance\cv@tmpc1\relax\fi \ifnum\cv@tmpc<#1 \repeat
211
- %% \cv@tmpc@=#2\relax\ifnum\cv@tmpc@<0\cv@tmpc@=-\cv@tmpc@\fi \relax\the\cv@tmpc@}%
212
- %% % \makevruler[<SCALE>][<INITIAL_COUNT>][<STEP>][<DIGITS>][<HEIGHT>]
213
- %% \def\makevruler[#1][#2][#3][#4][#5]{\begingroup\offinterlineskip
214
- %% \textheight=#5\vbadness=10000\vfuzz=120ex\overfullrule=0pt%
215
- %% \global\setbox\iclrrulerbox=\vbox to \textheight{%
216
- %% {\parskip=0pt\hfuzz=150em\cv@boxheight=\textheight
217
- %% \cv@lineheight=#1\global\iclrrulercount=#2%
218
- %% \cv@tot\cv@boxheight\divide\cv@tot\cv@lineheight\advance\cv@tot2%
219
- %% \cv@refno1\vskip-\cv@lineheight\vskip1ex%
220
- %% \loop\setbox\cv@tmpbox=\hbox to0cm{{\iclrtenhv\hfil\fillzeros[#4]\iclrrulercount}}%
221
- %% \ht\cv@tmpbox\cv@lineheight\dp\cv@tmpbox0pt\box\cv@tmpbox\break
222
- %% \advance\cv@refno1\global\advance\iclrrulercount#3\relax
223
- %% \ifnum\cv@refno<\cv@tot\repeat}}\endgroup}%
224
- %% \makeatother
225
- %% % ----- end of vruler
226
-
227
- %% % \makevruler[<SCALE>][<INITIAL_COUNT>][<STEP>][<DIGITS>][<HEIGHT>]
228
- %% \def\iclrruler#1{\makevruler[12pt][#1][1][3][0.993\textheight]\usebox{\iclrrulerbox}}
229
- %% \AddToShipoutPicture{%
230
- %% \ificlrfinal\else
231
- %% \iclrruleroffset=\textheight
232
- %% \advance\iclrruleroffset by -3.7pt
233
- %% \color[rgb]{.7,.7,.7}
234
- %% \AtTextUpperLeft{%
235
- %% \put(\LenToUnit{-35pt},\LenToUnit{-\iclrruleroffset}){%left ruler
236
- %% \iclrruler{\iclrrulercount}}
237
- %% }
238
- %% \fi
239
- %% }
240
- %%% To add a vertical bar on the side
241
- %\AddToShipoutPicture{
242
- %\AtTextLowerLeft{
243
- %\hspace*{-1.8cm}
244
- %\colorbox[rgb]{0.7,0.7,0.7}{\small \parbox[b][\textheight]{0.1cm}{}}}
245
- %}
 
outputs/outputs_20230608_115759/introduction.tex DELETED
@@ -1,11 +0,0 @@
1
- \section{Introduction}
2
-
3
- The rapid development of artificial intelligence and machine learning has led to significant advancements in various domains, including reinforcement learning (RL) and multi-agent systems. One particularly notable application of RL is in the domain of Atari games, where deep learning models have been successfully employed to learn control policies directly from high-dimensional sensory input \citep{mnih2013playing}. However, the centralized nature of traditional RL algorithms poses challenges in terms of scalability and privacy, motivating the exploration of decentralized RL approaches \citep{liu2022federated}. In this paper, we address the problem of playing Atari games using decentralized reinforcement learning, aiming to develop a scalable and privacy-preserving solution that maintains high performance.
4
-
5
- Our proposed solution builds upon recent advancements in decentralized RL, which have demonstrated promising results in various scenarios, such as collision avoidance \citep{thumiger2022a}, cooperative multi-agent reinforcement learning \citep{su2022ma2ql}, and edge-computing-empowered Internet of Things (IoT) networks \citep{lei2022adaptive}. While these works provide valuable insights, our approach specifically targets the unique challenges associated with playing Atari games, such as high-dimensional sensory input and complex decision-making processes. By leveraging the strengths of decentralized RL algorithms, we aim to outperform centralized approaches in terms of scalability and privacy while maintaining competitive performance.
6
-
7
- This paper makes three novel contributions to the field of decentralized reinforcement learning. First, we present a new decentralized RL algorithm specifically tailored for playing Atari games, addressing the challenges of high-dimensional sensory input and complex decision-making. Second, we provide a comprehensive analysis of the algorithm's performance, comparing it to state-of-the-art centralized and decentralized RL approaches on a diverse set of Atari games. Finally, we offer insights into the trade-offs between scalability, privacy, and performance in decentralized RL, highlighting the benefits and limitations of our proposed approach.
8
-
9
- To contextualize our work, we briefly discuss key related works in the field of decentralized RL. The Safe Dec-PG algorithm, proposed by \citet{lu2021decentralized}, is the first decentralized policy gradient method that accounts for coupled safety constraints in multi-agent reinforcement learning. Another relevant work is the decentralized collision avoidance approach by \citet{thumiger2022a}, which employs a unique architecture incorporating long-short term memory cells and a gradient-based reward function. While these works demonstrate the potential of decentralized RL, our approach specifically targets the challenges associated with playing Atari games, offering a novel solution in this domain.
10
-
11
- In summary, this paper presents a novel decentralized RL algorithm for playing Atari games, aiming to achieve high performance while maintaining scalability and privacy. By building upon recent advancements in decentralized RL, we contribute to the growing body of research in this area, offering valuable insights into the trade-offs between scalability, privacy, and performance in decentralized reinforcement learning.
 
outputs/outputs_20230608_115759/main.tex DELETED
@@ -1,35 +0,0 @@
1
- \documentclass{article} % For LaTeX2e
2
- \UseRawInputEncoding
3
- \usepackage{graphicx}
4
- \usepackage{booktabs}
5
- \usepackage{iclr2022_conference, times}
6
- \input{math_commands.tex}
7
- \usepackage{hyperref}
8
- \usepackage{url}
9
- \usepackage{algorithm}
10
- \usepackage{algpseudocode}
11
-
12
- \title{Playing Atari with Decentralized Reinforcement Learning}
13
- \author{GPT-4}
14
-
15
- \newcommand{\fix}{\marginpar{FIX}}
16
- \newcommand{\new}{\marginpar{NEW}}
17
-
18
- \begin{document}
19
- \maketitle
20
- \input{abstract.tex}
21
- \input{introduction.tex}
22
- \input{related works.tex}
23
- \input{backgrounds.tex}
24
- \input{methodology.tex}
25
- \input{experiments.tex}
26
- \input{conclusion.tex}
27
-
28
- \bibliography{ref}
29
- \bibliographystyle{iclr2022_conference}
30
-
31
- %\appendix
32
- %\section{Appendix}
33
- %You may include other additional sections here.
34
-
35
- \end{document}
 
outputs/outputs_20230608_115759/math_commands.tex DELETED
@@ -1,508 +0,0 @@
1
- %%%%% NEW MATH DEFINITIONS %%%%%
2
-
3
- \usepackage{amsmath,amsfonts,bm}
4
-
5
- % Mark sections of captions for referring to divisions of figures
6
- \newcommand{\figleft}{{\em (Left)}}
7
- \newcommand{\figcenter}{{\em (Center)}}
8
- \newcommand{\figright}{{\em (Right)}}
9
- \newcommand{\figtop}{{\em (Top)}}
10
- \newcommand{\figbottom}{{\em (Bottom)}}
11
- \newcommand{\captiona}{{\em (a)}}
12
- \newcommand{\captionb}{{\em (b)}}
13
- \newcommand{\captionc}{{\em (c)}}
14
- \newcommand{\captiond}{{\em (d)}}
15
-
16
- % Highlight a newly defined term
17
- \newcommand{\newterm}[1]{{\bf #1}}
18
-
19
-
20
- % Figure reference, lower-case.
21
- \def\figref#1{figure~\ref{#1}}
22
- % Figure reference, capital. For start of sentence
23
- \def\Figref#1{Figure~\ref{#1}}
24
- \def\twofigref#1#2{figures \ref{#1} and \ref{#2}}
25
- \def\quadfigref#1#2#3#4{figures \ref{#1}, \ref{#2}, \ref{#3} and \ref{#4}}
26
- % Section reference, lower-case.
27
- \def\secref#1{section~\ref{#1}}
28
- % Section reference, capital.
29
- \def\Secref#1{Section~\ref{#1}}
30
- % Reference to two sections.
31
- \def\twosecrefs#1#2{sections \ref{#1} and \ref{#2}}
32
- % Reference to three sections.
33
- \def\secrefs#1#2#3{sections \ref{#1}, \ref{#2} and \ref{#3}}
34
- % Reference to an equation, lower-case.
35
- \def\eqref#1{equation~\ref{#1}}
36
- % Reference to an equation, upper case
37
- \def\Eqref#1{Equation~\ref{#1}}
38
- % A raw reference to an equation---avoid using if possible
39
- \def\plaineqref#1{\ref{#1}}
40
- % Reference to a chapter, lower-case.
41
- \def\chapref#1{chapter~\ref{#1}}
42
- % Reference to an equation, upper case.
43
- \def\Chapref#1{Chapter~\ref{#1}}
44
- % Reference to a range of chapters
45
- \def\rangechapref#1#2{chapters\ref{#1}--\ref{#2}}
46
- % Reference to an algorithm, lower-case.
47
- \def\algref#1{algorithm~\ref{#1}}
48
- % Reference to an algorithm, upper case.
49
- \def\Algref#1{Algorithm~\ref{#1}}
50
- \def\twoalgref#1#2{algorithms \ref{#1} and \ref{#2}}
51
- \def\Twoalgref#1#2{Algorithms \ref{#1} and \ref{#2}}
52
- % Reference to a part, lower case
53
- \def\partref#1{part~\ref{#1}}
54
- % Reference to a part, upper case
55
- \def\Partref#1{Part~\ref{#1}}
56
- \def\twopartref#1#2{parts \ref{#1} and \ref{#2}}
57
-
58
- \def\ceil#1{\lceil #1 \rceil}
59
- \def\floor#1{\lfloor #1 \rfloor}
60
- \def\1{\bm{1}}
61
- \newcommand{\train}{\mathcal{D}}
62
- \newcommand{\valid}{\mathcal{D_{\mathrm{valid}}}}
63
- \newcommand{\test}{\mathcal{D_{\mathrm{test}}}}
64
-
65
- \def\eps{{\epsilon}}
66
-
67
-
68
- % Random variables
69
- \def\reta{{\textnormal{$\eta$}}}
70
- \def\ra{{\textnormal{a}}}
71
- \def\rb{{\textnormal{b}}}
72
- \def\rc{{\textnormal{c}}}
73
- \def\rd{{\textnormal{d}}}
74
- \def\re{{\textnormal{e}}}
75
- \def\rf{{\textnormal{f}}}
76
- \def\rg{{\textnormal{g}}}
77
- \def\rh{{\textnormal{h}}}
78
- \def\ri{{\textnormal{i}}}
79
- \def\rj{{\textnormal{j}}}
80
- \def\rk{{\textnormal{k}}}
81
- \def\rl{{\textnormal{l}}}
82
- % rm is already a command, just don't name any random variables m
83
- \def\rn{{\textnormal{n}}}
84
- \def\ro{{\textnormal{o}}}
85
- \def\rp{{\textnormal{p}}}
86
- \def\rq{{\textnormal{q}}}
87
- \def\rr{{\textnormal{r}}}
88
- \def\rs{{\textnormal{s}}}
89
- \def\rt{{\textnormal{t}}}
90
- \def\ru{{\textnormal{u}}}
91
- \def\rv{{\textnormal{v}}}
92
- \def\rw{{\textnormal{w}}}
93
- \def\rx{{\textnormal{x}}}
94
- \def\ry{{\textnormal{y}}}
95
- \def\rz{{\textnormal{z}}}
96
-
97
- % Random vectors
98
- \def\rvepsilon{{\mathbf{\epsilon}}}
99
- \def\rvtheta{{\mathbf{\theta}}}
100
- \def\rva{{\mathbf{a}}}
101
- \def\rvb{{\mathbf{b}}}
102
- \def\rvc{{\mathbf{c}}}
103
- \def\rvd{{\mathbf{d}}}
104
- \def\rve{{\mathbf{e}}}
105
- \def\rvf{{\mathbf{f}}}
106
- \def\rvg{{\mathbf{g}}}
107
- \def\rvh{{\mathbf{h}}}
108
- \def\rvu{{\mathbf{i}}}
109
- \def\rvj{{\mathbf{j}}}
110
- \def\rvk{{\mathbf{k}}}
111
- \def\rvl{{\mathbf{l}}}
112
- \def\rvm{{\mathbf{m}}}
113
- \def\rvn{{\mathbf{n}}}
114
- \def\rvo{{\mathbf{o}}}
115
- \def\rvp{{\mathbf{p}}}
116
- \def\rvq{{\mathbf{q}}}
117
- \def\rvr{{\mathbf{r}}}
118
- \def\rvs{{\mathbf{s}}}
119
- \def\rvt{{\mathbf{t}}}
120
- \def\rvu{{\mathbf{u}}}
121
- \def\rvv{{\mathbf{v}}}
122
- \def\rvw{{\mathbf{w}}}
123
- \def\rvx{{\mathbf{x}}}
124
- \def\rvy{{\mathbf{y}}}
125
- \def\rvz{{\mathbf{z}}}
126
-
127
- % Elements of random vectors
128
- \def\erva{{\textnormal{a}}}
129
- \def\ervb{{\textnormal{b}}}
130
- \def\ervc{{\textnormal{c}}}
131
- \def\ervd{{\textnormal{d}}}
132
- \def\erve{{\textnormal{e}}}
133
- \def\ervf{{\textnormal{f}}}
134
- \def\ervg{{\textnormal{g}}}
135
- \def\ervh{{\textnormal{h}}}
136
- \def\ervi{{\textnormal{i}}}
137
- \def\ervj{{\textnormal{j}}}
138
- \def\ervk{{\textnormal{k}}}
139
- \def\ervl{{\textnormal{l}}}
140
- \def\ervm{{\textnormal{m}}}
141
- \def\ervn{{\textnormal{n}}}
142
- \def\ervo{{\textnormal{o}}}
143
- \def\ervp{{\textnormal{p}}}
144
- \def\ervq{{\textnormal{q}}}
145
- \def\ervr{{\textnormal{r}}}
146
- \def\ervs{{\textnormal{s}}}
147
- \def\ervt{{\textnormal{t}}}
148
- \def\ervu{{\textnormal{u}}}
149
- \def\ervv{{\textnormal{v}}}
150
- \def\ervw{{\textnormal{w}}}
151
- \def\ervx{{\textnormal{x}}}
152
- \def\ervy{{\textnormal{y}}}
153
- \def\ervz{{\textnormal{z}}}
154
-
155
- % Random matrices
156
- \def\rmA{{\mathbf{A}}}
157
- \def\rmB{{\mathbf{B}}}
158
- \def\rmC{{\mathbf{C}}}
159
- \def\rmD{{\mathbf{D}}}
160
- \def\rmE{{\mathbf{E}}}
161
- \def\rmF{{\mathbf{F}}}
162
- \def\rmG{{\mathbf{G}}}
163
- \def\rmH{{\mathbf{H}}}
164
- \def\rmI{{\mathbf{I}}}
165
- \def\rmJ{{\mathbf{J}}}
166
- \def\rmK{{\mathbf{K}}}
167
- \def\rmL{{\mathbf{L}}}
168
- \def\rmM{{\mathbf{M}}}
169
- \def\rmN{{\mathbf{N}}}
170
- \def\rmO{{\mathbf{O}}}
171
- \def\rmP{{\mathbf{P}}}
172
- \def\rmQ{{\mathbf{Q}}}
173
- \def\rmR{{\mathbf{R}}}
174
- \def\rmS{{\mathbf{S}}}
175
- \def\rmT{{\mathbf{T}}}
176
- \def\rmU{{\mathbf{U}}}
177
- \def\rmV{{\mathbf{V}}}
178
- \def\rmW{{\mathbf{W}}}
179
- \def\rmX{{\mathbf{X}}}
180
- \def\rmY{{\mathbf{Y}}}
181
- \def\rmZ{{\mathbf{Z}}}
182
-
183
- % Elements of random matrices
184
- \def\ermA{{\textnormal{A}}}
185
- \def\ermB{{\textnormal{B}}}
186
- \def\ermC{{\textnormal{C}}}
187
- \def\ermD{{\textnormal{D}}}
188
- \def\ermE{{\textnormal{E}}}
189
- \def\ermF{{\textnormal{F}}}
190
- \def\ermG{{\textnormal{G}}}
191
- \def\ermH{{\textnormal{H}}}
192
- \def\ermI{{\textnormal{I}}}
193
- \def\ermJ{{\textnormal{J}}}
194
- \def\ermK{{\textnormal{K}}}
195
- \def\ermL{{\textnormal{L}}}
196
- \def\ermM{{\textnormal{M}}}
197
- \def\ermN{{\textnormal{N}}}
198
- \def\ermO{{\textnormal{O}}}
199
- \def\ermP{{\textnormal{P}}}
200
- \def\ermQ{{\textnormal{Q}}}
201
- \def\ermR{{\textnormal{R}}}
202
- \def\ermS{{\textnormal{S}}}
203
- \def\ermT{{\textnormal{T}}}
204
- \def\ermU{{\textnormal{U}}}
205
- \def\ermV{{\textnormal{V}}}
206
- \def\ermW{{\textnormal{W}}}
207
- \def\ermX{{\textnormal{X}}}
208
- \def\ermY{{\textnormal{Y}}}
209
- \def\ermZ{{\textnormal{Z}}}
210
-
211
- % Vectors
212
- \def\vzero{{\bm{0}}}
213
- \def\vone{{\bm{1}}}
214
- \def\vmu{{\bm{\mu}}}
215
- \def\vtheta{{\bm{\theta}}}
216
- \def\va{{\bm{a}}}
217
- \def\vb{{\bm{b}}}
218
- \def\vc{{\bm{c}}}
219
- \def\vd{{\bm{d}}}
220
- \def\ve{{\bm{e}}}
221
- \def\vf{{\bm{f}}}
222
- \def\vg{{\bm{g}}}
223
- \def\vh{{\bm{h}}}
224
- \def\vi{{\bm{i}}}
225
- \def\vj{{\bm{j}}}
226
- \def\vk{{\bm{k}}}
227
- \def\vl{{\bm{l}}}
228
- \def\vm{{\bm{m}}}
229
- \def\vn{{\bm{n}}}
230
- \def\vo{{\bm{o}}}
231
- \def\vp{{\bm{p}}}
232
- \def\vq{{\bm{q}}}
233
- \def\vr{{\bm{r}}}
234
- \def\vs{{\bm{s}}}
235
- \def\vt{{\bm{t}}}
236
- \def\vu{{\bm{u}}}
237
- \def\vv{{\bm{v}}}
238
- \def\vw{{\bm{w}}}
239
- \def\vx{{\bm{x}}}
240
- \def\vy{{\bm{y}}}
241
- \def\vz{{\bm{z}}}
242
-
243
- % Elements of vectors
244
- \def\evalpha{{\alpha}}
245
- \def\evbeta{{\beta}}
246
- \def\evepsilon{{\epsilon}}
247
- \def\evlambda{{\lambda}}
248
- \def\evomega{{\omega}}
249
- \def\evmu{{\mu}}
250
- \def\evpsi{{\psi}}
251
- \def\evsigma{{\sigma}}
252
- \def\evtheta{{\theta}}
253
- \def\eva{{a}}
254
- \def\evb{{b}}
255
- \def\evc{{c}}
256
- \def\evd{{d}}
257
- \def\eve{{e}}
258
- \def\evf{{f}}
259
- \def\evg{{g}}
260
- \def\evh{{h}}
261
- \def\evi{{i}}
262
- \def\evj{{j}}
263
- \def\evk{{k}}
264
- \def\evl{{l}}
265
- \def\evm{{m}}
266
- \def\evn{{n}}
267
- \def\evo{{o}}
268
- \def\evp{{p}}
269
- \def\evq{{q}}
270
- \def\evr{{r}}
271
- \def\evs{{s}}
272
- \def\evt{{t}}
273
- \def\evu{{u}}
274
- \def\evv{{v}}
275
- \def\evw{{w}}
276
- \def\evx{{x}}
277
- \def\evy{{y}}
278
- \def\evz{{z}}
279
-
280
- % Matrix
281
- \def\mA{{\bm{A}}}
282
- \def\mB{{\bm{B}}}
283
- \def\mC{{\bm{C}}}
284
- \def\mD{{\bm{D}}}
285
- \def\mE{{\bm{E}}}
286
- \def\mF{{\bm{F}}}
287
- \def\mG{{\bm{G}}}
288
- \def\mH{{\bm{H}}}
289
- \def\mI{{\bm{I}}}
290
- \def\mJ{{\bm{J}}}
291
- \def\mK{{\bm{K}}}
292
- \def\mL{{\bm{L}}}
293
- \def\mM{{\bm{M}}}
294
- \def\mN{{\bm{N}}}
295
- \def\mO{{\bm{O}}}
296
- \def\mP{{\bm{P}}}
297
- \def\mQ{{\bm{Q}}}
298
- \def\mR{{\bm{R}}}
299
- \def\mS{{\bm{S}}}
300
- \def\mT{{\bm{T}}}
301
- \def\mU{{\bm{U}}}
302
- \def\mV{{\bm{V}}}
303
- \def\mW{{\bm{W}}}
304
- \def\mX{{\bm{X}}}
305
- \def\mY{{\bm{Y}}}
306
- \def\mZ{{\bm{Z}}}
307
- \def\mBeta{{\bm{\beta}}}
308
- \def\mPhi{{\bm{\Phi}}}
309
- \def\mLambda{{\bm{\Lambda}}}
310
- \def\mSigma{{\bm{\Sigma}}}
311
-
312
- % Tensor
313
- \DeclareMathAlphabet{\mathsfit}{\encodingdefault}{\sfdefault}{m}{sl}
314
- \SetMathAlphabet{\mathsfit}{bold}{\encodingdefault}{\sfdefault}{bx}{n}
315
- \newcommand{\tens}[1]{\bm{\mathsfit{#1}}}
316
- \def\tA{{\tens{A}}}
317
- \def\tB{{\tens{B}}}
318
- \def\tC{{\tens{C}}}
319
- \def\tD{{\tens{D}}}
320
- \def\tE{{\tens{E}}}
321
- \def\tF{{\tens{F}}}
322
- \def\tG{{\tens{G}}}
323
- \def\tH{{\tens{H}}}
324
- \def\tI{{\tens{I}}}
325
- \def\tJ{{\tens{J}}}
326
- \def\tK{{\tens{K}}}
327
- \def\tL{{\tens{L}}}
328
- \def\tM{{\tens{M}}}
329
- \def\tN{{\tens{N}}}
330
- \def\tO{{\tens{O}}}
331
- \def\tP{{\tens{P}}}
332
- \def\tQ{{\tens{Q}}}
333
- \def\tR{{\tens{R}}}
334
- \def\tS{{\tens{S}}}
335
- \def\tT{{\tens{T}}}
336
- \def\tU{{\tens{U}}}
337
- \def\tV{{\tens{V}}}
338
- \def\tW{{\tens{W}}}
339
- \def\tX{{\tens{X}}}
340
- \def\tY{{\tens{Y}}}
341
- \def\tZ{{\tens{Z}}}
342
-
343
-
344
- % Graph
345
- \def\gA{{\mathcal{A}}}
346
- \def\gB{{\mathcal{B}}}
347
- \def\gC{{\mathcal{C}}}
348
- \def\gD{{\mathcal{D}}}
349
- \def\gE{{\mathcal{E}}}
350
- \def\gF{{\mathcal{F}}}
351
- \def\gG{{\mathcal{G}}}
352
- \def\gH{{\mathcal{H}}}
353
- \def\gI{{\mathcal{I}}}
354
- \def\gJ{{\mathcal{J}}}
355
- \def\gK{{\mathcal{K}}}
356
- \def\gL{{\mathcal{L}}}
357
- \def\gM{{\mathcal{M}}}
358
- \def\gN{{\mathcal{N}}}
359
- \def\gO{{\mathcal{O}}}
360
- \def\gP{{\mathcal{P}}}
361
- \def\gQ{{\mathcal{Q}}}
362
- \def\gR{{\mathcal{R}}}
363
- \def\gS{{\mathcal{S}}}
364
- \def\gT{{\mathcal{T}}}
365
- \def\gU{{\mathcal{U}}}
366
- \def\gV{{\mathcal{V}}}
367
- \def\gW{{\mathcal{W}}}
368
- \def\gX{{\mathcal{X}}}
369
- \def\gY{{\mathcal{Y}}}
370
- \def\gZ{{\mathcal{Z}}}
371
-
372
- % Sets
373
- \def\sA{{\mathbb{A}}}
374
- \def\sB{{\mathbb{B}}}
375
- \def\sC{{\mathbb{C}}}
376
- \def\sD{{\mathbb{D}}}
377
- % Don't use a set called E, because this would be the same as our symbol
378
- % for expectation.
379
- \def\sF{{\mathbb{F}}}
380
- \def\sG{{\mathbb{G}}}
381
- \def\sH{{\mathbb{H}}}
382
- \def\sI{{\mathbb{I}}}
383
- \def\sJ{{\mathbb{J}}}
384
- \def\sK{{\mathbb{K}}}
385
- \def\sL{{\mathbb{L}}}
386
- \def\sM{{\mathbb{M}}}
387
- \def\sN{{\mathbb{N}}}
388
- \def\sO{{\mathbb{O}}}
389
- \def\sP{{\mathbb{P}}}
390
- \def\sQ{{\mathbb{Q}}}
391
- \def\sR{{\mathbb{R}}}
392
- \def\sS{{\mathbb{S}}}
393
- \def\sT{{\mathbb{T}}}
394
- \def\sU{{\mathbb{U}}}
395
- \def\sV{{\mathbb{V}}}
396
- \def\sW{{\mathbb{W}}}
397
- \def\sX{{\mathbb{X}}}
398
- \def\sY{{\mathbb{Y}}}
399
- \def\sZ{{\mathbb{Z}}}
400
-
401
- % Entries of a matrix
402
- \def\emLambda{{\Lambda}}
403
- \def\emA{{A}}
404
- \def\emB{{B}}
405
- \def\emC{{C}}
406
- \def\emD{{D}}
407
- \def\emE{{E}}
408
- \def\emF{{F}}
409
- \def\emG{{G}}
410
- \def\emH{{H}}
411
- \def\emI{{I}}
412
- \def\emJ{{J}}
413
- \def\emK{{K}}
414
- \def\emL{{L}}
415
- \def\emM{{M}}
416
- \def\emN{{N}}
417
- \def\emO{{O}}
418
- \def\emP{{P}}
419
- \def\emQ{{Q}}
420
- \def\emR{{R}}
421
- \def\emS{{S}}
422
- \def\emT{{T}}
423
- \def\emU{{U}}
424
- \def\emV{{V}}
425
- \def\emW{{W}}
426
- \def\emX{{X}}
427
- \def\emY{{Y}}
428
- \def\emZ{{Z}}
429
- \def\emSigma{{\Sigma}}
430
-
431
- % entries of a tensor
432
- % Same font as tensor, without \bm wrapper
433
- \newcommand{\etens}[1]{\mathsfit{#1}}
434
- \def\etLambda{{\etens{\Lambda}}}
435
- \def\etA{{\etens{A}}}
436
- \def\etB{{\etens{B}}}
437
- \def\etC{{\etens{C}}}
438
- \def\etD{{\etens{D}}}
439
- \def\etE{{\etens{E}}}
440
- \def\etF{{\etens{F}}}
441
- \def\etG{{\etens{G}}}
442
- \def\etH{{\etens{H}}}
443
- \def\etI{{\etens{I}}}
444
- \def\etJ{{\etens{J}}}
445
- \def\etK{{\etens{K}}}
446
- \def\etL{{\etens{L}}}
447
- \def\etM{{\etens{M}}}
448
- \def\etN{{\etens{N}}}
449
- \def\etO{{\etens{O}}}
450
- \def\etP{{\etens{P}}}
451
- \def\etQ{{\etens{Q}}}
452
- \def\etR{{\etens{R}}}
453
- \def\etS{{\etens{S}}}
454
- \def\etT{{\etens{T}}}
455
- \def\etU{{\etens{U}}}
456
- \def\etV{{\etens{V}}}
457
- \def\etW{{\etens{W}}}
458
- \def\etX{{\etens{X}}}
459
- \def\etY{{\etens{Y}}}
460
- \def\etZ{{\etens{Z}}}
461
-
462
- % The true underlying data generating distribution
463
- \newcommand{\pdata}{p_{\rm{data}}}
464
- % The empirical distribution defined by the training set
465
- \newcommand{\ptrain}{\hat{p}_{\rm{data}}}
466
- \newcommand{\Ptrain}{\hat{P}_{\rm{data}}}
467
- % The model distribution
468
- \newcommand{\pmodel}{p_{\rm{model}}}
469
- \newcommand{\Pmodel}{P_{\rm{model}}}
470
- \newcommand{\ptildemodel}{\tilde{p}_{\rm{model}}}
471
- % Stochastic autoencoder distributions
472
- \newcommand{\pencode}{p_{\rm{encoder}}}
473
- \newcommand{\pdecode}{p_{\rm{decoder}}}
474
- \newcommand{\precons}{p_{\rm{reconstruct}}}
475
-
476
- \newcommand{\laplace}{\mathrm{Laplace}} % Laplace distribution
477
-
478
- \newcommand{\E}{\mathbb{E}}
479
- \newcommand{\Ls}{\mathcal{L}}
480
- \newcommand{\R}{\mathbb{R}}
481
- \newcommand{\emp}{\tilde{p}}
482
- \newcommand{\lr}{\alpha}
483
- \newcommand{\reg}{\lambda}
484
- \newcommand{\rect}{\mathrm{rectifier}}
485
- \newcommand{\softmax}{\mathrm{softmax}}
486
- \newcommand{\sigmoid}{\sigma}
487
- \newcommand{\softplus}{\zeta}
488
- \newcommand{\KL}{D_{\mathrm{KL}}}
489
- \newcommand{\Var}{\mathrm{Var}}
490
- \newcommand{\standarderror}{\mathrm{SE}}
491
- \newcommand{\Cov}{\mathrm{Cov}}
492
- % Wolfram Mathworld says $L^2$ is for function spaces and $\ell^2$ is for vectors
493
- % But then they seem to use $L^2$ for vectors throughout the site, and so does
494
- % wikipedia.
495
- \newcommand{\normlzero}{L^0}
496
- \newcommand{\normlone}{L^1}
497
- \newcommand{\normltwo}{L^2}
498
- \newcommand{\normlp}{L^p}
499
- \newcommand{\normmax}{L^\infty}
500
-
501
- \newcommand{\parents}{Pa} % See usage in notation.tex. Chosen to match Daphne's book.
502
-
503
- \DeclareMathOperator*{\argmax}{arg\,max}
504
- \DeclareMathOperator*{\argmin}{arg\,min}
505
-
506
- \DeclareMathOperator{\sign}{sign}
507
- \DeclareMathOperator{\Tr}{Tr}
508
- \let\ab\allowbreak
 
outputs/outputs_20230608_115759/methodology.tex DELETED
@@ -1,68 +0,0 @@
1
- \section{methodology}
2
-
3
- In this section, we present the methodology of our proposed decentralized reinforcement learning (RL) algorithm for playing Atari games. We begin with a high-level overview of the method, followed by a detailed formulation of the algorithm and an explanation of how it overcomes the weaknesses of existing methods. Finally, we highlight the key concepts in our approach and elaborate on their novelty using formulas and figures.
4
-
5
- \subsection{Overview of the Proposed Method}
6
-
7
- Our proposed method, Decentralized Atari Learning (DAL), combines the strengths of both value-based and policy-based decentralized RL algorithms to address the challenges of high-dimensional sensory input and complex decision-making processes in Atari games. The key components of DAL include a decentralized Q-learning framework, a policy gradient-based optimization technique, and a novel communication mechanism that enables agents to share information and coordinate their actions while preserving privacy and reducing communication overhead. Figure \ref{fig1} provides a high-level illustration of the DAL architecture.
8
-
9
- \begin{figure}[h]
10
- \centering
11
- \includegraphics[width=0.8\textwidth]{fig1.png}
12
- \caption{High-level architecture of the Decentralized Atari Learning (DAL) algorithm.}
13
- \label{fig1}
14
- \end{figure}
15
-
16
- \subsection{Formulation of the Decentralized Atari Learning Algorithm}
17
-
18
- The DAL algorithm is designed to overcome the weaknesses of existing decentralized RL methods by incorporating techniques from deep RL, such as experience replay and target networks, to improve stability and convergence. The algorithm consists of the following main steps:
19
-
20
- \begin{algorithm}[h]
21
- \caption{Decentralized Atari Learning (DAL)}
22
- \begin{algorithmic}[1]
23
- \STATE Initialize the decentralized Q-network $Q(s, a; \theta)$ and the target network $Q(s, a; \theta^-)$ with random weights $\theta$ and $\theta^-$.
24
- \FOR{each agent $i$}
25
- \STATE Initialize the experience replay buffer $D_i$.
26
- \FOR{each episode}
27
- \STATE Initialize the state $s$.
28
- \FOR{each time step $t$}
29
- \STATE Agent $i$ selects an action $a$ according to its local policy $\pi_i$ and the decentralized Q-network $Q(s, a; \theta)$.
30
- \STATE Agent $i$ takes action $a$, observes the next state $s'$ and reward $r$, and stores the transition $(s, a, r, s')$ in its experience replay buffer $D_i$.
31
- \STATE Agent $i$ samples a mini-batch of transitions from $D_i$ and computes the target values $y = r + \gamma \max_{a'} Q(s', a'; \theta^-)$.
32
- \STATE Agent $i$ updates the decentralized Q-network $Q(s, a; \theta)$ using the policy gradient-based optimization technique.
33
- \STATE Agent $i$ updates the target network $Q(s, a; \theta^-)$ with the weights of the decentralized Q-network $Q(s, a; \theta)$.
34
- \STATE Agent $i$ communicates with neighboring agents to share information and coordinate actions while preserving privacy and reducing communication overhead.
35
- \STATE Update the state $s \leftarrow s'$.
36
- \ENDFOR
37
- \ENDFOR
38
- \ENDFOR
39
- \end{algorithmic}
40
- \end{algorithm}
41
-
42
- \subsection{Key Concepts and Novelty of the Decentralized Atari Learning Algorithm}
43
-
44
- The novelty of the DAL algorithm lies in its combination of value-based and policy-based decentralized RL techniques, as well as its unique communication mechanism that enables agents to share information and coordinate their actions while preserving privacy and reducing communication overhead. In this subsection, we elaborate on these key concepts using formulas and figures.
45
-
46
- \paragraph{Decentralized Q-learning and Policy Gradient Optimization}
47
-
48
- The DAL algorithm builds upon the decentralized Q-learning framework and incorporates a policy gradient-based optimization technique to balance the trade-offs between exploration and exploitation. The decentralized Q-network $Q(s, a; \theta)$ is used to estimate the action-value function, while the policy gradient-based optimization technique is employed to update the network weights $\theta$. This combination allows the algorithm to learn more efficiently in high-dimensional state spaces and complex decision-making processes, as illustrated in Figure \ref{fig2}.
49
-
50
- \begin{figure}[h]
51
- \centering
52
- \includegraphics[width=0.8\textwidth]{fig2.png}
53
- \caption{Illustration of the decentralized Q-learning and policy gradient optimization in the DAL algorithm.}
54
- \label{fig2}
55
- \end{figure}
56
-
57
- \paragraph{Novel Communication Mechanism}
58
-
59
- The communication mechanism in DAL enables agents to share information and coordinate their actions while preserving privacy and reducing communication overhead. This is achieved through a secure and efficient communication protocol that allows agents to exchange only the necessary information for coordination, without revealing their entire state or action history. Figure \ref{fig3} provides an illustration of the communication mechanism in the DAL algorithm.
60
-
61
- \begin{figure}[h]
62
- \centering
63
- \includegraphics[width=0.8\textwidth]{fig3.png}
64
- \caption{Illustration of the novel communication mechanism in the DAL algorithm.}
65
- \label{fig3}
66
- \end{figure}
67
-
68
- In summary, our proposed Decentralized Atari Learning (DAL) algorithm combines the strengths of both value-based and policy-based decentralized RL techniques and introduces a novel communication mechanism to address the challenges of high-dimensional sensory input and complex decision-making processes in Atari games. The algorithm demonstrates competitive performance compared to centralized methods and outperforms existing decentralized RL algorithms in the Atari domain.
 
outputs/outputs_20230608_115759/natbib.sty DELETED
@@ -1,1246 +0,0 @@
1
- %%
2
- %% This is file `natbib.sty',
3
- %% generated with the docstrip utility.
4
- %%
5
- %% The original source files were:
6
- %%
7
- %% natbib.dtx (with options: `package,all')
8
- %% =============================================
9
- %% IMPORTANT NOTICE:
10
- %%
11
- %% This program can be redistributed and/or modified under the terms
12
- %% of the LaTeX Project Public License Distributed from CTAN
13
- %% archives in directory macros/latex/base/lppl.txt; either
14
- %% version 1 of the License, or any later version.
15
- %%
16
- %% This is a generated file.
17
- %% It may not be distributed without the original source file natbib.dtx.
18
- %%
19
- %% Full documentation can be obtained by LaTeXing that original file.
20
- %% Only a few abbreviated comments remain here to describe the usage.
21
- %% =============================================
22
- %% Copyright 1993-2009 Patrick W Daly
23
- %% Max-Planck-Institut f\"ur Sonnensystemforschung
24
- %% Max-Planck-Str. 2
25
- %% D-37191 Katlenburg-Lindau
26
- %% Germany
27
- %% E-mail: daly@mps.mpg.de
28
- \NeedsTeXFormat{LaTeX2e}[1995/06/01]
29
- \ProvidesPackage{natbib}
30
- [2009/07/16 8.31 (PWD, AO)]
31
-
32
- % This package reimplements the LaTeX \cite command to be used for various
33
- % citation styles, both author-year and numerical. It accepts BibTeX
34
- % output intended for many other packages, and therefore acts as a
35
- % general, all-purpose citation-style interface.
36
- %
37
- % With standard numerical .bst files, only numerical citations are
38
- % possible. With an author-year .bst file, both numerical and
39
- % author-year citations are possible.
40
- %
41
- % If author-year citations are selected, \bibitem must have one of the
42
- % following forms:
43
- % \bibitem[Jones et al.(1990)]{key}...
44
- % \bibitem[Jones et al.(1990)Jones, Baker, and Williams]{key}...
45
- % \bibitem[Jones et al., 1990]{key}...
46
- % \bibitem[\protect\citeauthoryear{Jones, Baker, and Williams}{Jones
47
- % et al.}{1990}]{key}...
48
- % \bibitem[\protect\citeauthoryear{Jones et al.}{1990}]{key}...
49
- % \bibitem[\protect\astroncite{Jones et al.}{1990}]{key}...
50
- % \bibitem[\protect\citename{Jones et al., }1990]{key}...
51
- % \harvarditem[Jones et al.]{Jones, Baker, and Williams}{1990}{key}...
52
- %
53
- % This is either to be made up manually, or to be generated by an
54
- % appropriate .bst file with BibTeX.
55
- % Author-year mode || Numerical mode
56
- % Then, \citet{key} ==>> Jones et al. (1990) || Jones et al. [21]
57
- % \citep{key} ==>> (Jones et al., 1990) || [21]
58
- % Multiple citations as normal:
59
- % \citep{key1,key2} ==>> (Jones et al., 1990; Smith, 1989) || [21,24]
60
- % or (Jones et al., 1990, 1991) || [21,24]
61
- % or (Jones et al., 1990a,b) || [21,24]
62
- % \cite{key} is the equivalent of \citet{key} in author-year mode
63
- % and of \citep{key} in numerical mode
64
- % Full author lists may be forced with \citet* or \citep*, e.g.
65
- % \citep*{key} ==>> (Jones, Baker, and Williams, 1990)
66
- % Optional notes as:
67
- % \citep[chap. 2]{key} ==>> (Jones et al., 1990, chap. 2)
68
- % \citep[e.g.,][]{key} ==>> (e.g., Jones et al., 1990)
69
- % \citep[see][pg. 34]{key}==>> (see Jones et al., 1990, pg. 34)
70
- % (Note: in standard LaTeX, only one note is allowed, after the ref.
71
- % Here, one note is like the standard, two make pre- and post-notes.)
72
- % \citealt{key} ==>> Jones et al. 1990
73
- % \citealt*{key} ==>> Jones, Baker, and Williams 1990
74
- % \citealp{key} ==>> Jones et al., 1990
75
- % \citealp*{key} ==>> Jones, Baker, and Williams, 1990
76
- % Additional citation possibilities (both author-year and numerical modes)
77
- % \citeauthor{key} ==>> Jones et al.
78
- % \citeauthor*{key} ==>> Jones, Baker, and Williams
79
- % \citeyear{key} ==>> 1990
80
- % \citeyearpar{key} ==>> (1990)
81
- % \citetext{priv. comm.} ==>> (priv. comm.)
82
- % \citenum{key} ==>> 11 [non-superscripted]
83
- % Note: full author lists depends on whether the bib style supports them;
84
- % if not, the abbreviated list is printed even when full requested.
85
- %
86
- % For names like della Robbia at the start of a sentence, use
87
- % \Citet{dRob98} ==>> Della Robbia (1998)
88
- % \Citep{dRob98} ==>> (Della Robbia, 1998)
89
- % \Citeauthor{dRob98} ==>> Della Robbia
90
- %
91
- %
92
- % Citation aliasing is achieved with
93
- % \defcitealias{key}{text}
94
- % \citetalias{key} ==>> text
95
- % \citepalias{key} ==>> (text)
96
- %
97
- % Defining the citation mode and punctual (citation style)
98
- % \setcitestyle{<comma-separated list of keywords, same
99
- % as the package options>}
100
- % Example: \setcitestyle{square,semicolon}
101
- % Alternatively:
102
- % Use \bibpunct with 6 mandatory arguments:
103
- % 1. opening bracket for citation
104
- % 2. closing bracket
105
- % 3. citation separator (for multiple citations in one \cite)
106
- % 4. the letter n for numerical styles, s for superscripts
107
- % else anything for author-year
108
- % 5. punctuation between authors and date
109
- % 6. punctuation between years (or numbers) when common authors missing
110
- % One optional argument is the character coming before post-notes. It
111
- % appears in square braces before all other arguments. May be left off.
112
- % Example (and default) \bibpunct[, ]{(}{)}{;}{a}{,}{,}
113
- %
114
- % To make this automatic for a given bib style, named newbib, say, make
115
- % a local configuration file, natbib.cfg, with the definition
116
- % \newcommand{\bibstyle@newbib}{\bibpunct...}
117
- % Then the \bibliographystyle{newbib} will cause \bibstyle@newbib to
118
- % be called on THE NEXT LATEX RUN (via the aux file).
119
- %
120
- % Such preprogrammed definitions may be invoked anywhere in the text
121
- % by calling \citestyle{newbib}. This is only useful if the style specified
122
- % differs from that in \bibliographystyle.
123
- %
124
- % With \citeindextrue and \citeindexfalse, one can control whether the
125
- % \cite commands make an automatic entry of the citation in the .idx
126
- % indexing file. For this, \makeindex must also be given in the preamble.
127
- %
128
- % Package Options: (for selecting punctuation)
129
- % round - round parentheses are used (default)
130
- % square - square brackets are used [option]
131
- % curly - curly braces are used {option}
132
- % angle - angle brackets are used <option>
133
- % semicolon - multiple citations separated by semi-colon (default)
134
- % colon - same as semicolon, an earlier confusion
135
- % comma - separated by comma
136
- % authoryear - selects author-year citations (default)
137
- % numbers- selects numerical citations
138
- % super - numerical citations as superscripts
139
- % sort - sorts multiple citations according to order in ref. list
140
- % sort&compress - like sort, but also compresses numerical citations
141
- % compress - compresses without sorting
142
- % longnamesfirst - makes first citation full author list
143
- % sectionbib - puts bibliography in a \section* instead of \chapter*
144
- % merge - allows the citation key to have a * prefix,
145
- % signifying to merge its reference with that of the previous citation.
146
- % elide - if references are merged, repeated portions of later ones may be removed.
147
- % mcite - recognizes and ignores the * prefix for merging.
148
- % Punctuation so selected dominates over any predefined ones.
149
- % Package options are called as, e.g.
150
- % \usepackage[square,comma]{natbib}
151
- % LaTeX the source file natbib.dtx to obtain more details
152
- % or the file natnotes.tex for a brief reference sheet.
153
- %-----------------------------------------------------------
154
- \providecommand\@ifxundefined[1]{%
155
- \ifx#1\@undefined\expandafter\@firstoftwo\else\expandafter\@secondoftwo\fi
156
- }%
157
- \providecommand\@ifnum[1]{%
158
- \ifnum#1\expandafter\@firstoftwo\else\expandafter\@secondoftwo\fi
159
- }%
160
- \providecommand\@ifx[1]{%
161
- \ifx#1\expandafter\@firstoftwo\else\expandafter\@secondoftwo\fi
162
- }%
163
- \providecommand\appdef[2]{%
164
- \toks@\expandafter{#1}\@temptokena{#2}%
165
- \edef#1{\the\toks@\the\@temptokena}%
166
- }%
167
- \@ifclassloaded{agu2001}{\PackageError{natbib}
168
- {The agu2001 class already includes natbib coding,\MessageBreak
169
- so you should not add it explicitly}
170
- {Type <Return> for now, but then later remove\MessageBreak
171
- the command \protect\usepackage{natbib} from the document}
172
- \endinput}{}
173
- \@ifclassloaded{agutex}{\PackageError{natbib}
174
- {The AGUTeX class already includes natbib coding,\MessageBreak
175
- so you should not add it explicitly}
176
- {Type <Return> for now, but then later remove\MessageBreak
177
- the command \protect\usepackage{natbib} from the document}
178
- \endinput}{}
179
- \@ifclassloaded{aguplus}{\PackageError{natbib}
180
- {The aguplus class already includes natbib coding,\MessageBreak
181
- so you should not add it explicitly}
182
- {Type <Return> for now, but then later remove\MessageBreak
183
- the command \protect\usepackage{natbib} from the document}
184
- \endinput}{}
185
- \@ifclassloaded{nlinproc}{\PackageError{natbib}
186
- {The nlinproc class already includes natbib coding,\MessageBreak
187
- so you should not add it explicitly}
188
- {Type <Return> for now, but then later remove\MessageBreak
189
- the command \protect\usepackage{natbib} from the document}
190
- \endinput}{}
191
- \@ifclassloaded{egs}{\PackageError{natbib}
192
- {The egs class already includes natbib coding,\MessageBreak
193
- so you should not add it explicitly}
194
- {Type <Return> for now, but then later remove\MessageBreak
195
- the command \protect\usepackage{natbib} from the document}
196
- \endinput}{}
197
- \@ifclassloaded{egu}{\PackageError{natbib}
198
- {The egu class already includes natbib coding,\MessageBreak
199
- so you should not add it explicitly}
200
- {Type <Return> for now, but then later remove\MessageBreak
201
- the command \protect\usepackage{natbib} from the document}
202
- \endinput}{}
203
- % Define citation punctuation for some author-year styles
204
- % One may add and delete at this point
205
- % Or put additions into local configuration file natbib.cfg
206
- \newcommand\bibstyle@chicago{\bibpunct{(}{)}{;}{a}{,}{,}}
207
- \newcommand\bibstyle@named{\bibpunct{[}{]}{;}{a}{,}{,}}
208
- \newcommand\bibstyle@agu{\bibpunct{[}{]}{;}{a}{,}{,~}}%Amer. Geophys. Union
209
- \newcommand\bibstyle@copernicus{\bibpunct{(}{)}{;}{a}{,}{,}}%Copernicus Publications
210
- \let\bibstyle@egu=\bibstyle@copernicus
211
- \let\bibstyle@egs=\bibstyle@copernicus
212
- \newcommand\bibstyle@agsm{\bibpunct{(}{)}{,}{a}{}{,}\gdef\harvardand{\&}}
213
- \newcommand\bibstyle@kluwer{\bibpunct{(}{)}{,}{a}{}{,}\gdef\harvardand{\&}}
214
- \newcommand\bibstyle@dcu{\bibpunct{(}{)}{;}{a}{;}{,}\gdef\harvardand{and}}
215
- \newcommand\bibstyle@aa{\bibpunct{(}{)}{;}{a}{}{,}} %Astronomy & Astrophysics
216
- \newcommand\bibstyle@pass{\bibpunct{(}{)}{;}{a}{,}{,}}%Planet. & Space Sci
217
- \newcommand\bibstyle@anngeo{\bibpunct{(}{)}{;}{a}{,}{,}}%Annales Geophysicae
218
- \newcommand\bibstyle@nlinproc{\bibpunct{(}{)}{;}{a}{,}{,}}%Nonlin.Proc.Geophys.
219
- % Define citation punctuation for some numerical styles
220
- \newcommand\bibstyle@cospar{\bibpunct{/}{/}{,}{n}{}{}%
221
- \gdef\bibnumfmt##1{##1.}}
222
- \newcommand\bibstyle@esa{\bibpunct{(Ref.~}{)}{,}{n}{}{}%
223
- \gdef\bibnumfmt##1{##1.\hspace{1em}}}
224
- \newcommand\bibstyle@nature{\bibpunct{}{}{,}{s}{}{\textsuperscript{,}}%
225
- \gdef\bibnumfmt##1{##1.}}
226
- % The standard LaTeX styles
227
- \newcommand\bibstyle@plain{\bibpunct{[}{]}{,}{n}{}{,}}
228
- \let\bibstyle@alpha=\bibstyle@plain
229
- \let\bibstyle@abbrv=\bibstyle@plain
230
- \let\bibstyle@unsrt=\bibstyle@plain
231
- % The author-year modifications of the standard styles
232
- \newcommand\bibstyle@plainnat{\bibpunct{[}{]}{,}{a}{,}{,}}
233
- \let\bibstyle@abbrvnat=\bibstyle@plainnat
234
- \let\bibstyle@unsrtnat=\bibstyle@plainnat
235
- \newif\ifNAT@numbers \NAT@numbersfalse
236
- \newif\ifNAT@super \NAT@superfalse
237
- \let\NAT@merge\z@
238
- \DeclareOption{numbers}{\NAT@numberstrue
239
- \ExecuteOptions{square,comma,nobibstyle}}
240
- \DeclareOption{super}{\NAT@supertrue\NAT@numberstrue
241
- \renewcommand\NAT@open{}\renewcommand\NAT@close{}
242
- \ExecuteOptions{nobibstyle}}
243
- \DeclareOption{authoryear}{\NAT@numbersfalse
244
- \ExecuteOptions{round,semicolon,bibstyle}}
245
- \DeclareOption{round}{%
246
- \renewcommand\NAT@open{(} \renewcommand\NAT@close{)}
247
- \ExecuteOptions{nobibstyle}}
248
- \DeclareOption{square}{%
249
- \renewcommand\NAT@open{[} \renewcommand\NAT@close{]}
250
- \ExecuteOptions{nobibstyle}}
251
- \DeclareOption{angle}{%
252
- \renewcommand\NAT@open{$<$} \renewcommand\NAT@close{$>$}
253
- \ExecuteOptions{nobibstyle}}
254
- \DeclareOption{curly}{%
255
- \renewcommand\NAT@open{\{} \renewcommand\NAT@close{\}}
256
- \ExecuteOptions{nobibstyle}}
257
- \DeclareOption{comma}{\renewcommand\NAT@sep{,}
258
- \ExecuteOptions{nobibstyle}}
259
- \DeclareOption{semicolon}{\renewcommand\NAT@sep{;}
260
- \ExecuteOptions{nobibstyle}}
261
- \DeclareOption{colon}{\ExecuteOptions{semicolon}}
262
- \DeclareOption{nobibstyle}{\let\bibstyle=\@gobble}
263
- \DeclareOption{bibstyle}{\let\bibstyle=\@citestyle}
264
- \newif\ifNAT@openbib \NAT@openbibfalse
265
- \DeclareOption{openbib}{\NAT@openbibtrue}
266
- \DeclareOption{sectionbib}{\def\NAT@sectionbib{on}}
267
- \def\NAT@sort{\z@}
268
- \def\NAT@cmprs{\z@}
269
- \DeclareOption{sort}{\def\NAT@sort{\@ne}}
270
- \DeclareOption{compress}{\def\NAT@cmprs{\@ne}}
271
- \DeclareOption{sort&compress}{\def\NAT@sort{\@ne}\def\NAT@cmprs{\@ne}}
272
- \DeclareOption{mcite}{\let\NAT@merge\@ne}
273
- \DeclareOption{merge}{\@ifnum{\NAT@merge<\tw@}{\let\NAT@merge\tw@}{}}
274
- \DeclareOption{elide}{\@ifnum{\NAT@merge<\thr@@}{\let\NAT@merge\thr@@}{}}
275
- \@ifpackageloaded{cite}{\PackageWarningNoLine{natbib}
276
- {The `cite' package should not be used\MessageBreak
277
- with natbib. Use option `sort' instead}\ExecuteOptions{sort}}{}
278
- \@ifpackageloaded{mcite}{\PackageWarningNoLine{natbib}
279
- {The `mcite' package should not be used\MessageBreak
280
- with natbib. Use option `merge' instead}\ExecuteOptions{merge}}{}
281
- \@ifpackageloaded{citeref}{\PackageError{natbib}
282
- {The `citeref' package must be loaded after natbib}%
283
- {Move \protect\usepackage{citeref} to after \string\usepackage{natbib}}}{}
284
- \newif\ifNAT@longnames\NAT@longnamesfalse
285
- \DeclareOption{longnamesfirst}{\NAT@longnamestrue}
286
- \DeclareOption{nonamebreak}{\def\NAT@nmfmt#1{\mbox{\NAT@up#1}}}
287
- \def\NAT@nmfmt#1{{\NAT@up#1}}
288
- \renewcommand\bibstyle[1]{\csname bibstyle@#1\endcsname}
289
- \AtBeginDocument{\global\let\bibstyle=\@gobble}
290
- \let\@citestyle\bibstyle
291
- \newcommand\citestyle[1]{\@citestyle{#1}\let\bibstyle\@gobble}
292
- \newcommand\bibpunct[7][, ]%
293
- {\gdef\NAT@open{#2}\gdef\NAT@close{#3}\gdef
294
- \NAT@sep{#4}\global\NAT@numbersfalse
295
- \ifx #5n\global\NAT@numberstrue\global\NAT@superfalse
296
- \else
297
- \ifx #5s\global\NAT@numberstrue\global\NAT@supertrue
298
- \fi\fi
299
- \gdef\NAT@aysep{#6}\gdef\NAT@yrsep{#7}%
300
- \gdef\NAT@cmt{#1}%
301
- \NAT@@setcites
302
- }
303
- \newcommand\setcitestyle[1]{
304
- \@for\@tempa:=#1\do
305
- {\def\@tempb{round}\ifx\@tempa\@tempb
306
- \renewcommand\NAT@open{(}\renewcommand\NAT@close{)}\fi
307
- \def\@tempb{square}\ifx\@tempa\@tempb
308
- \renewcommand\NAT@open{[}\renewcommand\NAT@close{]}\fi
309
- \def\@tempb{angle}\ifx\@tempa\@tempb
310
- \renewcommand\NAT@open{$<$}\renewcommand\NAT@close{$>$}\fi
311
- \def\@tempb{curly}\ifx\@tempa\@tempb
312
- \renewcommand\NAT@open{\{}\renewcommand\NAT@close{\}}\fi
313
- \def\@tempb{semicolon}\ifx\@tempa\@tempb
314
- \renewcommand\NAT@sep{;}\fi
315
- \def\@tempb{colon}\ifx\@tempa\@tempb
316
- \renewcommand\NAT@sep{;}\fi
317
- \def\@tempb{comma}\ifx\@tempa\@tempb
318
- \renewcommand\NAT@sep{,}\fi
319
- \def\@tempb{authoryear}\ifx\@tempa\@tempb
320
- \NAT@numbersfalse\fi
321
- \def\@tempb{numbers}\ifx\@tempa\@tempb
322
- \NAT@numberstrue\NAT@superfalse\fi
323
- \def\@tempb{super}\ifx\@tempa\@tempb
324
- \NAT@numberstrue\NAT@supertrue\fi
325
- \expandafter\NAT@find@eq\@tempa=\relax\@nil
326
- \if\@tempc\relax\else
327
- \expandafter\NAT@rem@eq\@tempc
328
- \def\@tempb{open}\ifx\@tempa\@tempb
329
- \xdef\NAT@open{\@tempc}\fi
330
- \def\@tempb{close}\ifx\@tempa\@tempb
331
- \xdef\NAT@close{\@tempc}\fi
332
- \def\@tempb{aysep}\ifx\@tempa\@tempb
333
- \xdef\NAT@aysep{\@tempc}\fi
334
- \def\@tempb{yysep}\ifx\@tempa\@tempb
335
- \xdef\NAT@yrsep{\@tempc}\fi
336
- \def\@tempb{notesep}\ifx\@tempa\@tempb
337
- \xdef\NAT@cmt{\@tempc}\fi
338
- \def\@tempb{citesep}\ifx\@tempa\@tempb
339
- \xdef\NAT@sep{\@tempc}\fi
340
- \fi
341
- }%
342
- \NAT@@setcites
343
- }
344
- \def\NAT@find@eq#1=#2\@nil{\def\@tempa{#1}\def\@tempc{#2}}
345
- \def\NAT@rem@eq#1={\def\@tempc{#1}}
346
- \def\NAT@@setcites{\global\let\bibstyle\@gobble}
347
- \AtBeginDocument{\let\NAT@@setcites\NAT@set@cites}
348
- \newcommand\NAT@open{(} \newcommand\NAT@close{)}
349
- \newcommand\NAT@sep{;}
350
- \ProcessOptions
351
- \newcommand\NAT@aysep{,} \newcommand\NAT@yrsep{,}
352
- \newcommand\NAT@cmt{, }
353
- \newcommand\NAT@cite%
354
- [3]{\ifNAT@swa\NAT@@open\if*#2*\else#2\NAT@spacechar\fi
355
- #1\if*#3*\else\NAT@cmt#3\fi\NAT@@close\else#1\fi\endgroup}
356
- \newcommand\NAT@citenum%
357
- [3]{\ifNAT@swa\NAT@@open\if*#2*\else#2\NAT@spacechar\fi
358
- #1\if*#3*\else\NAT@cmt#3\fi\NAT@@close\else#1\fi\endgroup}
359
- \newcommand\NAT@citesuper[3]{\ifNAT@swa
360
- \if*#2*\else#2\NAT@spacechar\fi
361
- \unskip\kern\p@\textsuperscript{\NAT@@open#1\NAT@@close}%
362
- \if*#3*\else\NAT@spacechar#3\fi\else #1\fi\endgroup}
363
- \providecommand\textsuperscript[1]{\mbox{$^{\mbox{\scriptsize#1}}$}}
364
- \begingroup \catcode`\_=8
365
- \gdef\NAT@ifcat@num#1{%
366
- \ifcat_\ifnum\z@<0#1_\else A\fi
367
- \expandafter\@firstoftwo
368
- \else
369
- \expandafter\@secondoftwo
370
- \fi
371
- }%
372
- \endgroup
373
- \providecommand\@firstofone[1]{#1}
374
- \newcommand\NAT@citexnum{}
375
- \def\NAT@citexnum[#1][#2]#3{%
376
- \NAT@reset@parser
377
- \NAT@sort@cites{#3}%
378
- \NAT@reset@citea
379
- \@cite{\def\NAT@num{-1}\let\NAT@last@yr\relax\let\NAT@nm\@empty
380
- \@for\@citeb:=\NAT@cite@list\do
381
- {\@safe@activestrue
382
- \edef\@citeb{\expandafter\@firstofone\@citeb\@empty}%
383
- \@safe@activesfalse
384
- \@ifundefined{b@\@citeb\@extra@b@citeb}{%
385
- {\reset@font\bfseries?}
386
- \NAT@citeundefined\PackageWarning{natbib}%
387
- {Citation `\@citeb' on page \thepage \space undefined}}%
388
- {\let\NAT@last@num\NAT@num\let\NAT@last@nm\NAT@nm
389
- \NAT@parse{\@citeb}%
390
- \ifNAT@longnames\@ifundefined{bv@\@citeb\@extra@b@citeb}{%
391
- \let\NAT@name=\NAT@all@names
392
- \global\@namedef{bv@\@citeb\@extra@b@citeb}{}}{}%
393
- \fi
394
- \ifNAT@full\let\NAT@nm\NAT@all@names\else
395
- \let\NAT@nm\NAT@name\fi
396
- \ifNAT@swa
397
- \@ifnum{\NAT@ctype>\@ne}{%
398
- \@citea
399
- \NAT@hyper@{\@ifnum{\NAT@ctype=\tw@}{\NAT@test{\NAT@ctype}}{\NAT@alias}}%
400
- }{%
401
- \@ifnum{\NAT@cmprs>\z@}{%
402
- \NAT@ifcat@num\NAT@num
403
- {\let\NAT@nm=\NAT@num}%
404
- {\def\NAT@nm{-2}}%
405
- \NAT@ifcat@num\NAT@last@num
406
- {\@tempcnta=\NAT@last@num\relax}%
407
- {\@tempcnta\m@ne}%
408
- \@ifnum{\NAT@nm=\@tempcnta}{%
409
- \@ifnum{\NAT@merge>\@ne}{}{\NAT@last@yr@mbox}%
410
- }{%
411
- \advance\@tempcnta by\@ne
412
- \@ifnum{\NAT@nm=\@tempcnta}{%
413
- \ifx\NAT@last@yr\relax
414
- \def@NAT@last@yr{\@citea}%
415
- \else
416
- \def@NAT@last@yr{--\NAT@penalty}%
417
- \fi
418
- }{%
419
- \NAT@last@yr@mbox
420
- }%
421
- }%
422
- }{%
423
- \@tempswatrue
424
- \@ifnum{\NAT@merge>\@ne}{\@ifnum{\NAT@last@num=\NAT@num\relax}{\@tempswafalse}{}}{}%
425
- \if@tempswa\NAT@citea@mbox\fi
426
- }%
427
- }%
428
- \NAT@def@citea
429
- \else
430
- \ifcase\NAT@ctype
431
- \ifx\NAT@last@nm\NAT@nm \NAT@yrsep\NAT@penalty\NAT@space\else
432
- \@citea \NAT@test{\@ne}\NAT@spacechar\NAT@mbox{\NAT@super@kern\NAT@@open}%
433
- \fi
434
- \if*#1*\else#1\NAT@spacechar\fi
435
- \NAT@mbox{\NAT@hyper@{{\citenumfont{\NAT@num}}}}%
436
- \NAT@def@citea@box
437
- \or
438
- \NAT@hyper@citea@space{\NAT@test{\NAT@ctype}}%
439
- \or
440
- \NAT@hyper@citea@space{\NAT@test{\NAT@ctype}}%
441
- \or
442
- \NAT@hyper@citea@space\NAT@alias
443
- \fi
444
- \fi
445
- }%
446
- }%
447
- \@ifnum{\NAT@cmprs>\z@}{\NAT@last@yr}{}%
448
- \ifNAT@swa\else
449
- \@ifnum{\NAT@ctype=\z@}{%
450
- \if*#2*\else\NAT@cmt#2\fi
451
- }{}%
452
- \NAT@mbox{\NAT@@close}%
453
- \fi
454
- }{#1}{#2}%
455
- }%
456
- \def\NAT@citea@mbox{%
457
- \@citea\mbox{\NAT@hyper@{{\citenumfont{\NAT@num}}}}%
458
- }%
459
- \def\NAT@hyper@#1{%
460
- \hyper@natlinkstart{\@citeb\@extra@b@citeb}#1\hyper@natlinkend
461
- }%
462
- \def\NAT@hyper@citea#1{%
463
- \@citea
464
- \NAT@hyper@{#1}%
465
- \NAT@def@citea
466
- }%
467
- \def\NAT@hyper@citea@space#1{%
468
- \@citea
469
- \NAT@hyper@{#1}%
470
- \NAT@def@citea@space
471
- }%
472
- \def\def@NAT@last@yr#1{%
473
- \protected@edef\NAT@last@yr{%
474
- #1%
475
- \noexpand\mbox{%
476
- \noexpand\hyper@natlinkstart{\@citeb\@extra@b@citeb}%
477
- {\noexpand\citenumfont{\NAT@num}}%
478
- \noexpand\hyper@natlinkend
479
- }%
480
- }%
481
- }%
482
- \def\NAT@last@yr@mbox{%
483
- \NAT@last@yr\let\NAT@last@yr\relax
484
- \NAT@citea@mbox
485
- }%
486
- \newcommand\NAT@test[1]{%
487
- \@ifnum{#1=\@ne}{%
488
- \ifx\NAT@nm\NAT@noname
489
- \begingroup\reset@font\bfseries(author?)\endgroup
490
- \PackageWarning{natbib}{%
491
- Author undefined for citation`\@citeb' \MessageBreak on page \thepage%
492
- }%
493
- \else \NAT@nm
494
- \fi
495
- }{%
496
- \if\relax\NAT@date\relax
497
- \begingroup\reset@font\bfseries(year?)\endgroup
498
- \PackageWarning{natbib}{%
499
- Year undefined for citation`\@citeb' \MessageBreak on page \thepage%
500
- }%
501
- \else \NAT@date
502
- \fi
503
- }%
504
- }%
505
- \let\citenumfont=\@empty
506
- \newcommand\NAT@citex{}
507
- \def\NAT@citex%
508
- [#1][#2]#3{%
509
- \NAT@reset@parser
510
- \NAT@sort@cites{#3}%
511
- \NAT@reset@citea
512
- \@cite{\let\NAT@nm\@empty\let\NAT@year\@empty
513
- \@for\@citeb:=\NAT@cite@list\do
514
- {\@safe@activestrue
515
- \edef\@citeb{\expandafter\@firstofone\@citeb\@empty}%
516
- \@safe@activesfalse
517
- \@ifundefined{b@\@citeb\@extra@b@citeb}{\@citea%
518
- {\reset@font\bfseries ?}\NAT@citeundefined
519
- \PackageWarning{natbib}%
520
- {Citation `\@citeb' on page \thepage \space undefined}\def\NAT@date{}}%
521
- {\let\NAT@last@nm=\NAT@nm\let\NAT@last@yr=\NAT@year
522
- \NAT@parse{\@citeb}%
523
- \ifNAT@longnames\@ifundefined{bv@\@citeb\@extra@b@citeb}{%
524
- \let\NAT@name=\NAT@all@names
525
- \global\@namedef{bv@\@citeb\@extra@b@citeb}{}}{}%
526
- \fi
527
- \ifNAT@full\let\NAT@nm\NAT@all@names\else
528
- \let\NAT@nm\NAT@name\fi
529
- \ifNAT@swa\ifcase\NAT@ctype
530
- \if\relax\NAT@date\relax
531
- \@citea\NAT@hyper@{\NAT@nmfmt{\NAT@nm}\NAT@date}%
532
- \else
533
- \ifx\NAT@last@nm\NAT@nm\NAT@yrsep
534
- \ifx\NAT@last@yr\NAT@year
535
- \def\NAT@temp{{?}}%
536
- \ifx\NAT@temp\NAT@exlab\PackageWarningNoLine{natbib}%
537
- {Multiple citation on page \thepage: same authors and
538
- year\MessageBreak without distinguishing extra
539
- letter,\MessageBreak appears as question mark}\fi
540
- \NAT@hyper@{\NAT@exlab}%
541
- \else\unskip\NAT@spacechar
542
- \NAT@hyper@{\NAT@date}%
543
- \fi
544
- \else
545
- \@citea\NAT@hyper@{%
546
- \NAT@nmfmt{\NAT@nm}%
547
- \hyper@natlinkbreak{%
548
- \NAT@aysep\NAT@spacechar}{\@citeb\@extra@b@citeb
549
- }%
550
- \NAT@date
551
- }%
552
- \fi
553
- \fi
554
- \or\@citea\NAT@hyper@{\NAT@nmfmt{\NAT@nm}}%
555
- \or\@citea\NAT@hyper@{\NAT@date}%
556
- \or\@citea\NAT@hyper@{\NAT@alias}%
557
- \fi \NAT@def@citea
558
- \else
559
- \ifcase\NAT@ctype
560
- \if\relax\NAT@date\relax
561
- \@citea\NAT@hyper@{\NAT@nmfmt{\NAT@nm}}%
562
- \else
563
- \ifx\NAT@last@nm\NAT@nm\NAT@yrsep
564
- \ifx\NAT@last@yr\NAT@year
565
- \def\NAT@temp{{?}}%
566
- \ifx\NAT@temp\NAT@exlab\PackageWarningNoLine{natbib}%
567
- {Multiple citation on page \thepage: same authors and
568
- year\MessageBreak without distinguishing extra
569
- letter,\MessageBreak appears as question mark}\fi
570
- \NAT@hyper@{\NAT@exlab}%
571
- \else
572
- \unskip\NAT@spacechar
573
- \NAT@hyper@{\NAT@date}%
574
- \fi
575
- \else
576
- \@citea\NAT@hyper@{%
577
- \NAT@nmfmt{\NAT@nm}%
578
- \hyper@natlinkbreak{\NAT@spacechar\NAT@@open\if*#1*\else#1\NAT@spacechar\fi}%
579
- {\@citeb\@extra@b@citeb}%
580
- \NAT@date
581
- }%
582
- \fi
583
- \fi
584
- \or\@citea\NAT@hyper@{\NAT@nmfmt{\NAT@nm}}%
585
- \or\@citea\NAT@hyper@{\NAT@date}%
586
- \or\@citea\NAT@hyper@{\NAT@alias}%
587
- \fi
588
- \if\relax\NAT@date\relax
589
- \NAT@def@citea
590
- \else
591
- \NAT@def@citea@close
592
- \fi
593
- \fi
594
- }}\ifNAT@swa\else\if*#2*\else\NAT@cmt#2\fi
595
- \if\relax\NAT@date\relax\else\NAT@@close\fi\fi}{#1}{#2}}
596
- \def\NAT@spacechar{\ }%
597
- \def\NAT@separator{\NAT@sep\NAT@penalty}%
598
- \def\NAT@reset@citea{\c@NAT@ctr\@ne\let\@citea\@empty}%
599
- \def\NAT@def@citea{\def\@citea{\NAT@separator\NAT@space}}%
600
- \def\NAT@def@citea@space{\def\@citea{\NAT@separator\NAT@spacechar}}%
601
- \def\NAT@def@citea@close{\def\@citea{\NAT@@close\NAT@separator\NAT@space}}%
602
- \def\NAT@def@citea@box{\def\@citea{\NAT@mbox{\NAT@@close}\NAT@separator\NAT@spacechar}}%
603
- \newif\ifNAT@par \NAT@partrue
604
- \newcommand\NAT@@open{\ifNAT@par\NAT@open\fi}
605
- \newcommand\NAT@@close{\ifNAT@par\NAT@close\fi}
606
- \newcommand\NAT@alias{\@ifundefined{al@\@citeb\@extra@b@citeb}{%
607
- {\reset@font\bfseries(alias?)}\PackageWarning{natbib}
608
- {Alias undefined for citation `\@citeb'
609
- \MessageBreak on page \thepage}}{\@nameuse{al@\@citeb\@extra@b@citeb}}}
610
- \let\NAT@up\relax
611
- \newcommand\NAT@Up[1]{{\let\protect\@unexpandable@protect\let~\relax
612
- \expandafter\NAT@deftemp#1}\expandafter\NAT@UP\NAT@temp}
613
- \newcommand\NAT@deftemp[1]{\xdef\NAT@temp{#1}}
614
- \newcommand\NAT@UP[1]{\let\@tempa\NAT@UP\ifcat a#1\MakeUppercase{#1}%
615
- \let\@tempa\relax\else#1\fi\@tempa}
616
- \newcommand\shortcites[1]{%
617
- \@bsphack\@for\@citeb:=#1\do
618
- {\@safe@activestrue
619
- \edef\@citeb{\expandafter\@firstofone\@citeb\@empty}%
620
- \@safe@activesfalse
621
- \global\@namedef{bv@\@citeb\@extra@b@citeb}{}}\@esphack}
622
- \newcommand\NAT@biblabel[1]{\hfill}
623
- \newcommand\NAT@biblabelnum[1]{\bibnumfmt{#1}}
624
- \let\bibnumfmt\@empty
625
- \providecommand\@biblabel[1]{[#1]}
626
- \AtBeginDocument{\ifx\bibnumfmt\@empty\let\bibnumfmt\@biblabel\fi}
627
- \newcommand\NAT@bibsetnum[1]{\settowidth\labelwidth{\@biblabel{#1}}%
628
- \setlength{\leftmargin}{\labelwidth}\addtolength{\leftmargin}{\labelsep}%
629
- \setlength{\itemsep}{\bibsep}\setlength{\parsep}{\z@}%
630
- \ifNAT@openbib
631
- \addtolength{\leftmargin}{\bibindent}%
632
- \setlength{\itemindent}{-\bibindent}%
633
- \setlength{\listparindent}{\itemindent}%
634
- \setlength{\parsep}{0pt}%
635
- \fi
636
- }
637
- \newlength{\bibhang}
638
- \setlength{\bibhang}{1em}
639
- \newlength{\bibsep}
640
- {\@listi \global\bibsep\itemsep \global\advance\bibsep by\parsep}
641
-
642
- \newcommand\NAT@bibsetup%
643
- [1]{\setlength{\leftmargin}{\bibhang}\setlength{\itemindent}{-\leftmargin}%
644
- \setlength{\itemsep}{\bibsep}\setlength{\parsep}{\z@}}
645
- \newcommand\NAT@set@cites{%
646
- \ifNAT@numbers
647
- \ifNAT@super \let\@cite\NAT@citesuper
648
- \def\NAT@mbox##1{\unskip\nobreak\textsuperscript{##1}}%
649
- \let\citeyearpar=\citeyear
650
- \let\NAT@space\relax
651
- \def\NAT@super@kern{\kern\p@}%
652
- \else
653
- \let\NAT@mbox=\mbox
654
- \let\@cite\NAT@citenum
655
- \let\NAT@space\NAT@spacechar
656
- \let\NAT@super@kern\relax
657
- \fi
658
- \let\@citex\NAT@citexnum
659
- \let\@biblabel\NAT@biblabelnum
660
- \let\@bibsetup\NAT@bibsetnum
661
- \renewcommand\NAT@idxtxt{\NAT@name\NAT@spacechar\NAT@open\NAT@num\NAT@close}%
662
- \def\natexlab##1{}%
663
- \def\NAT@penalty{\penalty\@m}%
664
- \else
665
- \let\@cite\NAT@cite
666
- \let\@citex\NAT@citex
667
- \let\@biblabel\NAT@biblabel
668
- \let\@bibsetup\NAT@bibsetup
669
- \let\NAT@space\NAT@spacechar
670
- \let\NAT@penalty\@empty
671
- \renewcommand\NAT@idxtxt{\NAT@name\NAT@spacechar\NAT@open\NAT@date\NAT@close}%
672
- \def\natexlab##1{##1}%
673
- \fi}
674
- \AtBeginDocument{\NAT@set@cites}
675
- \AtBeginDocument{\ifx\SK@def\@undefined\else
676
- \ifx\SK@cite\@empty\else
677
- \SK@def\@citex[#1][#2]#3{\SK@\SK@@ref{#3}\SK@@citex[#1][#2]{#3}}\fi
678
- \ifx\SK@citeauthor\@undefined\def\HAR@checkdef{}\else
679
- \let\citeauthor\SK@citeauthor
680
- \let\citefullauthor\SK@citefullauthor
681
- \let\citeyear\SK@citeyear\fi
682
- \fi}
683
- \newif\ifNAT@full\NAT@fullfalse
684
- \newif\ifNAT@swa
685
- \DeclareRobustCommand\citet
686
- {\begingroup\NAT@swafalse\let\NAT@ctype\z@\NAT@partrue
687
- \@ifstar{\NAT@fulltrue\NAT@citetp}{\NAT@fullfalse\NAT@citetp}}
688
- \newcommand\NAT@citetp{\@ifnextchar[{\NAT@@citetp}{\NAT@@citetp[]}}
689
- \newcommand\NAT@@citetp{}
690
- \def\NAT@@citetp[#1]{\@ifnextchar[{\@citex[#1]}{\@citex[][#1]}}
691
- \DeclareRobustCommand\citep
692
- {\begingroup\NAT@swatrue\let\NAT@ctype\z@\NAT@partrue
693
- \@ifstar{\NAT@fulltrue\NAT@citetp}{\NAT@fullfalse\NAT@citetp}}
694
- \DeclareRobustCommand\cite
695
- {\begingroup\let\NAT@ctype\z@\NAT@partrue\NAT@swatrue
696
- \@ifstar{\NAT@fulltrue\NAT@cites}{\NAT@fullfalse\NAT@cites}}
697
- \newcommand\NAT@cites{\@ifnextchar [{\NAT@@citetp}{%
698
- \ifNAT@numbers\else
699
- \NAT@swafalse
700
- \fi
701
- \NAT@@citetp[]}}
702
- \DeclareRobustCommand\citealt
703
- {\begingroup\NAT@swafalse\let\NAT@ctype\z@\NAT@parfalse
704
- \@ifstar{\NAT@fulltrue\NAT@citetp}{\NAT@fullfalse\NAT@citetp}}
705
- \DeclareRobustCommand\citealp
706
- {\begingroup\NAT@swatrue\let\NAT@ctype\z@\NAT@parfalse
707
- \@ifstar{\NAT@fulltrue\NAT@citetp}{\NAT@fullfalse\NAT@citetp}}
708
- \DeclareRobustCommand\citenum
709
- {\begingroup
710
- \NAT@swatrue\let\NAT@ctype\z@\NAT@parfalse\let\textsuperscript\NAT@spacechar
711
- \NAT@citexnum[][]}
712
- \DeclareRobustCommand\citeauthor
713
- {\begingroup\NAT@swafalse\let\NAT@ctype\@ne\NAT@parfalse
714
- \@ifstar{\NAT@fulltrue\NAT@citetp}{\NAT@fullfalse\NAT@citetp}}
715
- \DeclareRobustCommand\Citet
716
- {\begingroup\NAT@swafalse\let\NAT@ctype\z@\NAT@partrue
717
- \let\NAT@up\NAT@Up
718
- \@ifstar{\NAT@fulltrue\NAT@citetp}{\NAT@fullfalse\NAT@citetp}}
719
- \DeclareRobustCommand\Citep
720
- {\begingroup\NAT@swatrue\let\NAT@ctype\z@\NAT@partrue
721
- \let\NAT@up\NAT@Up
722
- \@ifstar{\NAT@fulltrue\NAT@citetp}{\NAT@fullfalse\NAT@citetp}}
723
- \DeclareRobustCommand\Citealt
724
- {\begingroup\NAT@swafalse\let\NAT@ctype\z@\NAT@parfalse
725
- \let\NAT@up\NAT@Up
726
- \@ifstar{\NAT@fulltrue\NAT@citetp}{\NAT@fullfalse\NAT@citetp}}
727
- \DeclareRobustCommand\Citealp
728
- {\begingroup\NAT@swatrue\let\NAT@ctype\z@\NAT@parfalse
729
- \let\NAT@up\NAT@Up
730
- \@ifstar{\NAT@fulltrue\NAT@citetp}{\NAT@fullfalse\NAT@citetp}}
731
- \DeclareRobustCommand\Citeauthor
732
- {\begingroup\NAT@swafalse\let\NAT@ctype\@ne\NAT@parfalse
733
- \let\NAT@up\NAT@Up
734
- \@ifstar{\NAT@fulltrue\NAT@citetp}{\NAT@fullfalse\NAT@citetp}}
735
- \DeclareRobustCommand\citeyear
736
- {\begingroup\NAT@swafalse\let\NAT@ctype\tw@\NAT@parfalse\NAT@citetp}
737
- \DeclareRobustCommand\citeyearpar
738
- {\begingroup\NAT@swatrue\let\NAT@ctype\tw@\NAT@partrue\NAT@citetp}
739
- \newcommand\citetext[1]{\NAT@open#1\NAT@close}
740
- \DeclareRobustCommand\citefullauthor
741
- {\citeauthor*}
742
- \newcommand\defcitealias[2]{%
743
- \@ifundefined{al@#1\@extra@b@citeb}{}
744
- {\PackageWarning{natbib}{Overwriting existing alias for citation #1}}
745
- \@namedef{al@#1\@extra@b@citeb}{#2}}
746
- \DeclareRobustCommand\citetalias{\begingroup
747
- \NAT@swafalse\let\NAT@ctype\thr@@\NAT@parfalse\NAT@citetp}
748
- \DeclareRobustCommand\citepalias{\begingroup
749
- \NAT@swatrue\let\NAT@ctype\thr@@\NAT@partrue\NAT@citetp}
750
- \renewcommand\nocite[1]{\@bsphack
751
- \@for\@citeb:=#1\do{%
752
- \@safe@activestrue
753
- \edef\@citeb{\expandafter\@firstofone\@citeb\@empty}%
754
- \@safe@activesfalse
755
- \if@filesw\immediate\write\@auxout{\string\citation{\@citeb}}\fi
756
- \if*\@citeb\else
757
- \@ifundefined{b@\@citeb\@extra@b@citeb}{%
758
- \NAT@citeundefined \PackageWarning{natbib}%
759
- {Citation `\@citeb' undefined}}{}\fi}%
760
- \@esphack}
761
- \newcommand\NAT@parse[1]{%
762
- \begingroup
763
- \let\protect=\@unexpandable@protect
764
- \let~\relax
765
- \let\active@prefix=\@gobble
766
- \edef\NAT@temp{\csname b@#1\@extra@b@citeb\endcsname}%
767
- \aftergroup\NAT@split
768
- \expandafter
769
- \endgroup
770
- \NAT@temp{}{}{}{}{}@@%
771
- \expandafter\NAT@parse@date\NAT@date??????@@%
772
- \ifciteindex\NAT@index\fi
773
- }%
774
- \def\NAT@split#1#2#3#4#5@@{%
775
- \gdef\NAT@num{#1}\gdef\NAT@name{#3}\gdef\NAT@date{#2}%
776
- \gdef\NAT@all@names{#4}%
777
- \ifx\NAT@num\@empty\gdef\NAT@num{0}\fi
778
- \ifx\NAT@noname\NAT@all@names \gdef\NAT@all@names{#3}\fi
779
- }%
780
- \def\NAT@reset@parser{%
781
- \global\let\NAT@num\@empty
782
- \global\let\NAT@name\@empty
783
- \global\let\NAT@date\@empty
784
- \global\let\NAT@all@names\@empty
785
- }%
786
- \newcommand\NAT@parse@date{}
787
- \def\NAT@parse@date#1#2#3#4#5#6@@{%
788
- \ifnum\the\catcode`#1=11\def\NAT@year{}\def\NAT@exlab{#1}\else
789
- \ifnum\the\catcode`#2=11\def\NAT@year{#1}\def\NAT@exlab{#2}\else
790
- \ifnum\the\catcode`#3=11\def\NAT@year{#1#2}\def\NAT@exlab{#3}\else
791
- \ifnum\the\catcode`#4=11\def\NAT@year{#1#2#3}\def\NAT@exlab{#4}\else
792
- \def\NAT@year{#1#2#3#4}\def\NAT@exlab{{#5}}\fi\fi\fi\fi}
793
- \newcommand\NAT@index{}
794
- \let\NAT@makeindex=\makeindex
795
- \renewcommand\makeindex{\NAT@makeindex
796
- \renewcommand\NAT@index{\@bsphack\begingroup
797
- \def~{\string~}\@wrindex{\NAT@idxtxt}}}
798
- \newcommand\NAT@idxtxt{\NAT@name\NAT@spacechar\NAT@open\NAT@date\NAT@close}
799
- \@ifxundefined\@indexfile{}{\let\NAT@makeindex\relax\makeindex}
800
- \newif\ifciteindex \citeindexfalse
801
- \newcommand\citeindextype{default}
802
- \newcommand\NAT@index@alt{{\let\protect=\noexpand\let~\relax
803
- \xdef\NAT@temp{\NAT@idxtxt}}\expandafter\NAT@exp\NAT@temp\@nil}
804
- \newcommand\NAT@exp{}
805
- \def\NAT@exp#1\@nil{\index[\citeindextype]{#1}}
806
-
807
- \AtBeginDocument{%
808
- \@ifpackageloaded{index}{\let\NAT@index=\NAT@index@alt}{}}
809
- \newcommand\NAT@ifcmd{\futurelet\NAT@temp\NAT@ifxcmd}
810
- \newcommand\NAT@ifxcmd{\ifx\NAT@temp\relax\else\expandafter\NAT@bare\fi}
811
- \def\NAT@bare#1(#2)#3(@)#4\@nil#5{%
812
- \if @#2
813
- \expandafter\NAT@apalk#1, , \@nil{#5}%
814
- \else
815
- \NAT@wrout{\the\c@NAT@ctr}{#2}{#1}{#3}{#5}%
816
- \fi
817
- }
818
- \newcommand\NAT@wrout[5]{%
819
- \if@filesw
820
- {\let\protect\noexpand\let~\relax
821
- \immediate
822
- \write\@auxout{\string\bibcite{#5}{{#1}{#2}{{#3}}{{#4}}}}}\fi
823
- \ignorespaces}
824
- \def\NAT@noname{{}}
825
- \renewcommand\bibitem{\@ifnextchar[{\@lbibitem}{\@lbibitem[]}}%
826
- \let\NAT@bibitem@first@sw\@secondoftwo
827
- \def\@lbibitem[#1]#2{%
828
- \if\relax\@extra@b@citeb\relax\else
829
- \@ifundefined{br@#2\@extra@b@citeb}{}{%
830
- \@namedef{br@#2}{\@nameuse{br@#2\@extra@b@citeb}}%
831
- }%
832
- \fi
833
- \@ifundefined{b@#2\@extra@b@citeb}{%
834
- \def\NAT@num{}%
835
- }{%
836
- \NAT@parse{#2}%
837
- }%
838
- \def\NAT@tmp{#1}%
839
- \expandafter\let\expandafter\bibitemOpen\csname NAT@b@open@#2\endcsname
840
- \expandafter\let\expandafter\bibitemShut\csname NAT@b@shut@#2\endcsname
841
- \@ifnum{\NAT@merge>\@ne}{%
842
- \NAT@bibitem@first@sw{%
843
- \@firstoftwo
844
- }{%
845
- \@ifundefined{NAT@b*@#2}{%
846
- \@firstoftwo
847
- }{%
848
- \expandafter\def\expandafter\NAT@num\expandafter{\the\c@NAT@ctr}%
849
- \@secondoftwo
850
- }%
851
- }%
852
- }{%
853
- \@firstoftwo
854
- }%
855
- {%
856
- \global\advance\c@NAT@ctr\@ne
857
- \@ifx{\NAT@tmp\@empty}{\@firstoftwo}{%
858
- \@secondoftwo
859
- }%
860
- {%
861
- \expandafter\def\expandafter\NAT@num\expandafter{\the\c@NAT@ctr}%
862
- \global\NAT@stdbsttrue
863
- }{}%
864
- \bibitem@fin
865
- \item[\hfil\NAT@anchor{#2}{\NAT@num}]%
866
- \global\let\NAT@bibitem@first@sw\@secondoftwo
867
- \NAT@bibitem@init
868
- }%
869
- {%
870
- \NAT@anchor{#2}{}%
871
- \NAT@bibitem@cont
872
- \bibitem@fin
873
- }%
874
- \@ifx{\NAT@tmp\@empty}{%
875
- \NAT@wrout{\the\c@NAT@ctr}{}{}{}{#2}%
876
- }{%
877
- \expandafter\NAT@ifcmd\NAT@tmp(@)(@)\@nil{#2}%
878
- }%
879
- }%
880
- \def\bibitem@fin{%
881
- \@ifxundefined\@bibstop{}{\csname bibitem@\@bibstop\endcsname}%
882
- }%
883
- \def\NAT@bibitem@init{%
884
- \let\@bibstop\@undefined
885
- }%
886
- \def\NAT@bibitem@cont{%
887
- \let\bibitem@Stop\bibitemStop
888
- \let\bibitem@NoStop\bibitemContinue
889
- }%
890
- \def\BibitemOpen{%
891
- \bibitemOpen
892
- }%
893
- \def\BibitemShut#1{%
894
- \bibitemShut
895
- \def\@bibstop{#1}%
896
- \let\bibitem@Stop\bibitemStop
897
- \let\bibitem@NoStop\bibitemNoStop
898
- }%
899
- \def\bibitemStop{}%
900
- \def\bibitemNoStop{.\spacefactor\@mmm\space}%
901
- \def\bibitemContinue{\spacefactor\@mmm\space}%
902
- \mathchardef\@mmm=3000 %
903
- \providecommand{\bibAnnote}[3]{%
904
- \BibitemShut{#1}%
905
- \def\@tempa{#3}\@ifx{\@tempa\@empty}{}{%
906
- \begin{quotation}\noindent
907
- \textsc{Key:}\ #2\\\textsc{Annotation:}\ \@tempa
908
- \end{quotation}%
909
- }%
910
- }%
911
- \providecommand{\bibAnnoteFile}[2]{%
912
- \IfFileExists{#2}{%
913
- \bibAnnote{#1}{#2}{\input{#2}}%
914
- }{%
915
- \bibAnnote{#1}{#2}{}%
916
- }%
917
- }%
918
- \let\bibitemOpen\relax
919
- \let\bibitemShut\relax
920
- \def\bibfield{\@ifnum{\NAT@merge>\tw@}{\@bibfield}{\@secondoftwo}}%
921
- \def\@bibfield#1#2{%
922
- \begingroup
923
- \let\Doi\@gobble
924
- \let\bibinfo\relax
925
- \let\restore@protect\@empty
926
- \protected@edef\@tempa{#2}%
927
- \aftergroup\def\aftergroup\@tempa
928
- \expandafter\endgroup\expandafter{\@tempa}%
929
- \expandafter\@ifx\expandafter{\csname @bib#1\endcsname\@tempa}{%
930
- \expandafter\let\expandafter\@tempa\csname @bib@X#1\endcsname
931
- }{%
932
- \expandafter\let\csname @bib#1\endcsname\@tempa
933
- \expandafter\let\expandafter\@tempa\csname @bib@Y#1\endcsname
934
- }%
935
- \@ifx{\@tempa\relax}{\let\@tempa\@firstofone}{}%
936
- \@tempa{#2}%
937
- }%
938
- \def\bibinfo#1{%
939
- \expandafter\let\expandafter\@tempa\csname bibinfo@X@#1\endcsname
940
- \@ifx{\@tempa\relax}{\@firstofone}{\@tempa}%
941
- }%
942
- \def\@bib@Xauthor#1{\let\@bib@Xjournal\@gobble}%
943
- \def\@bib@Xjournal#1{\begingroup\let\bibinfo@X@journal\@bib@Z@journal#1\endgroup}%
944
- \def\@bibibid@#1{\textit{ibid}.}%
945
- \appdef\NAT@bibitem@init{%
946
- \let\@bibauthor \@empty
947
- \let\@bibjournal \@empty
948
- \let\@bib@Z@journal\@bibibid@
949
- }%
950
- \ifx\SK@lbibitem\@undefined\else
951
- \let\SK@lbibitem\@lbibitem
952
- \def\@lbibitem[#1]#2{%
953
- \SK@lbibitem[#1]{#2}\SK@\SK@@label{#2}\ignorespaces}\fi
954
- \newif\ifNAT@stdbst \NAT@stdbstfalse
955
-
956
- \AtEndDocument{%
957
- \ifNAT@stdbst\if@filesw
958
- \immediate\write\@auxout{%
959
- \string\providecommand\string\NAT@force@numbers{}%
960
- \string\NAT@force@numbers
961
- }%
962
- \fi\fi
963
- }
964
- \newcommand\NAT@force@numbers{%
965
- \ifNAT@numbers\else
966
- \PackageError{natbib}{Bibliography not compatible with author-year
967
- citations.\MessageBreak
968
- Press <return> to continue in numerical citation style}
969
- {Check the bibliography entries for non-compliant syntax,\MessageBreak
970
- or select author-year BibTeX style, e.g. plainnat}%
971
- \global\NAT@numberstrue\fi}
972
-
973
- \providecommand\bibcite{}
974
- \renewcommand\bibcite[2]{%
975
- \@ifundefined{b@#1\@extra@binfo}{\relax}{%
976
- \NAT@citemultiple
977
- \PackageWarningNoLine{natbib}{Citation `#1' multiply defined}%
978
- }%
979
- \global\@namedef{b@#1\@extra@binfo}{#2}%
980
- }%
981
- \AtEndDocument{\NAT@swatrue\let\bibcite\NAT@testdef}
982
- \newcommand\NAT@testdef[2]{%
983
- \def\NAT@temp{#2}%
984
- \expandafter \ifx \csname b@#1\@extra@binfo\endcsname\NAT@temp
985
- \else
986
- \ifNAT@swa \NAT@swafalse
987
- \PackageWarningNoLine{natbib}{%
988
- Citation(s) may have changed.\MessageBreak
989
- Rerun to get citations correct%
990
- }%
991
- \fi
992
- \fi
993
- }%
994
- \newcommand\NAT@apalk{}
995
- \def\NAT@apalk#1, #2, #3\@nil#4{%
996
- \if\relax#2\relax
997
- \global\NAT@stdbsttrue
998
- \NAT@wrout{#1}{}{}{}{#4}%
999
- \else
1000
- \NAT@wrout{\the\c@NAT@ctr}{#2}{#1}{}{#4}%
1001
- \fi
1002
- }%
1003
- \newcommand\citeauthoryear{}
1004
- \def\citeauthoryear#1#2#3(@)(@)\@nil#4{%
1005
- \if\relax#3\relax
1006
- \NAT@wrout{\the\c@NAT@ctr}{#2}{#1}{}{#4}%
1007
- \else
1008
- \NAT@wrout{\the\c@NAT@ctr}{#3}{#2}{#1}{#4}%
1009
- \fi
1010
- }%
1011
- \newcommand\citestarts{\NAT@open}%
1012
- \newcommand\citeends{\NAT@close}%
1013
- \newcommand\betweenauthors{and}%
1014
- \newcommand\astroncite{}
1015
- \def\astroncite#1#2(@)(@)\@nil#3{%
1016
- \NAT@wrout{\the\c@NAT@ctr}{#2}{#1}{}{#3}%
1017
- }%
1018
- \newcommand\citename{}
1019
- \def\citename#1#2(@)(@)\@nil#3{\expandafter\NAT@apalk#1#2, \@nil{#3}}
1020
- \newcommand\harvarditem[4][]{%
1021
- \if\relax#1\relax
1022
- \bibitem[#2(#3)]{#4}%
1023
- \else
1024
- \bibitem[#1(#3)#2]{#4}%
1025
- \fi
1026
- }%
1027
- \newcommand\harvardleft{\NAT@open}
1028
- \newcommand\harvardright{\NAT@close}
1029
- \newcommand\harvardyearleft{\NAT@open}
1030
- \newcommand\harvardyearright{\NAT@close}
1031
- \AtBeginDocument{\providecommand{\harvardand}{and}}
1032
- \newcommand\harvardurl[1]{\textbf{URL:} \textit{#1}}
1033
- \providecommand\bibsection{}
1034
- \@ifundefined{chapter}{%
1035
- \renewcommand\bibsection{%
1036
- \section*{\refname\@mkboth{\MakeUppercase{\refname}}{\MakeUppercase{\refname}}}%
1037
- }%
1038
- }{%
1039
- \@ifxundefined\NAT@sectionbib{%
1040
- \renewcommand\bibsection{%
1041
- \chapter*{\bibname\@mkboth{\MakeUppercase{\bibname}}{\MakeUppercase{\bibname}}}%
1042
- }%
1043
- }{%
1044
- \renewcommand\bibsection{%
1045
- \section*{\bibname\ifx\@mkboth\@gobbletwo\else\markright{\MakeUppercase{\bibname}}\fi}%
1046
- }%
1047
- }%
1048
- }%
1049
- \@ifclassloaded{amsart}{\renewcommand\bibsection{\section*{\refname}}}{}%
1050
- \@ifclassloaded{amsbook}{\renewcommand\bibsection{\chapter*{\bibname}}}{}%
1051
- \@ifxundefined\bib@heading{}{\let\bibsection\bib@heading}%
1052
- \newcounter{NAT@ctr}
1053
- \renewenvironment{thebibliography}[1]{%
1054
- \bibsection
1055
- \parindent\z@
1056
- \bibpreamble
1057
- \bibfont
1058
- \list{\@biblabel{\the\c@NAT@ctr}}{\@bibsetup{#1}\global\c@NAT@ctr\z@}%
1059
- \ifNAT@openbib
1060
- \renewcommand\newblock{\par}%
1061
- \else
1062
- \renewcommand\newblock{\hskip .11em \@plus.33em \@minus.07em}%
1063
- \fi
1064
- \sloppy\clubpenalty4000\widowpenalty4000
1065
- \sfcode`\.\@m
1066
- \let\NAT@bibitem@first@sw\@firstoftwo
1067
- \let\citeN\cite \let\shortcite\cite
1068
- \let\citeasnoun\cite
1069
- }{%
1070
- \bibitem@fin
1071
- \bibpostamble
1072
- \def\@noitemerr{%
1073
- \PackageWarning{natbib}{Empty `thebibliography' environment}%
1074
- }%
1075
- \endlist
1076
- \bibcleanup
1077
- }%
1078
- \let\bibfont\@empty
1079
- \let\bibpreamble\@empty
1080
- \let\bibpostamble\@empty
1081
- \def\bibcleanup{\vskip-\lastskip}%
1082
- \providecommand\reset@font{\relax}
1083
- \providecommand\bibname{Bibliography}
1084
- \providecommand\refname{References}
1085
- \newcommand\NAT@citeundefined{\gdef \NAT@undefined {%
1086
- \PackageWarningNoLine{natbib}{There were undefined citations}}}
1087
- \let \NAT@undefined \relax
1088
- \newcommand\NAT@citemultiple{\gdef \NAT@multiple {%
1089
- \PackageWarningNoLine{natbib}{There were multiply defined citations}}}
1090
- \let \NAT@multiple \relax
1091
- \AtEndDocument{\NAT@undefined\NAT@multiple}
1092
- \providecommand\@mkboth[2]{}
1093
- \providecommand\MakeUppercase{\uppercase}
1094
- \providecommand{\@extra@b@citeb}{}
1095
- \gdef\@extra@binfo{}
1096
- \def\NAT@anchor#1#2{%
1097
- \hyper@natanchorstart{#1\@extra@b@citeb}%
1098
- \def\@tempa{#2}\@ifx{\@tempa\@empty}{}{\@biblabel{#2}}%
1099
- \hyper@natanchorend
1100
- }%
1101
- \providecommand\hyper@natanchorstart[1]{}%
1102
- \providecommand\hyper@natanchorend{}%
1103
- \providecommand\hyper@natlinkstart[1]{}%
1104
- \providecommand\hyper@natlinkend{}%
1105
- \providecommand\hyper@natlinkbreak[2]{#1}%
1106
- \AtBeginDocument{%
1107
- \@ifpackageloaded{babel}{%
1108
- \let\org@@citex\@citex}{}}
1109
- \providecommand\@safe@activestrue{}%
1110
- \providecommand\@safe@activesfalse{}%
1111
-
1112
- \newcommand\NAT@sort@cites[1]{%
1113
- \let\NAT@cite@list\@empty
1114
- \@for\@citeb:=#1\do{\expandafter\NAT@star@cite\@citeb\@@}%
1115
- \if@filesw
1116
- \expandafter\immediate\expandafter\write\expandafter\@auxout
1117
- \expandafter{\expandafter\string\expandafter\citation\expandafter{\NAT@cite@list}}%
1118
- \fi
1119
- \@ifnum{\NAT@sort>\z@}{%
1120
- \expandafter\NAT@sort@cites@\expandafter{\NAT@cite@list}%
1121
- }{}%
1122
- }%
1123
- \def\NAT@star@cite{%
1124
- \let\NAT@star@sw\@secondoftwo
1125
- \@ifnum{\NAT@merge>\z@}{%
1126
- \@ifnextchar*{%
1127
- \let\NAT@star@sw\@firstoftwo
1128
- \NAT@star@cite@star
1129
- }{%
1130
- \NAT@star@cite@nostar
1131
- }%
1132
- }{%
1133
- \NAT@star@cite@noextension
1134
- }%
1135
- }%
1136
- \def\NAT@star@cite@star*{%
1137
- \NAT@star@cite@nostar
1138
- }%
1139
- \def\NAT@star@cite@nostar{%
1140
- \let\nat@keyopt@open\@empty
1141
- \let\nat@keyopt@shut\@empty
1142
- \@ifnextchar[{\NAT@star@cite@pre}{\NAT@star@cite@pre[]}%
1143
- }%
1144
- \def\NAT@star@cite@pre[#1]{%
1145
- \def\nat@keyopt@open{#1}%
1146
- \@ifnextchar[{\NAT@star@cite@post}{\NAT@star@cite@post[]}%
1147
- }%
1148
- \def\NAT@star@cite@post[#1]#2\@@{%
1149
- \def\nat@keyopt@shut{#1}%
1150
- \NAT@star@sw{\expandafter\global\expandafter\let\csname NAT@b*@#2\endcsname\@empty}{}%
1151
- \NAT@cite@list@append{#2}%
1152
- }%
1153
- \def\NAT@star@cite@noextension#1\@@{%
1154
- \let\nat@keyopt@open\@empty
1155
- \let\nat@keyopt@shut\@empty
1156
- \NAT@cite@list@append{#1}%
1157
- }%
1158
- \def\NAT@cite@list@append#1{%
1159
- \edef\@citeb{\@firstofone#1\@empty}%
1160
- \if@filesw\@ifxundefined\@cprwrite{}{\expandafter\@cprwrite\@citeb=}\fi
1161
- \if\relax\nat@keyopt@open\relax\else
1162
- \global\expandafter\let\csname NAT@b@open@\@citeb\endcsname\nat@keyopt@open
1163
- \fi
1164
- \if\relax\nat@keyopt@shut\relax\else
1165
- \global\expandafter\let\csname NAT@b@shut@\@citeb\endcsname\nat@keyopt@shut
1166
- \fi
1167
- \toks@\expandafter{\NAT@cite@list}%
1168
- \ifx\NAT@cite@list\@empty
1169
- \@temptokena\expandafter{\@citeb}%
1170
- \else
1171
- \@temptokena\expandafter{\expandafter,\@citeb}%
1172
- \fi
1173
- \edef\NAT@cite@list{\the\toks@\the\@temptokena}%
1174
- }%
1175
- \newcommand\NAT@sort@cites@[1]{%
1176
- \count@\z@
1177
- \@tempcntb\m@ne
1178
- \let\@celt\delimiter
1179
- \def\NAT@num@list{}%
1180
- \let\NAT@cite@list\@empty
1181
- \let\NAT@nonsort@list\@empty
1182
- \@for \@citeb:=#1\do{\NAT@make@cite@list}%
1183
- \ifx\NAT@nonsort@list\@empty\else
1184
- \protected@edef\NAT@cite@list{\NAT@cite@list\NAT@nonsort@list}%
1185
- \fi
1186
- \ifx\NAT@cite@list\@empty\else
1187
- \protected@edef\NAT@cite@list{\expandafter\NAT@xcom\NAT@cite@list @@}%
1188
- \fi
1189
- }%
1190
- \def\NAT@make@cite@list{%
1191
- \advance\count@\@ne
1192
- \@safe@activestrue
1193
- \edef\@citeb{\expandafter\@firstofone\@citeb\@empty}%
1194
- \@safe@activesfalse
1195
- \@ifundefined{b@\@citeb\@extra@b@citeb}%
1196
- {\def\NAT@num{A}}%
1197
- {\NAT@parse{\@citeb}}%
1198
- \NAT@ifcat@num\NAT@num
1199
- {\@tempcnta\NAT@num \relax
1200
- \@ifnum{\@tempcnta<\@tempcntb}{%
1201
- \let\NAT@@cite@list=\NAT@cite@list
1202
- \let\NAT@cite@list\@empty
1203
- \begingroup\let\@celt=\NAT@celt\NAT@num@list\endgroup
1204
- \protected@edef\NAT@num@list{%
1205
- \expandafter\NAT@num@celt \NAT@num@list \@gobble @%
1206
- }%
1207
- }{%
1208
- \protected@edef\NAT@num@list{\NAT@num@list \@celt{\NAT@num}}%
1209
- \protected@edef\NAT@cite@list{\NAT@cite@list\@citeb,}%
1210
- \@tempcntb\@tempcnta
1211
- }%
1212
- }%
1213
- {\protected@edef\NAT@nonsort@list{\NAT@nonsort@list\@citeb,}}%
1214
- }%
1215
- \def\NAT@celt#1{%
1216
- \@ifnum{#1>\@tempcnta}{%
1217
- \xdef\NAT@cite@list{\NAT@cite@list\@citeb,\NAT@@cite@list}%
1218
- \let\@celt\@gobble
1219
- }{%
1220
- \expandafter\def@NAT@cite@lists\NAT@@cite@list\@@
1221
- }%
1222
- }%
1223
- \def\NAT@num@celt#1#2{%
1224
- \ifx#1\@celt
1225
- \@ifnum{#2>\@tempcnta}{%
1226
- \@celt{\number\@tempcnta}%
1227
- \@celt{#2}%
1228
- }{%
1229
- \@celt{#2}%
1230
- \expandafter\NAT@num@celt
1231
- }%
1232
- \fi
1233
- }%
1234
- \def\def@NAT@cite@lists#1,#2\@@{%
1235
- \xdef\NAT@cite@list{\NAT@cite@list#1,}%
1236
- \xdef\NAT@@cite@list{#2}%
1237
- }%
1238
- \def\NAT@nextc#1,#2@@{#1,}
1239
- \def\NAT@restc#1,#2{#2}
1240
- \def\NAT@xcom#1,@@{#1}
1241
- \InputIfFileExists{natbib.cfg}
1242
- {\typeout{Local config file natbib.cfg used}}{}
1243
- %%
1244
- %% <<<<< End of generated file <<<<<<
1245
- %%
1246
- %% End of file `natbib.sty'.
outputs/outputs_20230608_115759/ref.bib DELETED
@@ -1,784 +0,0 @@
1
- @article{haarnoja2018soft,
2
- title = {Soft Actor-Critic: Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor},
3
- author = {Tuomas Haarnoja and Aurick Zhou and P. Abbeel and S. Levine},
4
- journal={International Conference on Machine Learning},
5
- year = {2018},
6
- url = {dblp.org/rec/conf/icml/HaarnojaZAL18}
7
- }
8
-
9
- @article{mnih2016asynchronous,
10
- title = {Asynchronous Methods for Deep Reinforcement Learning},
11
- author = {Volodymyr Mnih and Adrià Puigdomènech Badia and Mehdi Mirza and A. Graves and T. Lillicrap and Tim Harley and David Silver and K. Kavukcuoglu},
12
- journal={International Conference on Machine Learning},
13
- year = {2016},
14
- url = {dblp.org/rec/journals/corr/MnihBMGLHSK16}
15
- }
16
-
17
- @article{zoph2016neural,
18
- title = {Neural Architecture Search with Reinforcement Learning},
19
- author = {Barret Zoph and Quoc V. Le},
20
- journal={International Conference on Learning Representations},
21
- year = {2016},
22
- url = {dblp.org/rec/conf/iclr/ZophL17}
23
- }
24
-
25
- @article{mnih2013playing,
26
- title = {Playing Atari with Deep Reinforcement Learning},
27
- author = {Volodymyr Mnih and K. Kavukcuoglu and David Silver and A. Graves and Ioannis Antonoglou and Daan Wierstra and Martin A. Riedmiller},
28
- journal={arXiv.org},
29
- year = {2013},
30
- url = {dblp.org/rec/journals/corr/MnihKSGAWR13}
31
- }
32
-
33
- @article{lillicrap2015continuous,
34
- title = {Continuous control with deep reinforcement learning},
35
- author = {T. Lillicrap and Jonathan J. Hunt and A. Pritzel and N. Heess and T. Erez and Yuval Tassa and David Silver and Daan Wierstra},
36
- journal={International Conference on Learning Representations},
37
- year = {2015},
38
- url = {dblp.org/rec/journals/corr/LillicrapHPHETS15}
39
- }
40
-
41
- @article{hasselt2015deep,
42
- title = {Deep Reinforcement Learning with Double Q-Learning},
43
- author = {H. V. Hasselt and A. Guez and David Silver},
44
- journal={AAAI Conference on Artificial Intelligence},
45
- year = {2015},
46
- url = {dblp.org/rec/journals/corr/HasseltGS15}
47
- }
48
-
49
- @article{sutton2005reinforcement,
50
- title = {Reinforcement Learning: An Introduction},
51
- author = {R. Sutton and A. Barto},
52
- journal={IEEE Transactions on Neural Networks},
53
- year = {2005},
54
- url = {dblp.org/rec/journals/tnn/SuttonB98}
55
- }
56
-
57
- @article{silver2018a,
58
- title = {A general reinforcement learning algorithm that masters chess, shogi, and Go through self-play},
59
- author = {David Silver and T. Hubert and Julian Schrittwieser and Ioannis Antonoglou and Matthew Lai and A. Guez and Marc Lanctot and L. Sifre and D. Kumaran and T. Graepel and T. Lillicrap and K. Simonyan and D. Hassabis},
60
- journal={Science},
61
- year = {2018},
62
- url = {}
63
- }
64
-
65
- @article{mcmahan2016communication,
66
- title = {Communication-Efficient Learning of Deep Networks from Decentralized Data},
67
- author = {H. B. McMahan and Eider Moore and D. Ramage and S. Hampson and B. A. Y. Arcas},
68
- journal={International Conference on Artificial Intelligence and Statistics},
69
- year = {2016},
70
- url = {dblp.org/rec/conf/aistats/McMahanMRHA17}
71
- }
72
-
73
- @article{he2022byzantine,
74
- title = {Byzantine-Robust Decentralized Learning via Self-Centered Clipping},
75
- author = {Lie He and Sai Praneeth Karimireddy and Martin Jaggi},
76
- journal={arXiv.org},
77
- year = {2022},
78
- url = {dblp.org/rec/journals/corr/abs-2202-01545}
79
- }
80
-
81
- @article{fu2022lightweight,
82
- title = {Lightweight Automatic Modulation Classification Based on Decentralized Learning},
83
- author = {Xue Fu and Guan Gui and Yu Wang and T. Ohtsuki and B. Adebisi and H. Gačanin and F. Adachi},
84
- journal={IEEE Transactions on Cognitive Communications and Networking},
85
- year = {2022},
86
- url = {dblp.org/rec/journals/tccn/FuGWOAGA22}
87
- }
88
-
89
- @article{fu2022automatic,
90
- title = {Automatic Modulation Classification Based on Decentralized Learning and Ensemble Learning},
91
- author = {Xue Fu and Guan Gui and Yu Wang and H. Gačanin and F. Adachi},
92
- journal={IEEE Transactions on Vehicular Technology},
93
- year = {2022},
94
- url = {dblp.org/rec/journals/tvt/FuG0GA22}
95
- }
96
-
97
- @article{dandi2022data,
98
- title = {Data-heterogeneity-aware Mixing for Decentralized Learning},
99
- author = {Yatin Dandi and Anastasia Koloskova and Martin Jaggi and S. Stich},
100
- journal={arXiv.org},
101
- year = {2022},
102
- url = {dblp.org/rec/journals/corr/abs-2204-06477}
103
- }
104
-
105
- @article{jeong2022asynchronous,
106
- title = {Asynchronous Decentralized Learning over Unreliable Wireless Networks},
107
- author = {Eunjeong Jeong and Matteo Zecchin and M. Kountouris},
108
- journal={ICC 2022 - IEEE International Conference on Communications},
109
- year = {2022},
110
- url = {dblp.org/rec/conf/icc/JeongZK22}
111
- }
112
-
113
- @article{li2022learning,
114
- title = {Learning to Collaborate in Decentralized Learning of Personalized Models},
115
- author = {Shuang-Yang Li and Tianyi Zhou and Xinmei Tian and Dacheng Tao},
116
- journal={Computer Vision and Pattern Recognition},
117
- year = {2022},
118
- url = {dblp.org/rec/conf/cvpr/LiZ0T22}
119
- }
120
-
121
- @article{esfandiari2021cross,
122
- title = {Cross-Gradient Aggregation for Decentralized Learning from Non-IID data},
123
- author = {Yasaman Esfandiari and Sin Yong Tan and Zhanhong Jiang and Aditya Balu and Ethan Herron and C. Hegde and S. Sarkar},
124
- journal={International Conference on Machine Learning},
125
- year = {2021},
126
- url = {dblp.org/rec/journals/corr/abs-2103-02051}
127
- }
128
-
129
- @article{taheri2022on,
130
- title = {On Generalization of Decentralized Learning with Separable Data},
131
- author = {Hossein Taheri and Christos Thrampoulidis},
132
- journal={arXiv preprint},
133
- year = {2022},
134
- url = {arxiv.org/abs/2209.07116}
135
- }
136
-
137
- @article{paszke2019pytorch,
138
- title = {PyTorch: An Imperative Style, High-Performance Deep Learning Library},
139
- author = {Adam Paszke and Sam Gross and Francisco Massa and Adam Lerer and James Bradbury and Gregory Chanan and Trevor Killeen and Zeming Lin and N. Gimelshein and L. Antiga and Alban Desmaison and Andreas Köpf and E. Yang and Zach DeVito and Martin Raison and A. Tejani and Sasank Chilamkurthy and Benoit Steiner and Lu Fang and Junjie Bai and Soumith Chintala},
140
- journal={Neural Information Processing Systems},
141
- year = {2019},
142
- url = {dblp.org/rec/journals/corr/abs-1912-01703}
143
- }
144
-
145
- @article{madry2017towards,
146
- title = {Towards Deep Learning Models Resistant to Adversarial Attacks},
147
- author = {A. Madry and Aleksandar Makelov and Ludwig Schmidt and Dimitris Tsipras and Adrian Vladu},
148
- journal={International Conference on Learning Representations},
149
- year = {2017},
150
- url = {dblp.org/rec/conf/iclr/MadryMSTV18}
151
- }
152
-
153
- @article{gal2015dropout,
154
- title = {Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning},
155
- author = {Y. Gal and Zoubin Ghahramani},
156
- journal={International Conference on Machine Learning},
157
- year = {2015},
158
- url = {dblp.org/rec/journals/corr/GalG15}
159
- }
160
-
161
- @article{chollet2016xception,
162
- title = {Xception: Deep Learning with Depthwise Separable Convolutions},
163
- author = {François Chollet},
164
- journal={Computer Vision and Pattern Recognition},
165
- year = {2016},
166
- url = {dblp.org/rec/journals/corr/Chollet16a}
167
- }
168
-
169
- @article{liu2014deep,
170
- title = {Deep Learning Face Attributes in the Wild},
171
- author = {Ziwei Liu and Ping Luo and Xiaogang Wang and Xiaoou Tang},
172
- journal={IEEE International Conference on Computer Vision},
173
- year = {2014},
174
- url = {dblp.org/rec/journals/corr/LiuLWT14}
175
- }
176
-
177
- @article{qi2016pointnet,
178
- title = {PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation},
179
- author = {C. Qi and Hao Su and Kaichun Mo and L. Guibas},
180
- journal={Computer Vision and Pattern Recognition},
181
- year = {2016},
182
- url = {dblp.org/rec/journals/corr/QiSMG16}
183
- }
184
-
185
- @article{gulshan2016development,
186
- title = {Development and Validation of a Deep Learning Algorithm for Detection of Diabetic Retinopathy in Retinal Fundus Photographs.},
187
- author = {Varun Gulshan and L. Peng and Marc Coram and Martin C. Stumpe and Derek J. Wu and Arunachalam Narayanaswamy and Subhashini Venugopalan and Kasumi Widner and T. Madams and Jorge A Cuadros and R. Kim and R. Raman and Philip Nelson and J. Mega and D. Webster},
188
- journal={Journal of the American Medical Association (JAMA)},
189
- year = {2016},
190
- url = {}
191
- }
192
-
193
- @article{zhang2016understanding,
194
- title = {Understanding deep learning requires rethinking generalization},
195
- author = {Chiyuan Zhang and Samy Bengio and Moritz Hardt and B. Recht and Oriol Vinyals},
196
- journal={International Conference on Learning Representations},
197
- year = {2016},
198
- url = {dblp.org/rec/conf/iclr/ZhangBHRV17}
199
- }
200
-
201
- @article{adadi2018peeking,
202
- title = {Peeking Inside the Black-Box: A Survey on Explainable Artificial Intelligence (XAI)},
203
- author = {Amina Adadi and M. Berrada},
204
- journal={IEEE Access},
205
- year = {2018},
206
- url = {dblp.org/rec/journals/access/AdadiB18}
207
- }
208
-
209
- @article{ridley2022explainable,
210
- title = {Explainable Artificial Intelligence (XAI)},
211
- author = {M. Ridley},
212
- journal={Information Technology and Libraries},
213
- year = {2022},
214
- url = {}
215
- }
216
-
217
- @article{russell1995artificial,
218
- title = {Artificial Intelligence: A Modern Approach},
219
- author = {Stuart J. Russell and Peter Norvig},
220
- journal={arXiv preprint},
221
- year = {1995},
222
- url = {}
223
- }
224
-
225
- @article{ma2022artificial,
226
- title = {Artificial Intelligence A Modern Approach Global Edition},
227
- author = {},
228
- journal={arXiv preprint},
229
- year = {2022},
230
- url = {}
231
- }
232
-
233
- @article{holland1992adaptation,
234
- title = {Adaptation in Natural and Artificial Systems: An Introductory Analysis with Applications to Biology, Control, and Artificial Intelligence},
235
- author = {J. Holland},
236
- journal={arXiv preprint},
237
- year = {1992},
238
- url = {dblp.org/rec/books/mit/H1992}
239
- }
240
-
241
- @article{dufwenberg2011game,
242
- title = {Game theory.},
243
- author = {M. Dufwenberg},
244
- journal={Wiley Interdisciplinary Reviews: Cognitive Science},
245
- year = {2011},
246
- url = {}
247
- }
248
-
249
- @article{9–jan2022combinatorial,
250
- title = {Combinatorial Game Theory},
251
- author = {Jan. 9–Jan},
252
- journal={arXiv preprint},
253
- year = {2022},
254
- url = {}
255
- }
256
-
257
- @article{osborne1995a,
258
- title = {A Course in Game Theory},
259
- author = {M. Osborne and A. Rubinstein},
260
- journal={arXiv preprint},
261
- year = {1995},
262
- url = {}
263
- }
264
-
265
- @article{camerer2003behavioral,
266
- title = {Behavioral Game Theory: Experiments in Strategic Interaction},
267
- author = {Colin Camerer},
268
- journal={arXiv preprint},
269
- year = {2003},
270
- url = {}
271
- }
272
-
273
- @article{myerson1991game,
274
- title = {Game theory - Analysis of Conflict},
275
- author = {R. Myerson},
276
- journal={arXiv preprint},
277
- year = {1991},
278
- url = {dblp.org/rec/books/daglib/0023252}
279
- }
280
-
281
- @article{rabin1993incorporating,
282
- title = {Incorporating Fairness into Game Theory and Economics},
283
- author = {M. Rabin},
284
- journal={arXiv preprint},
285
- year = {1993},
286
- url = {}
287
- }
288
-
289
- @article{roughgarden2010algorithmic,
290
- title = {Algorithmic game theory},
291
- author = {T. Roughgarden},
292
- journal={Communications of the ACM},
293
- year = {2010},
294
- url = {dblp.org/rec/journals/cacm/Roughgarden10}
295
- }
296
-
297
- @article{hesteren2017evolutionary,
298
- title = {Evolutionary Game Theory},
299
- author = {D. M. V. Hesteren},
300
- journal={arXiv preprint},
301
- year = {2017},
302
- url = {}
303
- }
304
-
305
- @article{chen2022multi,
306
- title = {Multi-Agent Reinforcement Learning for Decentralized Resilient Secondary Control of Energy Storage Systems Against DoS Attacks},
307
- author = {Pengcheng Chen and Shichao Liu and Bo Chen and Li Yu},
308
- journal={IEEE Transactions on Smart Grid},
309
- year = {2022},
310
- url = {dblp.org/rec/journals/tsg/ChenLCY22}
311
- }
312
-
313
- @article{lyu2021contrasting,
314
- title = {Contrasting Centralized and Decentralized Critics in Multi-Agent Reinforcement Learning},
315
- author = {Xueguang Lyu and Yuchen Xiao and Brett Daley and Chris Amato},
316
- journal={Adaptive Agents and Multi-Agent Systems},
317
- year = {2021},
318
- url = {dblp.org/rec/conf/atal/LyuXDA21}
319
- }
320
-
321
- @article{su2022ma2ql,
322
- title = {MA2QL: A Minimalist Approach to Fully Decentralized Multi-Agent Reinforcement Learning},
323
- author = {Kefan Su and Siyuan Zhou and Chuang Gan and Xiangjun Wang and Zongqing Lu},
324
- journal={arXiv.org},
325
- year = {2022},
326
- url = {dblp.org/rec/journals/corr/abs-2209-08244}
327
- }
328
-
329
- @article{liu2022federated,
330
- title = {Federated Reinforcement Learning for Decentralized Voltage Control in Distribution Networks},
331
- author = {Haotian Liu and Wenchuan Wu},
332
- journal={IEEE Transactions on Smart Grid},
333
- year = {2022},
334
- url = {dblp.org/rec/journals/tsg/LiuW22a}
335
- }
336
-
337
- @article{lei2022adaptive,
338
- title = {Adaptive Stochastic ADMM for Decentralized Reinforcement Learning in Edge IoT},
339
- author = {Wanlu Lei and Yu Ye and M. Xiao and M. Skoglund and Zhu Han},
340
- journal={IEEE Internet of Things Journal},
341
- year = {2022},
342
- url = {dblp.org/rec/journals/iotj/LeiYXSH22}
343
- }
344
-
345
- @article{lu2021decentralized,
346
- title = {Decentralized Policy Gradient Descent Ascent for Safe Multi-Agent Reinforcement Learning},
347
- author = {Songtao Lu and K. Zhang and Tianyi Chen and T. Başar and L. Horesh},
348
- journal={AAAI Conference on Artificial Intelligence},
349
- year = {2021},
350
- url = {dblp.org/rec/conf/aaai/LuZCBH21}
351
- }
352
-
353
- @article{thumiger2022a,
354
- title = {A Multi-Agent Deep Reinforcement Learning Approach for Practical Decentralized UAV Collision Avoidance},
355
- author = {Nicholas Thumiger and M. Deghat},
356
- journal={IEEE Control Systems Letters},
357
- year = {2022},
358
- url = {dblp.org/rec/journals/csysl/ThumigerD22}
359
- }
360
-
361
- @article{rakkini2022comprehensive,
362
- title = {Comprehensive overview on the deployment of machine learning, deep learning, reinforcement learning algorithms in Selfish mining attack in blockchain},
363
- author = {M. J. Jeyasheela Rakkini and K. Geetha},
364
- journal={2022 IEEE 2nd Mysore Sub Section International Conference (MysuruCon)},
365
- year = {2022},
366
- url = {}
367
- }
368
-
369
- @article{sahu2023an,
370
- title = {An Overview of Machine Learning, Deep Learning, and Reinforcement Learning-Based Techniques in Quantitative Finance: Recent Progress and Challenges},
371
- author = {S. Sahu and A. Mokhade and N. Bokde},
372
- journal={Applied Sciences},
373
- year = {2023},
374
- url = {}
375
- }
376
-
377
- @article{zhao2022alphaholdem,
378
- title = {AlphaHoldem: High-Performance Artificial Intelligence for Heads-Up No-Limit Poker via End-to-End Reinforcement Learning},
379
- author = {Enmin Zhao and Renye Yan and Jinqiu Li and Kai Li and Junliang Xing},
380
- journal={AAAI Conference on Artificial Intelligence},
381
- year = {2022},
382
- url = {dblp.org/rec/conf/aaai/ZhaoYLLX22}
383
- }
384
-
385
- @article{ma2021algorithms,
386
- title = {Algorithms For Reinforcement Learning Synthesis Lectures On Artificial Intelligence And Machine Learning Epdf Read},
387
- author = {},
388
- journal={arXiv preprint},
389
- year = {2021},
390
- url = {}
391
- }
392
-
393
- @article{yang2021an,
394
- title = {An Information Fusion Approach to Intelligent Traffic Signal Control Using the Joint Methods of Multiagent Reinforcement Learning and Artificial Intelligence of Things},
395
- author = {Xiaoxian Yang and Yueshen Xu and Li Kuang and Zhiying Wang and Honghao Gao and Xuejie Wang},
396
- journal={IEEE transactions on intelligent transportation systems (Print)},
397
- year = {2021},
398
- url = {dblp.org/rec/journals/tits/YangXKWGW22}
399
- }
400
-
401
- @article{ribba2020model,
402
- title = {Model‐Informed Artificial Intelligence: Reinforcement Learning for Precision Dosing},
403
- author = {B. Ribba and S. Dudal and T. Lavé and R. Peck},
404
- journal={Clinical pharmacology and therapy},
405
- year = {2020},
406
- url = {}
407
- }
408
-
409
- @article{hrinivich2020artificial,
410
- title = {Artificial intelligence-based radiotherapy machine parameter optimization using reinforcement learning.},
411
- author = {W. Hrinivich and Junghoon Lee},
412
- journal={Medical Physics (Lancaster)},
413
- year = {2020},
414
- url = {}
415
- }
416
-
417
- @article{liu2022joint,
418
- title = {Joint Beamforming, Power Allocation, and Splitting Control for SWIPT-Enabled IoT Networks with Deep Reinforcement Learning and Game Theory},
419
- author = {Jain-Shing Liu and C. Lin and Yu‐Chen Hu and Praveen Kumar Donta},
420
- journal={Italian National Conference on Sensors},
421
- year = {2022},
422
- url = {dblp.org/rec/journals/sensors/LiuLHD22}
423
- }
424
-
425
- @article{duan2022autonomous,
426
- title = {Autonomous driving planning and decision making based on game theory and reinforcement learning},
427
- author = {Weiping Duan and Zhongyi Tang and Wei Liu and Hongbiao Zhou},
428
- journal={Expert Syst. J. Knowl. Eng.},
429
- year = {2022},
430
- url = {dblp.org/rec/journals/es/DuanTLZ23}
431
- }
432
-
433
- @article{liang2022gadqn,
434
- title = {GaDQN-IDS: A Novel Self-Adaptive IDS for VANETs Based on Bayesian Game Theory and Deep Reinforcement Learning},
435
- author = {Junwei Liang and M. Ma and Xu Tan},
436
- journal={IEEE transactions on intelligent transportation systems (Print)},
437
- year = {2022},
438
- url = {dblp.org/rec/journals/tits/LiangMT22}
439
- }
440
-
441
- @article{jin2022security,
442
- title = {Security State Estimation for Cyber-Physical Systems against DoS Attacks via Reinforcement Learning and Game Theory},
443
- author = {Zengwang Jin and Shuting Zhang and Yanyan Hu and Yanning Zhang and Changyin Sun},
444
- journal={Actuators},
445
- year = {2022},
446
- url = {}
447
- }
448
-
449
- @article{yin2022air,
450
- title = {Air Combat Maneuver Decision Based on Deep Reinforcement Learning and Game Theory},
451
- author = {Shuhui Yin and Yu Kang and Yunbo Zhao and Jian Xue},
452
- journal={Cybersecurity and Cyberforensics Conference},
453
- year = {2022},
454
- url = {}
455
- }
456
-
457
- @article{zwillinger2022distributing,
458
- title = {Distributing data throughout a MANET in a communications denied environment: reinforcement learning and game theory approaches},
459
- author = {D. Zwillinger and J. Sierchio and M. Gerken and Emily Clark},
460
- journal={Defense + Commercial Sensing},
461
- year = {2022},
462
- url = {}
463
- }
464
-
465
- @article{purfatideh2014efficent,
466
- title = {Efficent Congestion Control Scheme in Computer Networks using Fuzzy Reinforcement Learning & Game Theory},
467
- author = {Mohamad Bagher Safari Purfatideh and S. Jamali and Morteza Analoei},
468
- journal={arXiv preprint},
469
- year = {2014},
470
- url = {}
471
- }
472
-
473
- @article{teymoori2022dynamic,
474
- title = {Dynamic Multi-user Computation Offloading for Mobile Edge Computing using Game Theory and Deep Reinforcement Learning},
475
- author = {P. Teymoori and A. Boukerche},
476
- journal={ICC 2022 - IEEE International Conference on Communications},
477
- year = {2022},
478
- url = {dblp.org/rec/conf/icc/TeymooriB22}
479
- }
480
-
481
- @article{huang2022toward,
482
- title = {Toward Decentralized and Collaborative Deep Learning Inference for Intelligent IoT Devices},
483
- author = {Yakun Huang and Xiuquan Qiao and S. Dustdar and Jianwei Zhang and Jiulin Li},
484
- journal={IEEE Network},
485
- year = {2022},
486
- url = {dblp.org/rec/journals/network/HuangQDZL22}
487
- }
488
-
489
- @article{lin2021quasi,
490
- title = {Quasi-Global Momentum: Accelerating Decentralized Deep Learning on Heterogeneous Data},
491
- author = {Tao Lin and Sai Praneeth Karimireddy and S. Stich and Martin Jaggi},
492
- journal={International Conference on Machine Learning},
493
- year = {2021},
494
- url = {dblp.org/rec/journals/corr/abs-2102-04761}
495
- }
496
-
497
- @article{takezawa2022momentum,
498
- title = {Momentum Tracking: Momentum Acceleration for Decentralized Deep Learning on Heterogeneous Data},
499
- author = {Yuki Takezawa and Hang Bao and K. Niwa and R. Sato and Makoto Yamada},
500
- journal={arXiv.org},
501
- year = {2022},
502
- url = {dblp.org/rec/journals/corr/abs-2209-15505}
503
- }
504
-
505
- @article{kong2021consensus,
506
- title = {Consensus Control for Decentralized Deep Learning},
507
- author = {Lingjing Kong and Tao Lin and Anastasia Koloskova and Martin Jaggi and S. Stich},
508
- journal={International Conference on Machine Learning},
509
- year = {2021},
510
- url = {dblp.org/rec/conf/icml/00010KJS21}
511
- }
512
-
513
- @article{vogels2021relaysum,
514
- title = {RelaySum for Decentralized Deep Learning on Heterogeneous Data},
515
- author = {Thijs Vogels and Lie He and Anastasia Koloskova and Tao Lin and Sai Praneeth Karimireddy and S. Stich and Martin Jaggi},
516
- journal={Neural Information Processing Systems},
517
- year = {2021},
518
- url = {dblp.org/rec/conf/nips/VogelsHKKLSJ21}
519
- }
520
-
521
- @article{shiri2022decentralized,
522
- title = {Decentralized Distributed Multi-institutional PET Image Segmentation Using a Federated Deep Learning Framework},
523
- author = {I. Shiri and A. Vafaei Sadr and Mehdi Amini and Y. Salimi and Amirhossein Sanaat and A. Akhavanallaf and Behrooz Razeghi and Sohrab Ferdowsi and A. Saberi and Hossein ARABI and M. Becker and S. Voloshynovskiy and Deniz Gündüz and A. Rahmim and H. Zaidi},
524
- journal={Clinical Nuclear Medicine},
525
- year = {2022},
526
- url = {}
527
- }
528
-
529
- @article{jayakody2022fake,
530
- title = {Fake News Detection using a Decentralized Deep Learning Model and Federated Learning},
531
- author = {Nirosh Jayakody and Azeem Mohammad and M. Halgamuge},
532
- journal={Annual Conference of the IEEE Industrial Electronics Society},
533
- year = {2022},
534
- url = {dblp.org/rec/conf/iecon/JayakodyMH22}
535
- }
536
-
537
- @article{sun2021decentralized,
538
- title = {Decentralized Deep Learning for Multi-Access Edge Computing: A Survey on Communication Efficiency and Trustworthiness},
539
- author = {Yuwei Sun and H. Ochiai and H. Esaki},
540
- journal={IEEE Transactions on Artificial Intelligence},
541
- year = {2021},
542
- url = {dblp.org/rec/journals/tai/SunOE22}
543
- }
544
-
545
- @article{lim2022decentralized,
546
- title = {Decentralized Edge Intelligence: A Dynamic Resource Allocation Framework for Hierarchical Federated Learning},
547
- author = {Wei Yang Bryan Lim and Jer Shyuan Ng and Zehui Xiong and Jiangming Jin and Yang Zhang and D. Niyato and C. Leung and C. Miao},
548
- journal={IEEE Transactions on Parallel and Distributed Systems},
549
- year = {2022},
550
- url = {dblp.org/rec/journals/tpds/LimNXJZNLM22}
551
- }
552
-
553
- @article{kang2022communication,
554
- title = {Communication-Efficient and Cross-Chain Empowered Federated Learning for Artificial Intelligence of Things},
555
- author = {Jiawen Kang and Xuandi Li and Jiangtian Nie and Yi Liu and Minrui Xu and Zehui Xiong and D. Niyato and Qiang Yan},
556
- journal={IEEE Transactions on Network Science and Engineering},
557
- year = {2022},
558
- url = {dblp.org/rec/journals/tnse/KangLNLXXNY22}
559
- }
560
-
561
- @article{s2022converging,
562
- title = {Converging Blockchain and Artificial-Intelligence Towards Healthcare: A Decentralized-Private and Intelligence Health Record System},
563
- author = {Arpith S and G. Mufeed and Anusha K R and Gahana},
564
- journal={2022 2nd International Conference on Intelligent Technologies (CONIT)},
565
- year = {2022},
566
- url = {}
567
- }
568
-
569
- @article{facchini2022decentralized,
570
- title = {Decentralized Autonomous Organizations and Multi-agent Systems for Artificial Intelligence Applications and Data Analysis},
571
- author = {Sante Dino Facchini},
572
- journal={International Joint Conference on Artificial Intelligence},
573
- year = {2022},
574
- url = {dblp.org/rec/conf/ijcai/Facchini22}
575
- }
576
-
577
- @article{nouruzi2022toward,
578
- title = {Toward a Smart Resource Allocation Policy via Artificial Intelligence in 6G Networks: Centralized or Decentralized?},
579
- author = {A. Nouruzi and A. Rezaei and Ata Khalili and N. Mokari and M. Javan and Eduard Axel Jorswieck and H. Yanikomeroglu},
580
- journal={arXiv.org},
581
- year = {2022},
582
- url = {dblp.org/rec/journals/corr/abs-2202-09093}
583
- }
584
-
585
- @article{jose2022application,
586
- title = {Application of artificial intelligence in secure decentralized computation enabled by TOTEM},
587
- author = {Dhanya Therese Jose and Chunming Rong and Antorweep Chakravorty},
588
- journal={2022 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)},
589
- year = {2022},
590
- url = {}
591
- }
592
-
593
- @article{clough2020artificial,
594
- title = {Artificial Intelligence, Data-Driven Learning, and the Decentralized Structure of Platform Ecosystems},
595
- author = {David R. Clough and Andy Wu},
596
- journal={arXiv preprint},
597
- year = {2020},
598
- url = {}
599
- }
600
-
601
- @article{hu2020energy,
602
- title = {Energy Management for isolated renewable-powered microgrids using reinforcement learning and game theory},
603
- author = {R. Hu and A. Kwasinski},
604
- journal={EPE},
605
- year = {2020},
606
- url = {}
607
- }
608
-
609
- @article{adams2020resolving,
610
- title = {Resolving Implicit Coordination in Multi-Agent Deep Reinforcement Learning with Deep Q-Networks & Game Theory},
611
- author = {Griffin Adams and Sarguna Padmanabhan and S. Shekhar},
612
- journal={arXiv.org},
613
- year = {2020},
614
- url = {dblp.org/rec/journals/corr/abs-2012-09136}
615
- }
616
-
617
- @article{zhou2019intelligent,
618
- title = {Intelligent Decentralized Dynamic Power Allocation in MANET at Tactical Edge based on Mean-Field Game Theory},
619
- author = {Zejian Zhou and Lijun Qian and Hao Xu},
620
- journal={IEEE Military Communications Conference},
621
- year = {2019},
622
- url = {dblp.org/rec/conf/milcom/ZhouQX19}
623
- }
624
-
625
- @article{zhou2021decentralized,
626
- title = {Decentralized Optimal Tracking Control for Large-scale Multi-Agent Systems under Complex Environment: A Constrained Mean Field Game with Reinforcement Learning Approach},
627
- author = {Zejian Zhou and Hao Xu},
628
- journal={Conference on Control Technology and Applications},
629
- year = {2021},
630
- url = {dblp.org/rec/conf/ccta/ZhouX21a}
631
- }
632
-
633
- @article{blum2006machine,
634
- title = {Machine Learning , Game Theory , and Mechanism Design for a Networked World},
635
- author = {A. Blum},
636
- journal={arXiv preprint},
637
- year = {2006},
638
- url = {}
639
- }
640
-
641
- @article{goktas2022an,
642
- title = {An Algorithmic Theory of Markets and Their Application to Decentralized Markets},
643
- author = {Denizalp Goktas},
644
- journal={AAAI Conference on Artificial Intelligence},
645
- year = {2022},
646
- url = {dblp.org/rec/conf/aaai/Goktas22}
647
- }
648
-
649
- @article{celli2021decentralized,
650
- title = {Decentralized No-regret Learning Algorithms for Extensive-form Correlated Equilibria (Extended Abstract)},
651
- author = {A. Celli and A. Marchesi and Gabriele Farina and N. Gatti},
652
- journal={International Joint Conference on Artificial Intelligence},
653
- year = {2021},
654
- url = {dblp.org/rec/conf/ijcai/CelliMF021}
655
- }
656
-
657
- @article{kim2018a,
658
- title = {A better-performing Q-learning game-theoretic distributed routing for underwater wireless sensor networks},
659
- author = {Sungwook Kim},
660
- journal={Int. J. Distributed Sens. Networks},
661
- year = {2018},
662
- url = {dblp.org/rec/journals/ijdsn/Kim18}
663
- }
664
-
665
- @article{zhu2020deep,
666
- title = {Deep‐learning artificial intelligence analysis of clinical variables predicts mortality in COVID‐19 patients},
667
- author = {Jocelyn Zhu and Peilin Ge and Chun-guo Jiang and Yong Zhang and Xiaoran Li and Zirun Zhao and Liming Zhang and T. Duong},
668
- journal={Journal of the American College of Emergency Physicians Open},
669
- year = {2020},
670
- url = {}
671
- }
672
-
673
- @article{hiraiwa2019a,
674
- title = {A deep-learning artificial intelligence system for assessment of root morphology of the mandibular first molar on panoramic radiography.},
675
- author = {T. Hiraiwa and Y. Ariji and M. Fukuda and Yoshitaka Kise and K. Nakata and A. Katsumata and H. Fujita and E. Ariji},
676
- journal={Dento maxillo facial radiology},
677
- year = {2019},
678
- url = {}
679
- }
680
-
681
- @article{lemley2017deep,
682
- title = {Deep Learning for Consumer Devices and Services: Pushing the limits for machine learning, artificial intelligence, and computer vision.},
683
- author = {Joseph Lemley and S. Bazrafkan and P. Corcoran},
684
- journal={IEEE Consumer Electronics Magazine},
685
- year = {2017},
686
- url = {dblp.org/rec/journals/cem/LemleyBC17}
687
- }
688
-
689
- @article{joonmyun2020application,
690
- title = {Application Trends of Deep Learning Artificial Intelligence in Autonomous Things},
691
- author = {Cho Joonmyun},
692
- journal={arXiv preprint},
693
- year = {2020},
694
- url = {}
695
- }
696
-
697
- @article{xie2022intelligent,
698
- title = {INTELLIGENT ACQUISITION METHOD OF HERBACEOUS FLOWERS IMAGE BASED ON THEME CRAWLER, DEEP LEARNING AND GAME THEORY},
699
- author = {Zhouyi Xie and Yanrong Hu and Weijun Hu},
700
- journal={Chronos},
701
- year = {2022},
702
- url = {}
703
- }
704
-
705
- @article{anishfathima2022secure,
706
- title = {Secure Wireless Sensor Network Energy Optimization Model with Game Theory and Deep Learning Algorithm},
707
- author = {B. Anishfathima and M. Mahaboob and S.Gokul Kumar and A. Jabakumar},
708
- journal={2022 8th International Conference on Advanced Computing and Communication Systems (ICACCS)},
709
- year = {2022},
710
- url = {}
711
- }
712
-
713
- @article{ardekani2022combining,
714
- title = {Combining Deep Learning and Game Theory for Path Planning in Autonomous Racing Cars},
715
- author = {Amirhossein Afkhami Ardekani and Amirhosein Chahe and M. R. Hairi Yazdi},
716
- journal={International Conference on Robotics and Mechatronics},
717
- year = {2022},
718
- url = {}
719
- }
720
-
721
- @article{cunningham2023a,
722
- title = {A Deep Learning Game Theoretic Model for Defending Against Large Scale Smart Grid Attacks},
723
- author = {James Cunningham and Alexander J. Aved and David Ferris and Philip Morrone and Conrad S. Tucker},
724
- journal={IEEE Transactions on Smart Grid},
725
- year = {2023},
726
- url = {dblp.org/rec/journals/tsg/CunninghamAFMT23}
727
- }
728
-
729
- @article{jia2021lane,
730
- title = {Lane-Changing Behavior Prediction Based on Game Theory and Deep Learning},
731
- author = {Shuo Jia and F. Hui and Cheng Wei and Xiangmo Zhao and Jianbei Liu},
732
- journal={Journal of Advanced Transportation},
733
- year = {2021},
734
- url = {}
735
- }
736
-
737
- @article{sahin2022artificial,
738
- title = {Artificial Intelligence, Game Theory, Programming Used Languages and Platforms, Game Types and Training Methods},
739
- author = {Fatih Sahin},
740
- journal={2022 International Conference on Artificial Intelligence of Things (ICAIoT)},
741
- year = {2022},
742
- url = {}
743
- }
744
-
745
- @article{hanley2021games,
746
- title = {GAMES, game theory and artificial intelligence Game theory and artificial intelligence},
747
- author = {J. Hanley},
748
- journal={arXiv preprint},
749
- year = {2021},
750
- url = {}
751
- }
752
-
753
- @article{wang2022deepholdem,
754
- title = {DeepHoldem: An Efficient End-to-End Texas Hold'em Artificial Intelligence Fusion of Algorithmic Game Theory and Game Information},
755
- author = {Ke Wang and Dongdong Bai and Qibin Zhou},
756
- journal={International Conference on Innovative Computing and Cloud Computing},
757
- year = {2022},
758
- url = {}
759
- }
760
-
761
- @article{shen2021interactive,
762
- title = {Interactive Artificial Intelligence Meets Game Theory in Next-Generation Communication Networks},
763
- author = {Jingyu Shen and Chungang Yang and Tong Li and Xinwei Wang and Yanbo Song and M. Guizani},
764
- journal={IEEE wireless communications},
765
- year = {2021},
766
- url = {dblp.org/rec/journals/wc/ShenYLWSG21}
767
- }
768
-
769
- @article{bai2021smart,
770
- title = {Smart financial policy adjustment system based on multiple game theory and artificial intelligence},
771
- author = {Yunru Bai and Guang Zhang and San Sun},
772
- journal={2021 5th International Conference on Trends in Electronics and Informatics (ICOEI)},
773
- year = {2021},
774
- url = {}
775
- }
776
-
777
- @article{dowe2020game,
778
- title = {Game theory and Artificial Intelligence in just preservation},
779
- author = {D. Dowe and N. Chmait},
780
- journal={arXiv preprint},
781
- year = {2020},
782
- url = {}
783
- }
784
-
outputs/outputs_20230608_115759/related works.tex DELETED
@@ -1,16 +0,0 @@
1
- \section{Related Works}
2
-
3
- \paragraph{Deep Reinforcement Learning for Atari Games}
4
- The seminal work by \citet{mnih2013playing} introduced the first deep learning model to successfully learn control policies directly from high-dimensional sensory input using reinforcement learning. This model outperformed all previous approaches on six of the games and surpassed a human expert on three of them. The authors later extended their work with asynchronous gradient descent for optimization of deep neural network controllers, showing success on a wide variety of continuous motor control problems and a new task of navigating random 3D mazes using a visual input \citep{mnih2016asynchronous}. However, these approaches suffer from overestimations in value function approximations, which were addressed by \citet{hasselt2015deep} through a specific adaptation to the DQN algorithm, leading to much better performance on several games.
5
-
6
- \paragraph{Decentralized Reinforcement Learning}
7
- Decentralized reinforcement learning has been studied in various contexts. \citet{lu2021decentralized} proposed a decentralized policy gradient (PG) method, Safe Dec-PG, to perform policy optimization based on the D-CMDP model over a network. This was the first decentralized PG algorithm that accounted for coupled safety constraints with a quantifiable convergence rate in multi-agent reinforcement learning. \citet{lei2022adaptive} introduced an adaptive stochastic incremental ADMM (asI-ADMM) algorithm for decentralized RL with edge-computing-empowered IoT networks, showing better performance in terms of communication costs and scalability compared to the state of the art. However, the work by \citet{lyu2021contrasting} highlighted misconceptions regarding centralized critics in the literature, emphasizing that both centralized and decentralized critics have different pros and cons that should be considered by algorithm designers.
8
-
9
- \paragraph{Game Theory and Multi-Agent Reinforcement Learning}
10
- Game theory has been widely used in combination with reinforcement learning to tackle multi-agent problems. \citet{yin2022air} proposed an algorithm based on deep reinforcement learning and game theory to solve Nash equilibrium strategy in highly competitive environments, demonstrating good convergence through simulation tests. \citet{adams2020resolving} addressed the challenges of implicit coordination in multi-agent deep reinforcement learning by combining Deep-Q Networks for policy learning with Nash equilibrium for action selection. In the context of autonomous driving, \citet{duan2022autonomous} proposed an automatic drive model based on game theory and reinforcement learning, enabling multi-agent cooperative driving with strategic reasoning and negotiation in traffic scenarios. However, these approaches often require complex computations and may not scale well to large-scale problems.
11
-
12
- \paragraph{Decentralized Learning with Communication Constraints}
13
- One of the challenges in decentralized learning is to handle communication constraints. \citet{kong2021consensus} showed that decentralized training converges as fast as the centralized counterpart when the training consensus distance is lower than a critical quantity, providing insights for designing better decentralized training schemes. \citet{fu2022automatic} proposed a decentralized ensemble learning framework for automatic modulation classification, reducing communication overhead while maintaining similar classification performance. In the context of multi-agent systems, \citet{su2022ma2ql} introduced MA2QL, a minimalist approach to fully decentralized cooperative MARL with theoretical guarantees on convergence to a Nash equilibrium when each agent achieves $\varepsilon$-convergence at each turn. However, these methods may still suffer from limitations in highly dynamic and complex environments.
14
-
15
- \paragraph{Decentralized Collision Avoidance}
16
- Decentralized collision avoidance has been an important application of reinforcement learning. \citet{thumiger2022a} proposed an improved deep reinforcement learning controller for decentralized collision avoidance using a unique architecture incorporating long-short term memory cells and a reward function inspired by gradient-based approaches. This controller outperformed existing techniques in environments with variable numbers of agents. In the context of autonomous vehicles, \citet{ardekani2022combining} suggested a novel algorithm based on Nash equilibrium and memory neural networks for path selection in highly dynamic and complex environments, showing that the obtained response matched with Nash equilibrium in 90.2 percent of the situations during simulation experiments. However, these approaches may require extensive training and computational resources, which could be a concern in real-world applications.
outputs/outputs_20230608_115759/template.tex DELETED
@@ -1,35 +0,0 @@
1
- \documentclass{article} % For LaTeX2e
2
- \UseRawInputEncoding
3
- \usepackage{graphicx}
4
- \usepackage{booktabs}
5
- \usepackage{iclr2022_conference, times}
6
- \input{math_commands.tex}
7
- \usepackage{hyperref}
8
- \usepackage{url}
9
- \usepackage{algorithm}
10
- \usepackage{algpseudocode}
11
-
12
- \title{TITLE}
13
- \author{GPT-4}
14
-
15
- \newcommand{\fix}{\marginpar{FIX}}
16
- \newcommand{\new}{\marginpar{NEW}}
17
-
18
- \begin{document}
19
- \maketitle
20
- \input{abstract.tex}
21
- \input{introduction.tex}
22
- \input{related works.tex}
23
- \input{backgrounds.tex}
24
- \input{methodology.tex}
25
- \input{experiments.tex}
26
- \input{conclusion.tex}
27
-
28
- \bibliography{ref}
29
- \bibliographystyle{iclr2022_conference}
30
-
31
- %\appendix
32
- %\section{Appendix}
33
- %You may include other additional sections here.
34
-
35
- \end{document}
utils/gpt_interaction.py CHANGED
@@ -1,12 +1,13 @@
1
  import os
 
 
2
  import openai
3
  import logging
4
  import requests
5
-
6
 
7
  log = logging.getLogger(__name__)
8
 
9
-
10
  def get_gpt_responses(systems, prompts, model="gpt-4", temperature=0.4):
11
  conversation_history = [
12
  {"role": "system", "content": systems},
@@ -24,33 +25,98 @@ def get_gpt_responses(systems, prompts, model="gpt-4", temperature=0.4):
24
  return assistant_message, usage
25
 
26
 
27
- def get_gpt_responses_test(systems, prompts, model="gpt-4", temperature=0.4, base_url=None, key=None):
28
- end_point = r"/v1/completions"
29
- if base_url is None:
30
- base_url = r"https://api.openai.com" + end_point
31
- if key is None:
32
- key = os.getenv("OPENAI_API_KEY")
33
 
34
- url = base_url + end_point
35
 
36
- headers = {
37
- 'Content-Type': 'application/json',
38
- 'Authorization': f'Bearer {key}' # <-- Replace fkxxxxx with your own Forward Key; keep the leading "Bearer" and the single space between it and the key.
39
- }
40
 
41
- message = [{"role": "system", "content": systems},
42
- {"role": "user", "content": prompts}]
43
- data = {
44
- "model": model,
45
- "message": message,
46
- "temperature": temperature
47
- }
48
- print(data)
49
- response = requests.post(url, headers=headers, json=data)
50
- print(response)
51
- response = response.json()
52
- return response['choices'][0]["message"]["content"]
53
 
54
 
55
  if __name__ == "__main__":
56
- pass
 
 
 
1
  import os
2
+ import time
3
+
4
  import openai
5
  import logging
6
  import requests
7
+ import json
8
 
9
  log = logging.getLogger(__name__)
10
 
 
11
  def get_gpt_responses(systems, prompts, model="gpt-4", temperature=0.4):
12
  conversation_history = [
13
  {"role": "system", "content": systems},
 
25
  return assistant_message, usage
26
 
27
 
28
+ class GPTModel_API2D_SUPPORT:
29
+ def __init__(self, model="gpt-4", temperature=0, presence_penalty=0,
30
+ frequency_penalty=0, url=None, key=None, max_attempts=1, delay=20):
31
+ if url is None:
32
+ url = "https://api.openai.com/v1/chat/completions"
33
+ if key is None:
34
+ key = os.getenv("OPENAI_API_KEY")
35
+
36
+ self.model = model
37
+ self.temperature = temperature
38
+ self.url = url
39
+ self.key = key
40
+ self.presence_penalty = presence_penalty
41
+ self.frequency_penalty = frequency_penalty
42
+ self.max_attempts = max_attempts
43
+ self.delay = delay
44
+
45
+ def __call__(self, systems, prompts, return_json=False):
46
+ headers = {
47
+ "Content-Type": "application/json",
48
+ "Authorization": f"Bearer {self.key}",
49
+ }
50
+
51
+ data = {
52
+ "model": f"{self.model}",
53
+ "messages": [
54
+ {"role": "system", "content": systems},
55
+ {"role": "user", "content": prompts}],
56
+ "temperature": self.temperature,
57
+ "n": 1,
58
+ "stream": False,
59
+ "presence_penalty": self.presence_penalty,
60
+ "frequency_penalty": self.frequency_penalty
61
+ }
62
+ for _ in range(self.max_attempts):
63
+ try:
64
+ # todo: in some cases, UnicodeEncodeError is raised:
65
+ # 'gbk' codec can't encode character '\xdf' in position 1898: illegal multibyte sequence
66
+ response = requests.post(self.url, headers=headers, data=json.dumps(data))
67
+ response = response.json()
68
+ assistant_message = response['choices'][0]["message"]["content"]
69
+ usage = response['usage']
70
+ log.info(assistant_message)
71
+ if return_json:
72
+ assistant_message = json.loads(assistant_message)
73
+ return assistant_message, usage
74
+ except Exception as e:
75
+ print(f"Failed to get response. Error: {e}")
76
+ time.sleep(self.delay)
77
+ raise RuntimeError("Failed to get response from OpenAI.")
78
+
79
 
80
+ class GPTModel:
81
+ def __init__(self, model="gpt-4", temperature=0.9, presence_penalty=0,
82
+ frequency_penalty=0, max_attempts=1, delay=20):
83
+ self.model = model
84
+ self.temperature = temperature
85
+ self.presence_penalty = presence_penalty
86
+ self.frequency_penalty = frequency_penalty
87
+ self.max_attempts = max_attempts
88
+ self.delay = delay
89
 
90
+ def __call__(self, systems, prompts, return_json=False):
91
+ conversation_history = [
92
+ {"role": "system", "content": systems},
93
+ {"role": "user", "content": prompts}
94
+ ]
95
+ for _ in range(self.max_attempts):
96
+ try:
97
+ response = openai.ChatCompletion.create(
98
+ model=self.model,
99
+ messages=conversation_history,
100
+ n=1,
101
+ temperature=self.temperature,
102
+ presence_penalty=self.presence_penalty,
103
+ frequency_penalty=self.frequency_penalty,
104
+ stream=False
105
+ )
106
+ assistant_message = response['choices'][0]["message"]["content"]
107
+ usage = response['usage']
108
+ log.info(assistant_message)
109
+ if return_json:
110
+ assistant_message = json.loads(assistant_message)
111
+ return assistant_message, usage
112
+ except Exception as e:
113
+ print(f"Failed to get response. Error: {e}")
114
+ time.sleep(self.delay)
115
+ raise RuntimeError("Failed to get response from OpenAI.")
116
 
 
117
 
118
 
119
  if __name__ == "__main__":
120
+ bot = GPTModel()
121
+ r = bot("You are an assistant.", "Hello.")
122
+ print(r)
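For reference, a minimal sketch of how the new callable GPTModel wrapper might be driven from application code. The import path utils.gpt_interaction, the environment setup, and the prompt strings are illustrative assumptions, not part of this diff; the class returns an (assistant_message, usage) tuple as defined above.

# Sketch only: assumes OPENAI_API_KEY is exported and utils/ is on the Python path.
import os
import openai
from utils.gpt_interaction import GPTModel

openai.api_key = os.getenv("OPENAI_API_KEY")

llm = GPTModel(model="gpt-4", temperature=0.4, max_attempts=3, delay=10)

# Plain-text call: returns (assistant_message, usage).
answer, usage = llm("You are an assistant.", "Summarize PPO in one sentence.")
print(answer)

# JSON mode: the system prompt must force a JSON reply, because
# return_json=True simply runs json.loads on the returned content.
fields, _ = llm(
    'Reply only with JSON such as {"field": 5}.',
    "Title: Playing Atari games with decentralized PPO",
    return_json=True,
)
print(fields)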
utils/knowledge.py ADDED
@@ -0,0 +1,47 @@
1
+ import tiktoken
2
+ from random import shuffle
3
+
4
+ # `tokenizer`: used to count how many tokens
5
+ tokenizer_name = tiktoken.encoding_for_model('gpt-4')
6
+ tokenizer = tiktoken.get_encoding(tokenizer_name.name)
7
+
8
+ def tiktoken_len(text):
9
+ # evaluate how many tokens for the given text
10
+ tokens = tokenizer.encode(text, disallowed_special=())
11
+ return len(tokens)
12
+
13
+
14
+ class Knowledge:
15
+ def __init__(self, db):
16
+ self.db = db
17
+ self.contents = []
18
+
19
+ def collect_knowledge(self, keywords_dict, max_query):
20
+ """
21
+ keywords_dict:
22
+ {"machine learning": 5, "language model": 2};
23
+ """
24
+ db = self.db
25
+ if max_query > 0:
26
+ for kw in keywords_dict:
27
+ docs = db.similarity_search_with_score(kw, k=max_query)
28
+ for i in range(min(max_query, len(docs))):
29
+ content = {"content": docs[i][0].page_content.replace('\n', ' '),
30
+ "score": docs[i][1]} # todo: add more meta information; clean the page_content
31
+ self.contents.append(content)
32
+ # sort contents by score / shuffle
33
+ shuffle(self.contents)
34
+
35
+ def to_prompts(self, max_tokens=2048):
36
+ if len(self.contents) == 0:
37
+ return ""
38
+ prompts = []
39
+ tokens = 0
40
+ for idx, content in enumerate(self.contents):
41
+ prompt = "Reference {}: {}\n".format(idx, content["content"])
42
+ tokens += tiktoken_len(prompt)
43
+ if tokens >= max_tokens:
44
+ break
45
+ else:
46
+ prompts.append(prompt)
47
+ return "".join(prompts)
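A possible way to exercise the new Knowledge helper, assuming a LangChain-style vector store whose similarity_search_with_score(query, k=...) signature matches what collect_knowledge expects. The FAISS/OpenAIEmbeddings choice, the sample texts, and the import path are assumptions for illustration.

# Sketch only: any vector store exposing similarity_search_with_score(query, k=...) works.
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from utils.knowledge import Knowledge

texts = [
    "Proximal Policy Optimization is a first-order policy-gradient method ...",
    "Decentralized training removes the need for a centralized critic ...",
]
db = FAISS.from_texts(texts, OpenAIEmbeddings())

knowledge = Knowledge(db)
# Retrieve up to two snippets per keyword; the collected snippets are shuffled internally.
knowledge.collect_knowledge({"decentralized reinforcement learning": 5, "PPO": 3}, max_query=2)

# Pack the snippets into "Reference i: ..." lines, capped by a token budget.
print(knowledge.to_prompts(max_tokens=1024))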
utils/prompts.py CHANGED
@@ -12,13 +12,21 @@ log = logging.getLogger(__name__)
12
 
13
  # two parameters: min_refs_num, max_refs_num
14
  keywords_system_template = """You are an assistant designed to provide accurate and informative keywords of searching academic papers.
15
- The user will input the tile of a paper. You need to return three to five most related fields. \n
16
  Instructions:\n
17
  - Assign numbers to each field to present the importance. The larger, the more important. \n
18
  - {max_refs_num} is the most important and {min_refs_num} is the least important. \n
19
  - Your response should follow the following format: {{"field1": 5, "field2": 7, "field3": 8, "field4": 5}}\n
20
  - Ensure the response can be parsed by Python json.loads"""
21
 
22
  # two parameters: min_refs_num, max_refs_num
23
  exp_methods_system_template = """You are an assistant designed to provide most related algorithms or methods to a given paper title.
24
  Instructions
@@ -26,6 +34,59 @@ Instructions
26
  - The length of list should between {min_exps_num} and {max_exps_num}
27
  - Use abbreviation to make each method's name have 5 characters or less."""
28
 
29
  # one parameter: research_field
30
  section_generation_system_template = r"""You are an assistant designed to write academic papers in the field of {research_field} using LaTeX.
31
  Instructions
@@ -39,6 +100,14 @@ EXP_METHODS_SYSTEM = PromptTemplate(input_variables=["min_exps_num", "max_exps_n
39
  template=exp_methods_system_template)
40
  SECTION_GENERATION_SYSTEM = PromptTemplate(input_variables=["research_field"],
41
  template=section_generation_system_template)
42
 
43
 
44
  ######################################################################################################################
@@ -47,7 +116,6 @@ SECTION_GENERATION_SYSTEM = PromptTemplate(input_variables=["research_field"],
47
 
48
  cur_path = os.path.dirname(__file__)
49
  prompts_path = os.path.join(cur_path, '../prompts/instructions.json')
50
- print(prompts_path)
51
  with open(prompts_path, "r") as f:
52
  INSTRUCTIONS = json.load(f)
53
  # f = open(file_path)
 
12
 
13
  # two parameters: min_refs_num, max_refs_num
14
  keywords_system_template = """You are an assistant designed to provide accurate and informative keywords of searching academic papers.
15
+ The user will input the title of a paper. You need to return three to five most related fields. \n
16
  Instructions:\n
17
  - Assign numbers to each field to present the importance. The larger, the more important. \n
18
  - {max_refs_num} is the most important and {min_refs_num} is the least important. \n
19
  - Your response should follow the following format: {{"field1": 5, "field2": 7, "field3": 8, "field4": 5}}\n
20
  - Ensure the response can be parsed by Python json.loads"""
21
 
22
+ keywords_system_prompt_str = """You are an assistant designed to provide accurate and informative keywords of searching academic papers.
23
+ The user will input the title of a paper. You need to return three to five most related fields. \n
24
+ Instructions:\n
25
+ - Assign numbers to each field to present the importance. The larger, the more important. \n
26
+ - 10 is the most important and 1 is the least important. \n
27
+ - Your response should follow the following format: {"field 1": 5, "field 2": 7, "field 3": 8, "field 4": 5}\n
28
+ - Ensure the response can be parsed by Python json.loads"""
29
+
30
  # two parameters: min_refs_num, max_refs_num
31
  exp_methods_system_template = """You are an assistant designed to provide most related algorithms or methods to a given paper title.
32
  Instructions
 
34
  - The length of list should between {min_exps_num} and {max_exps_num}
35
  - Use abbreviation to make each method's name have 5 characters or less."""
36
 
37
+ contribution_system_prompt_str = '''You are an assistant designed to propose potential contributions for a given paper title. Ensure you follow the following instructions:
38
+ Instruction:
39
+ - Your response should follow the JSON format.
40
+ - Your response should have the following structure: {"contribution1": {"statement": "briefly describe what the contribution is", "reason": "reason why this contribution has not been made in prior literature"}, "contribution2": {"statement": "briefly describe what the contribution is", "reason": "reason why this contribution has not been made in prior literature"}, ...}'''
41
+
42
+ media_system_prompt_str = '''
43
+ You are an assistant designed to propose necessary components of an academic paper. You need to decide which components should be included to achieve this paper's contributions.
44
+
45
+ Available components: Figure, Table, Definition, Algorithm.
46
+
47
+ Instruction:
48
+ - Your response should follow the JSON format.
49
+ - Your response should have the following structure: {"Figure 1": {"description": "briefly describe what the figure is", "reason": "why this figure is necessary to show the contribution of this paper"}, "Figure 2": {"description": "briefly describe what the figure is", "reason": "why this figure is necessary to show the contribution of this paper"}, "Table 1": {"description": "briefly describe what the table is", "reason": "why this table is necessary to show the contribution of this paper"}, ...}
50
+
51
+ Example:
52
+ Input:
53
+ "Title: Playing Atari game using De-Centralized PPO
54
+ Contributions: The main contributions of this paper are threefold: (1) We propose a novel adaptation of PPO for de-centralized multi-agent Atari gameplay, building upon the existing PPO framework (Wijmans et al.,2020). (2) We provide a comprehensive evaluation of our decentralized PPO approach, comparing its performance to state-of-the-art centralized methods in the Atari domain. (3) We identify key factors influencing the performance of decentralized PPO in Atari games and provide insights into potential avenues for future research in decentralized DRL."
55
+ Response:
56
+ {
57
+ "Figure 1": {
58
+ "description": "Architecture of the proposed decentralized PPO adaptation",
59
+ "reason": "To visually present the novel adaptation of PPO for decentralized multi-agent Atari gameplay and highlight the differences from the existing PPO framework"
60
+ },
61
+ "Figure 2": {
62
+ "description": "Performance comparison of decentralized PPO with state-of-the-art centralized methods",
63
+ "reason": "To depict the effectiveness of our proposed approach by comparing its performance to existing centralized methods in the Atari domain"
64
+ },
65
+ "Figure 3": {
66
+ "description": "Factors and hyperparameters affecting the performance of decentralized PPO",
67
+ "reason": "To illustrate the key factors influencing the performance of decentralized PPO and their impact on various Atari games"
68
+ },
69
+ "Definition 1":{
70
+ "description": "the novel evaluation metric for decentralized PPO approach",
71
+ "reason": "To highlight the difference from other existing literatures"
72
+ },
73
+ "Table 1": {
74
+ "description": "Summary of the experimental results from the evaluation of our decentralized PPO approach",
75
+ "reason": "To show the comprehensive evaluation of our approach and its performance on multiple Atari games compared with state-of-the-art centralized methods"
76
+ },
77
+ "Algorithm 1": {
78
+ "description": "Pseudocode of the proposed decentralized PPO algorithm",
79
+ "reason": "To provide a clear and concise representation of our novel adaptation of PPO for decentralized multi-agent Atari gameplay"
80
+ }
81
+ }'''
82
+
83
+ preliminaries_system_prompt_str = '''You are an assistant designed to propose preliminary concepts for a paper given its title and contributions. Ensure you follow the following instructions:
84
+ Instruction:
85
+ - Your response should follow the JSON format.
86
+ - Your response should have the following structure: {"name of the first concept": 1, "name of the second concept": 2, ...}
87
+ - Smaller number means the concept is more fundamental and should be introduced earlier. '''
88
+
89
+
90
  # one parameter: research_field
91
  section_generation_system_template = r"""You are an assistant designed to write academic papers in the field of {research_field} using LaTeX.
92
  Instructions
 
100
  template=exp_methods_system_template)
101
  SECTION_GENERATION_SYSTEM = PromptTemplate(input_variables=["research_field"],
102
  template=section_generation_system_template)
103
+ CONTRIBUTION = contribution_system_prompt_str
104
+ COMPONENTS = media_system_prompt_str
105
+ PRELIMINARIES = preliminaries_system_prompt_str
106
+ KEYWORDS = keywords_system_prompt_str
107
+
108
+ SYSTEM = {"keywords": KEYWORDS, "experiment_methods": EXP_METHODS_SYSTEM,
109
+ "contributions": CONTRIBUTION, "components": COMPONENTS,
110
+ "preliminaries": PRELIMINARIES}
111
 
112
 
113
  ######################################################################################################################
 
116
 
117
  cur_path = os.path.dirname(__file__)
118
  prompts_path = os.path.join(cur_path, '../prompts/instructions.json')
 
119
  with open(prompts_path, "r") as f:
120
  INSTRUCTIONS = json.load(f)
121
  # f = open(file_path)
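For context, a minimal sketch of how the new SYSTEM mapping of prompt modes might be consumed together with the GPTModel wrapper from utils/gpt_interaction.py. The title string and the chaining of contributions into the components prompt are illustrative assumptions; note that the "experiment_methods" entry is still a PromptTemplate and needs .format(...) before use, while the other entries are plain strings.

# Sketch only: drive two of the new prompt modes end to end.
from utils.prompts import SYSTEM
from utils.gpt_interaction import GPTModel

llm = GPTModel(model="gpt-4", temperature=0.4)

title = "Playing Atari games with decentralized PPO"

# 1) Ask for the paper's contributions as JSON.
contributions, _ = llm(SYSTEM["contributions"], f"Title: {title}", return_json=True)

# 2) Feed the contributions back to decide which figures/tables/algorithms are needed.
components, _ = llm(
    SYSTEM["components"],
    f"Title: {title}\nContributions: {contributions}",
    return_json=True,
)
print(components)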
utils/references.py CHANGED
@@ -17,7 +17,7 @@
17
  # (2) separate references:
18
  # divide references into different groups to reduce the tokens count
19
  # for generating different paragraph of related works, use different set of references
20
-
21
  import requests
22
  import re
23
  import bibtexparser
@@ -28,11 +28,75 @@ import tiktoken
28
  import itertools, uuid, json
29
  from gradio_client import Client
30
  import time
 
 
 
31
 
 
 
 
32
 
33
  ######################################################################################################################
34
  # Some basic tools
35
  ######################################################################################################################
 
36
  def remove_newlines(serie):
37
  # This function is applied to the abstract of each paper to reduce the length of prompts.
38
  serie = serie.replace('\n', ' ')
@@ -90,7 +154,6 @@ def load_papers_from_bibtex(bib_file_path):
90
  bib_papers.append(result)
91
  return bib_papers
92
 
93
-
94
  # `tokenizer`: used to count how many tokens
95
  tokenizer_name = tiktoken.encoding_for_model('gpt-4')
96
  tokenizer = tiktoken.get_encoding(tokenizer_name.name)
@@ -226,12 +289,13 @@ def _collect_papers_ss(keyword, counts=3, tldr=False):
226
  ######################################################################################################################
227
 
228
  class References:
229
- def __init__(self, title, load_papers=None, keyword="customized_refs"):
230
  if load_papers is not None:
231
  self.papers = {keyword: load_papers_from_bibtex(load_papers)}
232
  else:
233
  self.papers = {}
234
  self.title = title
 
235
 
236
  def load_papers(self, bibtex, keyword):
237
  self.papers[keyword] = load_papers_from_bibtex(bibtex)
@@ -254,7 +318,6 @@ class References:
254
  comb_keywords = list(itertools.combinations(keywords, 2))
255
  for comb_keyword in comb_keywords:
256
  keywords.append(" ".join(comb_keyword))
257
- print("Keywords: ", keywords)
258
  for key in keywords:
259
  self.papers[key] = _collect_papers_ss(key, 10, tldr)
260
  # print("Collected papers: ", papers)
@@ -322,15 +385,17 @@ class References:
322
  try:
323
  # Use external API to obtain the most relevant papers
324
  title = self.title
325
- client = Client("https://shaocongma-evaluate-specter-embeddings.hf.space/")
326
- result = client.predict(
327
- title, # str in 'Title' Textbox component
328
- json_path, # str (filepath or URL to file) in 'Papers JSON (as string)' File component
329
- 50, # int | float (numeric value between 1 and 50) in 'Top-k Relevant Papers' Slider component
330
- api_name="/get_k_relevant_papers"
331
- )
332
- with open(result) as f:
333
- result = json.load(f)
 
 
334
  result = [item for key, item in result.items()]
335
  except Exception as e:
336
  print(f"Error occurs during calling external API: {e}\n")
 
17
  # (2) separate references:
18
  # divide references into different groups to reduce the tokens count
19
  # for generating different paragraph of related works, use different set of references
20
+ from typing import Dict, List
21
  import requests
22
  import re
23
  import bibtexparser
 
28
  import itertools, uuid, json
29
  from gradio_client import Client
30
  import time
31
+ import numpy as np
32
+ from numpy.linalg import norm
33
+
34
 
35
+ URL = "https://model-apis.semanticscholar.org/specter/v1/invoke"
36
+ MAX_BATCH_SIZE = 16
37
+ MAX_ATTEMPTS = 20
38
 
39
  ######################################################################################################################
40
  # Some basic tools
41
  ######################################################################################################################
42
+ def evaluate_cosine_similarity(v1, v2):
43
+ try:
44
+ return np.dot(v1, v2)/(norm(v1)*norm(v2))
45
+ except ValueError:
46
+ return 0.0
47
+
48
+ def chunks(lst, chunk_size=MAX_BATCH_SIZE):
49
+ """Splits a longer list to respect batch size"""
50
+ for i in range(0, len(lst), chunk_size):
51
+ yield lst[i : i + chunk_size]
52
+
53
+ def embed(papers):
54
+ embeddings_by_paper_id: Dict[str, List[float]] = {}
55
+ for chunk in chunks(papers):
56
+ # Allow Python requests to convert the data above to JSON
57
+ response = requests.post(URL, json=chunk)
58
+
59
+ if response.status_code != 200:
60
+ raise RuntimeError("Sorry, something went wrong, please try later!")
61
+
62
+ for paper in response.json()["preds"]:
63
+ embeddings_by_paper_id[paper["paper_id"]] = paper["embedding"]
64
+
65
+ return embeddings_by_paper_id
66
+
67
+ def get_embeddings(paper_title, paper_description):
68
+ output = [{"title": paper_title, "abstract": paper_description, "paper_id": "target_paper"}]
69
+ emb_vector = embed(output)["target_paper"]
70
+ target_paper = output[0]
71
+ target_paper["embeddings"] = emb_vector
72
+ return target_paper
73
+
74
+ def get_top_k(papers_dict, paper_title, paper_description, k=None):
75
+ target_paper = get_embeddings(paper_title, paper_description)
76
+ papers = papers_dict # must include embeddings
77
+
78
+ # if k < len(papers_json), return k most relevant papers
79
+ # if k >= len(papers_json) or k is None, return all papers
80
+ max_num_papers = len(papers)
81
+ if k is None:
82
+ k = max_num_papers
83
+ num_papers = min(k, max_num_papers)
84
+
85
+ # evaluate the cosine similarity for each paper
86
+ target_embedding_vector = target_paper["embeddings"]
87
+
88
+ for k in papers:
89
+ v = papers[k]
90
+ embedding_vector = v["embeddings"]
91
+ cos_sim = evaluate_cosine_similarity(embedding_vector, target_embedding_vector)
92
+ papers[k]["cos_sim"] = cos_sim
93
+
94
+ # return the best k papers
95
+ sorted_papers = {k: v for k, v in sorted(papers.items(), key=lambda x: x[1]["cos_sim"], reverse=True)[:num_papers]}
96
+ for key in sorted_papers:
97
+ sorted_papers[key].pop("embeddings", None)
98
+ return sorted_papers
99
+
100
  def remove_newlines(serie):
101
  # This function is applied to the abstract of each paper to reduce the length of prompts.
102
  serie = serie.replace('\n', ' ')
 
154
  bib_papers.append(result)
155
  return bib_papers
156
 
 
157
  # `tokenizer`: used to count how many tokens
158
  tokenizer_name = tiktoken.encoding_for_model('gpt-4')
159
  tokenizer = tiktoken.get_encoding(tokenizer_name.name)
 
289
  ######################################################################################################################
290
 
291
  class References:
292
+ def __init__(self, title, load_papers=None, keyword="customized_refs", description=""):
293
  if load_papers is not None:
294
  self.papers = {keyword: load_papers_from_bibtex(load_papers)}
295
  else:
296
  self.papers = {}
297
  self.title = title
298
+ self.description = description
299
 
300
  def load_papers(self, bibtex, keyword):
301
  self.papers[keyword] = load_papers_from_bibtex(bibtex)
 
318
  comb_keywords = list(itertools.combinations(keywords, 2))
319
  for comb_keyword in comb_keywords:
320
  keywords.append(" ".join(comb_keyword))
 
321
  for key in keywords:
322
  self.papers[key] = _collect_papers_ss(key, 10, tldr)
323
  # print("Collected papers: ", papers)
 
385
  try:
386
  # Use external API to obtain the most relevant papers
387
  title = self.title
388
+ description = self.description
389
+ result = get_top_k(papers_json, title, description)
390
+ # client = Client("https://shaocongma-evaluate-specter-embeddings.hf.space/")
391
+ # result = client.predict(
392
+ # title, # str in 'Title' Textbox component
393
+ # json_path, # str (filepath or URL to file) in 'Papers JSON (as string)' File component
394
+ # 50, # int | float (numeric value between 1 and 50) in 'Top-k Relevant Papers' Slider component
395
+ # api_name="/get_k_relevant_papers"
396
+ # )
397
+ # with open(result) as f:
398
+ # result = json.load(f)
399
  result = [item for key, item in result.items()]
400
  except Exception as e:
401
  print(f"Error occurs during calling external API: {e}\n")
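Finally, a rough sketch of how the new SPECTER-based ranking path might be exercised directly, in place of the retired Hugging Face Space client. The two candidate papers are made-up placeholders and the import path is an assumption; embed() calls the public semanticscholar SPECTER endpoint, so network access is required.

# Sketch only: rank hypothetical candidate papers against a target title/description.
from utils.references import embed, get_top_k

candidates = {
    "paper_a": {"title": "Decentralized PPO for Atari agents",
                "abstract": "We adapt PPO to fully decentralized multi-agent training ..."},
    "paper_b": {"title": "Game-theoretic routing for sensor networks",
                "abstract": "A Q-learning based distributed routing scheme ..."},
}

# Embed every candidate with SPECTER and attach the vector under "embeddings",
# which is the key get_top_k expects.
payload = [{"paper_id": pid, "title": p["title"], "abstract": p["abstract"]}
           for pid, p in candidates.items()]
for pid, vector in embed(payload).items():
    candidates[pid]["embeddings"] = vector

top = get_top_k(candidates,
                "Playing Atari games with decentralized PPO",
                "We study fully decentralized deep RL for Atari games.",
                k=1)
print(list(top))  # paper ids sorted by cosine similarity to the target paper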