gradio version update. adding more examples for the task. adding new fields for each paper.
Files changed:
- app.py            +97 -34
- input_format.py    +4 -61
- requirements.txt   +6 -11
- score.py           +7 -1
- style.css          +1 -1
app.py  CHANGED

@@ -53,7 +53,7 @@ def get_similar_paper(
     # print('computing document scores...')
     #progress(0.5, desc="Computing document scores...")
     # TODO detect duplicate papers?
-    titles, abstracts, paper_urls, doc_scores = compute_document_score(
+    titles, abstracts, paper_urls, doc_scores, paper_years, paper_citations = compute_document_score(
         doc_model,
         tokenizer,
         title_input,
@@ -68,7 +68,9 @@ def get_similar_paper(
         'titles': titles,
         'abstracts': abstracts,
         'urls': paper_urls,
-        'doc_scores': doc_scores
+        'doc_scores': doc_scores,
+        'years': paper_years,
+        'citations': paper_citations,
     }

     # Select top 10 papers to show
@@ -76,6 +78,8 @@ def get_similar_paper(
     abstracts = abstracts[:10]
     doc_scores = doc_scores[:10]
     paper_urls = paper_urls[:10]
+    paper_years = paper_years[:10]
+    paper_citations = paper_citations[:10]

     display_title = ['[ %0.3f ] %s'%(s, t) for t, s in zip(titles, doc_scores)]
     end = time.time()
@@ -119,7 +123,9 @@ def get_similar_paper(
             'source_sentences': input_sentences,
             'highlight': word_scores,
             'top_pairs': top_pairs_info,
-            'url': url
+            'url': url,
+            'year': paper_years[aa],
+            'citations': paper_citations[aa],
         }

     end = time.time()
@@ -127,12 +133,11 @@ def get_similar_paper(
     print('done in [%0.2f] seconds'%(highlight_time))

     ## Set up output elements
-
     ## Components for Initial Part
     result1_desc_value = """
 <h3>Top %d relevant papers by the reviewer <a href="%s" target="_blank">%s</a></h3>

-For each paper, top %d sentence pairs (one from the submission, one from the paper) with the highest relevance scores are shown.
+For each paper, top %d sentence pairs (one from the submission on the left, one from the paper on the right) with the highest relevance scores are shown.

 **<span style="color:black;background-color:#65B5E3;">Blue highlights</span>**: phrases that appear in both sentences.
 """%(int(top_paper_slider), author_id_input, results['name'], int(top_pair_slider))
@@ -161,7 +166,7 @@ def get_similar_paper(
     title_out = """<a href="%s" target="_blank"><h5>%s</h5></a>"""%(url, title)
     aff_score_out = '##### Affinity Score: %s'%aff_score
     result2_desc_value = """
-    ##### Click a paper by %s (left, sorted by affinity scores), and a sentence from the submission (center), to see which parts of the paper are relevant (right).
+    ##### Click a paper by %s (left, sorted by affinity scores), and a sentence from the submission abstract (center), to see which parts of the paper's abstract are relevant (right).
     """%results['name']
     out3 = [
         gr.update(choices=display_title, value=display_title[0], interactive=True), # set of papers (radio)
@@ -169,9 +174,11 @@ def get_similar_paper(
         gr.update(value=title_out), # paper_title
         gr.update(value=aff_score_out), # affinity
         gr.update(value=result2_desc_value), # result 2 description (show more section)
-        gr.update(value=
+        gr.update(value=2, maximum=len(sent_tokenize(abstracts[0]))), # highlight slider to control
     ]

+    torch.cuda.empty_cache()
+
     ## Return by adding the State variable info
     return out1 + out2 + out3 + [results]

@@ -179,9 +186,12 @@ def setup_outputs(info, top_papers_show, top_num_info_show):
     titles = info['titles']
     doc_scores = info['doc_scores']
     paper_urls = info['urls']
+    paper_years = info['years']
+    paper_citations = info['citations']
     display_title = ['[ %0.3f ] %s'%(s, t) for t, s in zip(info['titles'], info['doc_scores'])]
     title = []
     affinity = []
+    citation_count = []
     sent_pair_score = []
     sent_text_query = []
     sent_text_candidate = []
@@ -191,22 +201,28 @@ def setup_outputs(info, top_papers_show, top_num_info_show):
     for i in range(top_papers_show):
         if i == 0:
             title.append(
-                gr.update(value="""<a href="%s" target="_blank"><h4>%s</h4></a>"""%(paper_urls[i], titles[i]), visible=True)
+                gr.update(value="""<a href="%s" target="_blank"><h4>%s (%s)</h4></a>"""%(paper_urls[i], titles[i], str(paper_years[i])), visible=True)
             )
             affinity.append(
                 gr.update(value="""#### Affinity Score: %0.3f
 <div class="help-tip">
-
+    <p>Measures how similar the paper's abstract is to the submission abstract.</p>
 </div>
 """%doc_scores[i], visible=True) # document affinity
             )
+            citation_count.append(
+                gr.update(value="""#### Citation Count: %d"""%paper_citations[i], visible=True) # document affinity
+            )
         else:
             title.append(
-                gr.update(value="""<a href="%s" target="_blank"><h4>%s</h4></a>"""%(paper_urls[i], titles[i]), visible=True)
+                gr.update(value="""<a href="%s" target="_blank"><h4>%s (%s)</h4></a>"""%(paper_urls[i], titles[i], str(paper_years[i])), visible=True)
             )
             affinity.append(
                 gr.update(value='#### Affinity Score: %0.3f'%doc_scores[i], visible=True) # document affinity
             )
+            citation_count.append(
+                gr.update(value="""#### Citation Count: %d"""%paper_citations[i], visible=True) # document affinity
+            )
         demarc_lines.append(gr.Markdown.update(visible=True))

     # fill in the rest as
@@ -238,6 +254,7 @@ def setup_outputs(info, top_papers_show, top_num_info_show):
     # mark others not visible
     title += [gr.Markdown.update(visible=False)] * (NUM_PAPERS_SHOW - top_papers_show)
     affinity += [gr.Markdown.update(visible=False)] * (NUM_PAPERS_SHOW - top_papers_show)
+    citation_count += [gr.Markdown.update(visible=False)] * (NUM_PAPERS_SHOW - top_papers_show)
     demarc_lines += [gr.Markdown.update(visible=False)] * (NUM_PAPERS_SHOW - top_papers_show)
     sent_pair_score += [gr.Markdown.update(visible=False)] * (NUM_PAPERS_SHOW - top_papers_show) * NUM_PAIRS_SHOW
     sent_text_query += [gr.Textbox.update(value='', visible=False)] * (NUM_PAPERS_SHOW - top_papers_show) * NUM_PAIRS_SHOW
@@ -249,7 +266,7 @@ def setup_outputs(info, top_papers_show, top_num_info_show):
     assert(len(affinity) == NUM_PAPERS_SHOW)
     assert(len(sent_pair_score) == NUM_PAIRS_SHOW * NUM_PAPERS_SHOW)

-    return title, affinity, demarc_lines, sent_pair_score, sent_text_query, sent_text_candidate, sent_hl_query, sent_hl_candidate
+    return title, affinity, citation_count, demarc_lines, sent_pair_score, sent_text_query, sent_text_candidate, sent_hl_query, sent_hl_candidate

 def show_more(info):
     # show the interactive part of the app
@@ -259,6 +276,7 @@ def show_more(info):
         gr.update(visible=True), # submission sentences
         gr.update(visible=True), # title row
         gr.update(visible=True), # affinity row
+        gr.update(visible=True), # citation row
         gr.update(visible=True), # highlight legend
         gr.update(visible=True), # highlight slider
         gr.update(visible=True), # highlight abstract
@@ -298,18 +316,21 @@ def change_paper(
     if len(info.keys()) != 0: # if the info is not empty
         source_sents = info[selected_papers_radio]['source_sentences']
         title = info[selected_papers_radio]['title']
+        year = info[selected_papers_radio]['year']
+        citation_count = info[selected_papers_radio]['citations']
         num_sents = info[selected_papers_radio]['num_cand_sents']
         abstract = info[selected_papers_radio]['abstract']
         aff_score = info[selected_papers_radio]['doc_score']
         highlights = info[selected_papers_radio]['highlight']
         url = info[selected_papers_radio]['url']
-        title_out = """<a href="%s" target="_blank"><h5>%s</h5></a>"""%(url, title)
+        title_out = """<a href="%s" target="_blank"><h5>%s (%s)</h5></a>"""%(url, title, str(year))
         aff_score_out = '##### Affinity Score: %s'%aff_score
+        citation_count_out = '##### Citation Count: %s'%citation_count
         idx = source_sents.index(source_sent_choice)
         if highlight_slider <= num_sents:
-            return title_out, abstract, aff_score_out, highlights[str(idx)][str(highlight_slider)], gr.update(value=highlight_slider, maximum=num_sents)
+            return title_out, abstract, aff_score_out, citation_count_out, highlights[str(idx)][str(highlight_slider)], gr.update(value=highlight_slider, maximum=num_sents)
         else: # if the slider is set to more than the current number of sentences, show the max number of highlights
-            return title_out, abstract, aff_score_out, highlights[str(idx)][str(num_sents)], gr.update(value=num_sents, maximum=num_sents)
+            return title_out, abstract, aff_score_out, citation_count_out, highlights[str(idx)][str(num_sents)], gr.update(value=num_sents, maximum=num_sents)
     else:
         return

@@ -334,7 +355,7 @@ def change_top_output(top_paper_slider, top_pair_slider, info={}):
     result1_desc_value = """
 <h3>Top %d relevant papers by the reviewer <a href="%s" target="_blank">%s</a></h3>

-For each paper, top %d sentence pairs (one from the submission, one from the paper) with the highest relevance scores are shown.
+For each paper, top %d sentence pairs (one from the submission on the left, one from the paper on the right) with the highest relevance scores are shown.

 **<span style="color:black;background-color:#65B5E3;">Blue highlights</span>**: phrases that appear in both sentences.
 """%(int(top_paper_slider), info['author_url'], info['name'], int(top_pair_slider))
@@ -363,9 +384,9 @@ It is for meta-reviewers, area chairs, program chairs, or anyone who oversees th
 <center><img src="file/tool-img.jpeg" width="70%" alt="general workflow"></center>

 #### How does it help?
-A typical meta-reviewer workflow lacks supportive information on **what makes the pre-selected candidate reviewers a good fit** for the submission. Only affinity scores between the reviewer and the paper are shown, without additional
+A typical meta-reviewer workflow lacks supportive information on **what makes the pre-selected candidate reviewers a good fit** for the submission. Only affinity scores between the reviewer and the paper are shown, without additional details on what makes them similar/different.

-R2P2 provides more information about each reviewer.
+R2P2 provides more information about each reviewer. Given a paper and a reviewer, it searches for the **most relevant papers** among the reviewer's previous publications and **highlights relevant parts** within them.
 """
 # More details (video, addendum)
 more_details_instruction = """Check out <a href="https://drive.google.com/file/d/1Ex_-cOplBitO7riNGliecFc8H3chXUN-/view?usp=share_link", target="_blank">this video</a> for a quick introduction of what R2P2 is and how it can help. You can find more details <a href="file/details.html", target="_blank">here</a>, along with our privacy policy and disclaimer."""
@@ -374,6 +395,33 @@ R2P2 provides more information about each reviewer. It searches for the **most r
     gr.HTML(more_details_instruction)
     gr.Markdown("""---""")

+    # Add main example
+    example_title ="The Toronto Paper Matching System: An automated paper-reviewer assignment system"
+    example_submission = """One of the most important tasks of conference organizers is the assignment of papers to reviewers. Reviewers' assessments of papers is a crucial step in determining the conference program, and in a certain sense to shape the direction of a field. However this is not a simple task: large conferences typically have to assign hundreds of papers to hundreds of reviewers, and time constraints make the task impossible for one person to accomplish. Furthermore other constraints, such as reviewer load have to be taken into account, preventing the process from being completely distributed. We built the first version of a system to suggest reviewer assignments for the NIPS 2010 conference, followed, in 2012, by a release that better integrated our system with Microsoft's popular Conference Management Toolkit (CMT). Since then our system has been widely adopted by the leading conferences in both the machine learning and computer vision communities. This paper provides an overview of the system, a summary of learning models and methods of evaluation that we have been using, as well as some of the recent progress and open issues."""
+    example_reviewer = "https://www.semanticscholar.org/author/Nihar-B.-Shah/1737249"
+
+    ## Add other examples for the task
+
+    # match 1
+    # example1_title = "VoroCNN: Deep convolutional neural network built on 3D Voronoi tessellation of protein structures"
+    # example1_submission = """Effective use of evolutionary information has recently led to tremendous progress in computational prediction of three-dimensional (3D) structures of proteins and their complexes. Despite the progress, the accuracy of predicted structures tends to vary considerably from case to case. Since the utility of computational models depends on their accuracy, reliable estimates of deviation between predicted and native structures are of utmost importance. Results For the first time we present a deep convolutional neural network (CNN) constructed on a Voronoi tessellation of 3D molecular structures. Despite the irregular data domain, our data representation allows to efficiently introduce both convolution and pooling operations of the network. We trained our model, called VoroCNN, to predict local qualities of 3D protein folds. The prediction results are competitive to the state of the art and superior to the previous 3D CNN architectures built for the same task. We also discuss practical applications of VoroCNN, for example, in the recognition of protein binding interfaces. Availability The model, data, and evaluation tests are available at https://team.inria.fr/nano-d/software/vorocnn/. Contact ceslovas.venclovas@bti.vu.lt, sergei.grudinin@inria.fr"""
+    # example1_reviewer = "https://www.semanticscholar.org/author/2025052385"
+
+    # # match 2
+    # example2_title = "Model-based Policy Optimization with Unsupervised Model Adaptation"
+    # example2_submission = """Model-based reinforcement learning methods learn a dynamics model with real data sampled from the environment and leverage it to generate simulated data to derive an agent. However, due to the potential distribution mismatch between simulated data and real data, this could lead to degraded performance. Despite much effort being devoted to reducing this distribution mismatch, existing methods fail to solve it explicitly. In this paper, we investigate how to bridge the gap between real and simulated data due to inaccurate model estimation for better policy optimization. To begin with, we first derive a lower bound of the expected return, which naturally inspires a bound maximization algorithm by aligning the simulated and real data distributions. To this end, we propose a novel model-based reinforcement learning framework AMPO, which introduces unsupervised model adaptation to minimize the integral probability metric (IPM) between feature distributions from real and simulated data. Instantiating our framework with Wasserstein-1 distance gives a practical model-based approach. Empirically, our approach achieves state-of-the-art performance in terms of sample efficiency on a range of continuous control benchmark tasks."""
+    # example2_reviewer = "https://www.semanticscholar.org/author/144974941"
+
+    # # match 3
+    # example3_title = "Sharp asymptotic and finite-sample rates of convergence of empirical measures in Wasserstein distance"
+    # example3_submission = """The Wasserstein distance between two probability measures on a metric space is a measure of closeness with applications in statistics, probability, and machine learning. In this work, we consider the fundamental question of how quickly the empirical measure obtained from $n$ independent samples from $\mu$ approaches $\mu$ in the Wasserstein distance of any order. We prove sharp asymptotic and finite-sample results for this rate of convergence for general measures on general compact metric spaces. Our finite-sample results show the existence of multi-scale behavior, where measures can exhibit radically different rates of convergence as $n$ grows."""
+    # example3_reviewer = "https://www.semanticscholar.org/author/27911143"
+
+    # # match 4
+    # example4_title = "Deep Neural Networks for Estimation and Inference: Application to Causal Effects and Other Semiparametric Estimands"
+    # example4_submission = """We study deep neural networks and their use in semiparametric inference. We prove valid inference after first-step estimation with deep learning, a result new to the literature. We provide new rates of convergence for deep feedforward neural nets and, because our rates are sufficiently fast (in some cases minimax optimal), obtain valid semiparametric inference. Our estimation rates and semiparametric inference results handle the current standard architecture: fully connected feedforward neural networks (multi-layer perceptrons), with the now-common rectified linear unit activation function and a depth explicitly diverging with the sample size. We discuss other architectures as well, including fixed-width, very deep networks. We establish nonasymptotic bounds for these deep nets for nonparametric regression, covering the standard least squares and logistic losses in particular. We then apply our theory to develop semiparametric inference, focusing on treatment effects, expected welfare, and decomposition effects for concreteness. Inference in many other semiparametric contexts can be readily obtained. We demonstrate the effectiveness of deep learning with a Monte Carlo analysis and an empirical application to direct mail marketing."""
+    # example4_reviewer = "https://www.semanticscholar.org/author/3364789"
+
     ### INPUT
     with gr.Row() as input_row:
         with gr.Column(scale=3):
@@ -388,15 +436,17 @@ R2P2 provides more information about each reviewer. It searches for the **most r
         name = gr.Textbox(label='Confirm Reviewer Name', info='This will be automatically updated based on the reviewer profile link above', interactive=False)
         author_id_input.change(fn=update_name, inputs=author_id_input, outputs=name)

-    # Add examples
-    example_title ="The Toronto Paper Matching System: An automated paper-reviewer assignment system"
-    example_submission = """One of the most important tasks of conference organizers is the assignment of papers to reviewers. Reviewers' assessments of papers is a crucial step in determining the conference program, and in a certain sense to shape the direction of a field. However this is not a simple task: large conferences typically have to assign hundreds of papers to hundreds of reviewers, and time constraints make the task impossible for one person to accomplish. Furthermore other constraints, such as reviewer load have to be taken into account, preventing the process from being completely distributed. We built the first version of a system to suggest reviewer assignments for the NIPS 2010 conference, followed, in 2012, by a release that better integrated our system with Microsoft's popular Conference Management Toolkit (CMT). Since then our system has been widely adopted by the leading conferences in both the machine learning and computer vision communities. This paper provides an overview of the system, a summary of learning models and methods of evaluation that we have been using, as well as some of the recent progress and open issues."""
-    example_reviewer = "https://www.semanticscholar.org/author/Nihar-B.-Shah/1737249"
     gr.Examples(
-        examples=[
+        examples=[
+            [example_title, example_submission, example_reviewer],
+            # [example1_title, example1_submission, example1_reviewer],
+            # [example2_title, example2_submission, example2_reviewer],
+            # [example3_title, example3_submission, example3_reviewer],
+            # [example4_title, example4_submission, example4_reviewer],
+        ],
         inputs=[title_input, abstract_text_input, author_id_input],
         cache_examples=False,
-        label="Try out the following example input."
+        label="Try out the following example input. Click on a row to fill in the input fields accordingly."
     )

     with gr.Row():
@@ -409,16 +459,18 @@ R2P2 provides more information about each reviewer. It searches for the **most r
         # Paper title, score, and top-ranking sentence pairs
         # a knob for controlling the number of output displayed
         with gr.Row():
-            with gr.Column(scale=
+            with gr.Column(scale=3):
                 result1_desc = gr.Markdown(value='', visible=False)
             with gr.Column(scale=2):
-
-
-
-
+                # with gr.Row():
+                top_paper_slider = gr.Slider(label='Number of papers to show', value=3, minimum=3, step=1, maximum=NUM_PAPERS_SHOW, visible=False)
+            with gr.Column(scale=2):
+                #with gr.Row():
+                top_pair_slider = gr.Slider(label='Number of sentence pairs to show', value=2, minimum=2, step=1, maximum=NUM_PAIRS_SHOW, visible=False)

         paper_title_up = []
         paper_affinity_up = []
+        citation_count = []
         sent_pair_score = []
         sent_text_query = []
         sent_text_candidate = []
@@ -434,6 +486,9 @@ R2P2 provides more information about each reviewer. It searches for the **most r
                 with gr.Column(scale=3):
                     tt = gr.Markdown(value='', visible=False)
                     paper_title_up.append(tt)
+                with gr.Column(scale=1):
+                    cc = gr.Markdown(value='', visible=False)
+                    citation_count.append(cc)
                 with gr.Column(scale=1):
                     aff = gr.Markdown(value='', visible=False)
                     paper_affinity_up.append(aff)
@@ -443,12 +498,14 @@ R2P2 provides more information about each reviewer. It searches for the **most r
                     sps = gr.Markdown(value='', visible=False)
                     sent_pair_score.append(sps)
                 with gr.Column(scale=5):
-                    stq = gr.Textbox(label='Sentence from Submission', visible=False)
+                    #stq = gr.Textbox(label='Sentence from Submission', visible=False)
+                    stq = gr.Textbox(label='', visible=False)
                     shq = gr.components.Interpretation(stq, visible=False)
                     sent_text_query.append(stq)
                     sent_hl_query.append(shq)
                 with gr.Column(scale=5):
-                    stc = gr.Textbox(label="Sentence from Reviewer's Paper", visible=False)
+                    #stc = gr.Textbox(label="Sentence from Reviewer's Paper", visible=False)
+                    stc = gr.Textbox(label="", visible=False)
                     shc = gr.components.Interpretation(stc, visible=False)
                     sent_text_candidate.append(stc)
                     sent_hl_candidate.append(shc)
@@ -458,7 +515,7 @@ R2P2 provides more information about each reviewer. It searches for the **most r

         ## Show more button
         with gr.Row():
-            see_more_rel_btn = gr.Button('Explore
+            see_more_rel_btn = gr.Button('Not Enough Information? Explore More', visible=False)

         ### PAPER INFORMATION

@@ -474,7 +531,7 @@ R2P2 provides more information about each reviewer. It searches for the **most r
         """
         #---"""
         # show multiple papers in radio check box to select from
-        paper_abstract = gr.Textbox(label='
+        paper_abstract = gr.Textbox(label='', interactive=False, visible=False)
         with gr.Row():
             with gr.Column(scale=1):
                 selected_papers_radio = gr.Radio(
@@ -510,6 +567,9 @@ R2P2 provides more information about each reviewer. It searches for the **most r
         with gr.Row(visible=False) as aff_row:
             # selected paper's affinity score
             affinity = gr.Markdown(value='')
+        with gr.Row(visible=False) as cite_row:
+            # selected paper's citation count
+            citation = gr.Markdown(value='')
         with gr.Row(visible=False) as hl_row:
             # highlighted text from paper
             highlight = gr.components.Interpretation(paper_abstract)
@@ -526,7 +586,7 @@ R2P2 provides more information about each reviewer. It searches for the **most r
         ]

     init_result_components = \
-        paper_title_up + paper_affinity_up + demarc_lines + sent_pair_score + \
+        paper_title_up + paper_affinity_up + citation_count + demarc_lines + sent_pair_score + \
         sent_text_query + sent_text_candidate + sent_hl_query + sent_hl_candidate

     explore_more_components = [
@@ -569,6 +629,7 @@ R2P2 provides more information about each reviewer. It searches for the **most r
         source_sentences,
         title_row,
         aff_row,
+        cite_row,
         highlight_legend,
         highlight_slider,
         hl_row,
@@ -600,6 +661,7 @@ R2P2 provides more information about each reviewer. It searches for the **most r
         paper_title,
         paper_abstract,
         affinity,
+        citation,
         highlight,
         highlight_slider
     ]
@@ -642,4 +704,5 @@ R2P2 provides more information about each reviewer. It searches for the **most r
     )

 if __name__ == "__main__":
-    demo.queue().launch() # add ?__theme=light to force light mode
+    #demo.queue().launch(debug=True) # add ?__theme=light to force light mode
+    demo.queue().launch(share=True) # add ?__theme=light to force light mode
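The core of the app.py change is that every per-paper record now carries a publication year and a citation count next to the affinity score. As a rough orientation aid (not part of the commit), the sketch below shows the shape of the `results` State dict that `get_similar_paper` builds and that `setup_outputs` / `change_paper` read back; the helper function is hypothetical, and only the key names come from the diff above.

# Hypothetical helper, for illustration only: the real app assembles this dict
# inline inside get_similar_paper().
def build_results_state(titles, abstracts, paper_urls, doc_scores, paper_years, paper_citations):
    results = {
        'titles': titles,
        'abstracts': abstracts,
        'urls': paper_urls,
        'doc_scores': doc_scores,
        'years': paper_years,          # new in this commit
        'citations': paper_citations,  # new in this commit
    }
    return results

# Each per-paper entry in the State likewise gains 'year' and 'citations';
# change_paper() renders them as "Title (year)" and '##### Citation Count: %s'.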
input_format.py  CHANGED

@@ -1,67 +1,9 @@
-from pypdf import PdfReader
 from urllib.parse import urlparse
 import requests
 from semanticscholar import SemanticScholar

 ### Input Formatting

-## Input formatting for the given paper
-# Extracting text from a pdf or a link
-
-def get_text_from_pdf(file_path):
-    """
-    Convert a pdf to list of text files
-    """
-    reader = PdfReader(file_path)
-    text = []
-    for p in reader.pages:
-        t = p.extract_text()
-        text.append(t)
-    return text
-
-def get_text_from_url(url, file_path='paper.pdf'):
-    """
-    Get text of the paper from a url
-    """
-    ## Check for different URL cases
-    url_parts = urlparse(url)
-    # arxiv
-    if 'arxiv' in url_parts.netloc:
-        if 'abs' in url_parts.path:
-            # abstract page, change the url to pdf link
-            paper_id = url_parts.path.split('/')[-1]
-            url = 'https://www.arxiv.org/pdf/%s.pdf'%(paper_id)
-        elif 'pdf' in url_parts.path:
-            # pdf file, pass
-            pass
-        else:
-            raise ValueError('invalid url')
-    else:
-        raise ValueError('invalid url')
-
-    # download the file
-    download_pdf(url, file_path)
-
-    # get the text from the pdf file
-    text = get_text_from_pdf(file_path)
-    return text
-
-def download_pdf(url, file_name):
-    """
-    Download the pdf file from given url and save it as file_name
-    """
-    # Send GET request
-    response = requests.get(url)
-
-    # Save the PDF
-    if response.status_code == 200:
-        with open(file_name, "wb") as f:
-            f.write(response.content)
-    elif response.status_code == 404:
-        raise ValueError('cannot download the file')
-    else:
-        print(response.status_code)
-
 ## Input formatting for the given author (reviewer)
 # Extracting text from a link

@@ -71,8 +13,9 @@ def get_text_from_author_id(author_id, max_count=150):
     aid = str(author_id)
     if 'http' in aid: # handle semantic scholar url input
         aid = aid.split('/')
-        aid = aid[
-
+        aid = aid[-1]
+        # aid = aid[aid.index('author')+2]
+        url = "https://api.semanticscholar.org/graph/v1/author/%s?fields=url,name,paperCount,papers,papers.title,papers.abstract,papers.url,papers.year,papers.citationCount"%aid
     r = requests.get(url)
     if r.status_code == 404:
         raise ValueError('Author link not found.')
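The PDF-handling helpers are removed, and the author lookup now requests two extra per-paper fields, `papers.year` and `papers.citationCount`, from the Semantic Scholar Graph API. A minimal standalone sketch of that request, with the endpoint and field list taken verbatim from the diff and the surrounding function simplified (the helper name is illustrative, not from the repo):

import requests

def fetch_author_papers(author_id):
    # Same Graph API endpoint and field list as get_text_from_author_id() after this commit.
    url = ("https://api.semanticscholar.org/graph/v1/author/%s"
           "?fields=url,name,paperCount,papers,papers.title,papers.abstract,"
           "papers.url,papers.year,papers.citationCount") % author_id
    r = requests.get(url)
    if r.status_code == 404:
        raise ValueError('Author link not found.')
    data = r.json()
    # Each entry in data['papers'] now includes 'year' and 'citationCount',
    # which compute_document_score() reads as p['year'] and p['citationCount'].
    return data['name'], data['papers']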
requirements.txt  CHANGED

@@ -1,16 +1,11 @@
-gradio==3.
-huggingface-hub
+gradio==3.24.1
+huggingface-hub
 nltk==3.7
-numpy
-py-pdf-parser==0.10.2
-py-rouge==1.1
-pypdf==3.3.0
-pyrogue==0.0.2
+numpy
 requests==2.28.1
-rouge-score==0.1.2
 semanticscholar==0.3.2
-sentence-transformers==2.2.
+sentence-transformers==2.2.2
 torch==1.9.0
-transformers
+transformers==4.27.4
 urllib3==1.26.6
-tqdm
+tqdm
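Several PDF- and ROUGE-related packages are dropped (the PDF helpers they supported were removed from input_format.py), and gradio, sentence-transformers, and transformers are now pinned to exact versions. A quick, illustrative way to confirm a local environment matches the new pins before launching the Space:

import gradio, sentence_transformers, transformers

# Expected after this commit: gradio 3.24.1, sentence-transformers 2.2.2, transformers 4.27.4
print(gradio.__version__, sentence_transformers.__version__, transformers.__version__)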
score.py  CHANGED

@@ -338,11 +338,15 @@ def compute_document_score(doc_model, tokenizer, query_title, query_abs, papers,
     titles = []
     abstracts = []
     urls = []
+    years = []
+    citations = []
     for p in papers:
         if p['title'] is not None and p['abstract'] is not None:
             titles.append(p['title'])
             abstracts.append(p['abstract'])
             urls.append(p['url'])
+            years.append(p['year'])
+            citations.append(p['citationCount'])
     if query_title == '':
         query = query_abs
     else:
@@ -355,5 +359,7 @@ def compute_document_score(doc_model, tokenizer, query_title, query_abs, papers,
     abstracts_sorted = [abstracts[x] for x in idx_sorted]
     scores_sorted = [scores[x] for x in idx_sorted]
     urls_sorted = [urls[x] for x in idx_sorted]
+    years_sorted = [years[x] for x in idx_sorted]
+    citations_sorted = [citations[x] for x in idx_sorted]

-    return titles_sorted, abstracts_sorted, urls_sorted, scores_sorted
+    return titles_sorted, abstracts_sorted, urls_sorted, scores_sorted, years_sorted, citations_sorted
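compute_document_score now returns six parallel, score-sorted lists instead of four, so call sites have to unpack the years and citation counts as well. A usage sketch under that assumption (the input objects are placeholders, and any extra keyword arguments of the real function are omitted here):

# Placeholder inputs; in the app these come from the loaded model, tokenizer,
# the submission title/abstract, and the reviewer's Semantic Scholar papers.
titles, abstracts, urls, scores, years, citations = compute_document_score(
    doc_model,
    tokenizer,
    query_title,
    query_abs,
    papers,
)

for t, s, y, c in zip(titles, scores, years, citations):
    print('[ %0.3f ] %s (%s), %d citations' % (s, t, y, c))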
style.css  CHANGED

@@ -44,7 +44,7 @@
     box-shadow: 1px 1px 1px rgba(0, 0, 0, 0.2);
     right: -4px;
     color: #FFF;
-    font-size:
+    font-size: 11px;
     line-height: 1.4;
 }