import os
import pickle
import time

import gradio as gr
import torch
import nltk
from nltk.tokenize import sent_tokenize
from transformers import AutoTokenizer, AutoModel
from sentence_transformers import SentenceTransformer

nltk.download('punkt')                       # sentence tokenizer
nltk.download('averaged_perceptron_tagger')  # POS tagger

from input_format import *
from score import *
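# input_format and score are local helper modules; they are assumed to provide
# get_text_from_author_id, compute_document_score, and get_highlight_info used below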

# load document scoring model
torch.cuda.is_available = lambda: False  # force CPU even if a GPU is present
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
pretrained_model = 'allenai/specter'
tokenizer = AutoTokenizer.from_pretrained(pretrained_model)
doc_model = AutoModel.from_pretrained(pretrained_model) 
doc_model.to(device)
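# SPECTER provides document-level embeddings used for submission-to-paper affinity scores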

# load sentence model 
sent_model = SentenceTransformer('sentence-transformers/gtr-t5-base')
sent_model.to(device)
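# the GTR-T5 sentence encoder is used for sentence- and phrase-level highlight scores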

def get_similar_paper(
    abstract_text_input, 
    pdf_file_input, 
    author_id_input, 
    num_papers_show=10
):
    print('retrieving similar papers...')
    start = time.time()
    input_sentences = sent_tokenize(abstract_text_input)
    
    # TODO: handle PDF file input; only the abstract text is supported for now
    if pdf_file_input is not None:
        raise ValueError('PDF input is not supported yet; use the submission abstract instead.')
    else:
        # Get the reviewer's papers from their Semantic Scholar ID
        name, papers = get_text_from_author_id(author_id_input)
    
    # Compute document-level affinity scores between the submission and the reviewer's papers
    print('computing scores...')
    titles, abstracts, doc_scores = compute_document_score(
        doc_model, 
        tokenizer,
        abstract_text_input, 
        papers,
        batch=50
    )
    
    # Cache the retrieved paper info so later callbacks (change_paper) can look it up
    tmp = {
        'titles': titles,
        'abstracts': abstracts,
        'doc_scores': doc_scores
    }
    pickle.dump(tmp, open('paper_info.pkl', 'wb'))
    
    # Select top K choices of papers to show
    titles = titles[:num_papers_show]
    abstracts = abstracts[:num_papers_show]
    doc_scores = doc_scores[:num_papers_show]
    
    display_title = ['[ %0.3f ] %s'%(s, t) for t, s in zip(titles, doc_scores)]
    end = time.time()
    print('retrieval complete in [%0.2f] seconds'%(end - start))
    
    return (
        gr.update(choices=display_title, interactive=True, visible=True), # set of papers
        gr.update(choices=input_sentences, interactive=True), # submission sentences
        gr.update(visible=True),    # title row
        gr.update(visible=True),    # abstract row
        gr.update(visible=True)     # button
    )

def get_highlights(
    abstract_text_input,
    pdf_file_input,
    abstract,
    K=2
):
    print('obtaining highlights...')
    start = time.time()
    # Compute sentence-level and phrase-level affinity scores for the selected paper
    sent_ids, sent_scores, info = get_highlight_info(
        sent_model, 
        abstract_text_input, 
        abstract,
        K=K
    )
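    # info is assumed to contain the paper's words under 'all_words' and, for each
    # input-sentence index, the per-word relevance scores used below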

    input_sentences = sent_tokenize(abstract_text_input)
    num_sents = len(input_sentences)
     
    word_scores = dict()
    
    # different highlights for each input sentence
    for i in range(num_sents):
        word_scores[str(i)] = {
            "original": abstract,
            "interpretation": list(zip(info['all_words'], info[i]['scores']))
        }  # format expected by the Gradio Interpretation component
    
    # Cache the highlight info so later callbacks (change_output_highlight) can look it up
    tmp = {
        'source_sentences': input_sentences,
        'highlight': word_scores
    }
    pickle.dump(tmp, open('highlight_info.pkl', 'wb'))
    end = time.time()
    print('done in [%0.2f] seconds'%(end - start))
    
    # update the visibility of radio choices
    return gr.update(visible=True)

def update_name(author_id_input):
    # update the name of the author based on the id input
    name, _ = get_text_from_author_id(author_id_input)
    
    return gr.update(value=name)

def change_output_highlight(source_sent_choice):
    # change the output highlight based on the sentence selected from the submission
    fname = 'highlight_info.pkl'
    if os.path.exists(fname):
        tmp = pickle.load(open(fname, 'rb'))
        source_sents = tmp['source_sentences']
        highlights = tmp['highlight']
        for i, s in enumerate(source_sents):
            if source_sent_choice == s:
                return highlights[str(i)]
    else:
        return

def change_paper(selected_papers_radio):
    # change the paper to show based on the paper selected
    fname = 'paper_info.pkl'
    if os.path.exists(fname):
        tmp = pickle.load(open(fname, 'rb'))
        for title, abstract, aff_score in zip(tmp['titles'], tmp['abstracts'], tmp['doc_scores']):
            display_title = '[ %0.3f ] %s'%(aff_score, title)
            if display_title == selected_papers_radio:
                return title, abstract, aff_score  # update title, abstract, and affinity score fields
    else:
        return

with gr.Blocks() as demo:
    
    ### INPUT
    with gr.Row() as input_row:
        with gr.Column():
            abstract_text_input = gr.Textbox(label='Submission Abstract')
        with gr.Column():
            pdf_file_input = gr.File(label='OR upload a submission PDF File')
        with gr.Column():
            with gr.Row():
                author_id_input = gr.Textbox(label='Reviewer ID (Semantic Scholar)')
            with gr.Row():
                name = gr.Textbox(label='Confirm Reviewer Name', interactive=False)
                author_id_input.change(fn=update_name, inputs=author_id_input, outputs=name)
    with gr.Row():
        compute_btn = gr.Button('Search Similar Papers from the Reviewer')  
    
    ### PAPER INFORMATION
     
    # show multiple papers in radio check box to select from
    with gr.Row():
        selected_papers_radio = gr.Radio(
            choices=[],    # will be updated on button click
            visible=False, # also updated on button click
            label='Selected Top Papers from the Reviewer'
        )
    
    # selected paper information 
    with gr.Row(visible=False) as title_row:
        with gr.Column(scale=3):
            paper_title = gr.Textbox(label='Title', interactive=False)
        with gr.Column(scale=1):
            affinity = gr.Number(label='Affinity', interactive=False, value=0)
    with gr.Row(visible=False) as abstract_row:
        paper_abstract = gr.Textbox(label='Abstract', interactive=False)
        
    with gr.Row(visible=False) as explain_button_row:
        explain_btn = gr.Button('Show Relevant Parts from Selected Paper')
    
    ### RELEVANT PARTS (HIGHLIGHTS)
    
    with gr.Row(): 
        with gr.Column(scale=2): # text from submission
            source_sentences = gr.Radio(
                choices=[], 
                visible=False, 
                label='Sentences from Submission Abstract',
            )
        with gr.Column(scale=3): # highlighted text from paper
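            # Interpretation renders the per-word scores returned by change_output_highlight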
            highlight = gr.components.Interpretation(paper_abstract) 
    
    ### EVENT LISTENERS
    
    # retrieve similar papers
    compute_btn.click(
        fn=get_similar_paper,
        inputs=[
            abstract_text_input, 
            pdf_file_input, 
            author_id_input
        ],
        outputs=[
            selected_papers_radio,
            source_sentences,
            title_row,
            abstract_row,
            explain_button_row,
        ]
    )      
    
    # get highlights
    explain_btn.click(
        fn=get_highlights,
        inputs=[
            abstract_text_input, 
            pdf_file_input,
            paper_abstract
        ],
        outputs=source_sentences
    )
    
    # change highlight based on selected sentences from submission
    source_sentences.change(
        fn=change_output_highlight,
        inputs=source_sentences,
        outputs=highlight
    )
    
    # change paper to show based on selected papers
    selected_papers_radio.change(
        fn=change_paper,
        inputs=selected_papers_radio,
        outputs=[
            paper_title,
            paper_abstract,
            affinity
        ]
    )

if __name__ == "__main__":
    demo.launch()