from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import streamlit as st


# Source: https://arxiv.org/category_taxonomy
arxiv_categories = {
    "cs": "computer science",
    "cs.AI": "artificial intelligence",
    "cs.AR": "hardware architecture",
    "cs.CC": "computational complexity",
    "cs.CE": "computational engineering, finance, and science",
    "cs.CG": "computational geometry",
    "cs.CL": "computation and language",
    "cs.CR": "cryptography and security",
    "cs.CV": "computer vision and pattern recognition",
    "cs.CY": "computers and society",
    "cs.DB": "databases",
    "cs.DC": "distributed, parallel, and cluster computing",
    "cs.DL": "digital libraries",
    "cs.DM": "discrete mathematics",
    "cs.DS": "data structures and algorithms",
    "cs.ET": "emerging technologies",
    "cs.FL": "formal languages and automata theory",
    "cs.GL": "general literature",
    "cs.GR": "graphics",
    "cs.GT": "computer science and game theory",
    "cs.HC": "human-computed interaction",
    "cs.IR": "information retrieval",
    "cs.IT": "information theory",
    "cs.LG": "machine learning",
    "cs.LO": "logic in computer science",
    "cs.MA": "multiagent systems",
    "cs.MM": "multimedia",
    "cs.MS": "mathematical software",
    "cs.NA": "numerical analysis",
    "cs.NE": "neural and evolutionary computing",
    "cs.NI": "networking and internet architecture",
    "cs.OH": "other computer science",
    "cs.OS": "operating systems",
    "cs.PF": "performance",
    "cs.PL": "programming languages",
    "cs.RO": "robotics",
    "cs.SC": "symbolic computing",
    "cs.SD": "sounds",
    "cs.SE": "software engineering",
    "cs.SI": "social and information networks",
    "cs.SY": "systems and control",
    "econ": "economics",
    "econ.EM": "econometrics",
    "econ.GN": "general economics",
    "econ.TH": "theoretical economics",
    "eess": "electrical engineering and systems science",
    "eess.AS": "audio and speech processing",
    "eess.IV": "image and video processing",
    "eess.SP": "signal processing",
    "eess.SY": "systems and control",
    "math": "mathematics",
    "math.AC": "commutative algebra",
    "math.AG": "algebraic geometry",
    "math.AP": "analysis of PDEs",
    "math.AT": "algebraic topology",
    "math.CA": "classical analysis and ODEs",
    "math.CO": "combinatorics",
    "math.CT": "category theory",
    "math.CV": "complex variables",
    "math.DG": "differential geometry",
    "math.DS": "dynamical systems",
    "math.FA": "functional analysis",
    "math.GM": "general mathematics",
    "math.GN": "general topology",
    "math.GR": "group theory",
    "math.GT": "geometric topology",
    "math.HO": "history and overview",
    "math.IT": "information theory",
    "math.KT": "k-theory and homology",
    "math.LO": "logic",
    "math.MG": "metric geometry",
    "math.MP": "mathematical physics",
    "math.NA": "numerical analysis",
    "math.NT": "number theory",
    "math.OA": "operator algebras",
    "math.OC": "optimization and control",
    "math.PR": "probability",
    "math.QA": "quantum algebra",
    "math.RA": "rings and algebras",
    "math.RT": "representation theory",
    "math.SG": "symplectic geometry",
    "math.SP": "spectral theory",
    "math.ST": "statistics theory",
    "astro-ph": "astrophysics",
    "astro-ph.CO": "cosmology and nongalactic astrophysics",
    "astro-ph.EP": "earth and planetary astrophysics",
    "astro-ph.GA": "astrophysics of galaxies",
    "astro-ph.HE": "high energy astrophysical phenomena",
    "astro-ph.IM": "instrumentation and Methods for astrophysics",
    "astro-ph.SR": "solar and stellar astrophysics",
    "cond-mat": "condensed matter",
    "cond-mat.dis-nn": "disordered systems and neural networks",
    "cond-mat.mes-hall": "mesoscale and nanoscale physics",
    "cond-mat.mtrl-sci": "materials science",
    "cond-mat.other (Other": "ondensed matter",
    "cond-mat.quant-gas": "quantum gases",
    "cond-mat.soft (Soft": "ondensed matter",
    "cond-mat.stat-mech": "statistical mechanics",
    "cond-mat.str-el": "strongly correlated electrons",
    "cond-mat.supr-con": "superconductivity",
    "gr": "general relativity and quantum cosmology",
    "gr-qc": "general relativity and quantum cosmology",
    "hep-ex": "high enerty physics - experiment",
    "hep-lat": "high enerty physics - lattice",
    "hep-ph": "high enerty physics - phenomenology",
    "hep-th": "high enerty physics - theory",
    "math-ph": "mathematical physics",
    "nlin": "nonlinear sciences",
    "nlin.AO": "adaptation and self-organizing systems",
    "nlin.CD": "chaotic dynamics",
    "nlin.CG": "cellular automata and lattice gases",
    "nlin.PS": "pattern formation and solitons",
    "nlin.SI": "exactly solvable and integrable systems",
    "nucl-ex": "nuclear experiment",
    "nucl-th": "nuclear theory",
    "physics.acc-ph": "accelerator physics",
    "physics.ao-ph": "atmospheric and oceanic physics",
    "physics.app-ph": "applied physics",
    "physics.atm-clus": "atomic and molecular clusters",
    "physics.atom-ph": "atomic physics",
    "physics.bio-ph": "biological physics",
    "physics.chem-ph": "chemical physics",
    "physics.class-ph": "classical physics",
    "physics.comp-ph": "computational physics",
    "physics.data-an": "data analysis, statistics and probability",
    "physics.ed-ph": "physics education",
    "physics.flu-dyn": "fluid dynamics",
    "physics.gen-ph": "general physics",
    "physics.geo-ph": "geophysics",
    "physics.hist-ph": "history and philosophy of physics",
    "physics.ins-det": "instrumentation and detectors",
    "physics.med-ph": "medical physics",
    "physics.optics": "optics",
    "physics.plasm-ph": "plasma physics",
    "physics.pop-ph": "popular physics",
    "physics.soc-ph": "physics and society",
    "physics.space-ph": "space physics",
    "quant-ph": "quantum physics",
    "q-bio": "quantitative biology",
    "q-bio.BM": "biomolecules",
    "q-bio.CB": "cell behavior",
    "q-bio.GN": "genomics",
    "q-bio.MN": "molecular networks",
    "q-bio.NC": "Neurons and cognition",
    "q-bio.OT": "other quantitative biology",
    "q-bio.PE": "populations and evolution",
    "q-bio.QM": "quantitative methods",
    "q-bio.SC": "subcellular processes",
    "q-bio.TO": "tissues and organs",
    "q-fin": "quantitative finance",
    "q-fin.CP": "computational finance",
    "q-fin.EC": "economics",
    "q-fin.GN": "general finance",
    "q-fin.MF": "mathematical finance",
    "q-fin.PM": "portfolio management",
    "q-fin.PR": "pricing of securities",
    "q-fin.RM": "risk management",
    "q-fin.ST": "statistical finance",
    "q-fin.TR": "trading and market microstructure",
    "stat": "statistics",
    "stat.AP": "applications",
    "stat.CO": "computation",
    "stat.ME": "methodology",
    "stat.ML": "machine learning",
    "stat.OT": "other statistics",
    "stat.TH": "statistics theory",
}
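
# Example lookup (this is how format_predictions below resolves labels);
# an unrecognized label simply falls back to the empty string:
#   arxiv_categories.get("cs.LG", "")   # -> "machine learning"
#   arxiv_categories.get("xx.YY", "")   # -> ""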


# st.cache_resource is the recommended cache for unserializable resources
# such as models (st.cache_data would pickle the return value on every hit).
@st.cache_resource
def prepare_model():
    """
    Prepare the tokenizer and the model for classification.
    """
    tokenizer = AutoTokenizer.from_pretrained("oracat/bert-paper-classifier-arxiv")
    model = AutoModelForSequenceClassification.from_pretrained(
        "oracat/bert-paper-classifier-arxiv"
    )
    return tokenizer, model


def top_pct(preds, threshold=0.95):
    """
    Keep the highest-scoring predictions until their cumulative score
    reaches `threshold`; predictions scoring below 0.01 are dropped.
    """
    preds = sorted(preds, key=lambda x: -x["score"])

    top = []
    cum_score = 0
    for item in preds:
        # Drop the long tail of near-zero scores.
        if item["score"] < 0.01:
            break
        top.append(item)
        cum_score += item["score"]
        if cum_score >= threshold:
            break

    return top
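
# A quick illustration with hypothetical scores (not real model output):
#
#   top_pct([{"label": "cs.LG", "score": 0.90},
#            {"label": "cs.CL", "score": 0.06},
#            {"label": "cs.CV", "score": 0.04}])
#
# returns the first two entries, since 0.90 + 0.06 >= 0.95 crosses the
# default threshold before the third item is reached.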


def format_predictions(preds) -> str:
    """
    Prepare predictions and their scores for printing to the user
    """
    out = ""
    for i, item in enumerate(preds):
        label = item["label"]
        score = item["score"]
        description = arxiv_categories.get(label, "")
        if description:
            out += f"{i+1}. **{label}** ({description}) *(score {score:.2f})*\n"
        else:
            out += f"{i+1}. **{label}** *(score {score:.2f})*\n"
    return out
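
# For the hypothetical predictions above, this would render as:
#
#   1. **cs.LG** (machine learning) *(score 0.90)*
#   2. **cs.CL** (computation and language) *(score 0.06)*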


def process(text):
    """
    Tokenize and classify the incoming text, returning formatted predictions.
    """
    # The pipeline wrapper is rebuilt on every call, but the heavy objects
    # (model and tokenizer) are cached above, so this is inexpensive.
    pipe = pipeline("text-classification", model=model, tokenizer=tokenizer, top_k=None)
    result = pipe(text)[0]
    return format_predictions(top_pct(result))


tokenizer, model = prepare_model()


# State management
#
# The state of the app is the title and the abstract.
# Session state is used here to pre-fill the input
# fields with values for the demos.

if "title" not in st.session_state:
    st.session_state["title"] = ""

if "abstract" not in st.session_state:
    st.session_state["abstract"] = ""

if "output" not in st.session_state:
    st.session_state["output"] = ""


# Simple streamlit interface

st.markdown("### Paper classifier (arXiv taxonomy)")


## Demo buttons and their callbacks


def demo_cl_callback():
    """
    Use https://ai.facebook.com/blog/large-language-model-llama-meta-ai/ for demo
    """
    paper_title = (
        "Introducing LLaMA: A foundational, 65-billion-parameter large language model"
    )
    paper_abstract = "Over the last year, large language models β€” natural language processing (NLP) systems with billions of parameters β€” have shown new capabilities to generate creative text, solve mathematical theorems, predict protein structures, answer reading comprehension questions, and more. They are one of the clearest cases of the substantial potential benefits AI can offer at scale to billions of people. Smaller models trained on more tokens β€” which are pieces of words β€” are easier to retrain and fine-tune for specific potential product use cases. We trained LLaMA 65B and LLaMA 33B on 1.4 trillion tokens. Our smallest model, LLaMA 7B, is trained on one trillion tokens. Like other large language models, LLaMA works by taking a sequence of words as an input and predicts a next word to recursively generate text. To train our model, we chose text from the 20 languages with the most speakers, focusing on those with Latin and Cyrillic alphabets."
    st.session_state["title"] = paper_title
    st.session_state["abstract"] = paper_abstract


def demo_cv_callback():
    """
    Use https://arxiv.org/abs/2010.11929 for demo
    """
    paper_title = (
        "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale"
    )
    paper_abstract = "While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train."
    st.session_state["title"] = paper_title
    st.session_state["abstract"] = paper_abstract


def clear_callback():
    """
    Clear input fields
    """
    st.session_state["title"] = ""
    st.session_state["abstract"] = ""
    st.session_state["output"] = ""


col1, col2, col3 = st.columns([1, 1, 1])
with col1:
    st.button("Demo: LLaMA paper", on_click=demo_cl_callback)
with col2:
    st.button("Demo: ViT paper", on_click=demo_cv_callback)
with col3:
    st.button("Clear fields", on_click=clear_callback)

## Input fields

placeholder = st.empty()

title = st.text_input("Enter the title:", key="title")
abstract = st.text_area(
    "... and maybe the abstract of the paper you want to classify:", key="abstract"
)

text = "\n".join([title, abstract])

## Output

if text.strip():
    st.markdown(process(text), unsafe_allow_html=True)