vitaly committed on
Commit
6ea3f8d
1 Parent(s): d4bb227

postprocessing + new samples

Files changed (1)
  1. app.py +251 -61
app.py CHANGED
@@ -1,15 +1,23 @@
1
  import io
2
 
3
  import gradio as gr
4
  import numpy as np
5
  import spacy
6
  from spacy import displacy
7
  from spacy.training import Example
8
 
9
  from bib_tokenizers import create_references_tokenizer
10
  from schema import spankey_sentence_start, tags_ent
11
 
12

13
  nlp = spacy.load("en_bib_references_trf")
14
  # return score for each token:
15
  # with threshold set to zero each suggested span is returned, and span == token,
@@ -18,7 +26,7 @@ nlp = spacy.load("en_bib_references_trf")
18
  # @misc = "spacy.ngram_suggester.v1"
19
  # sizes = [1]
20
  nlp.get_pipe("spancat").cfg["threshold"] = 0.0 # see )
21
- print(nlp.get_pipe("spancat").cfg)
22
 
23
 
24
  def create_bib_item_start_scorer_for_doc(doc):
@@ -42,7 +50,7 @@ def create_bib_item_start_scorer_for_doc(doc):
42
  return span, max(
43
  span_group.attrs["scores"][i]
44
  for i in range(i - fuzzy_in_tokens[0], i + fuzzy_in_tokens[1] + 1)
45
- if i >= 0 and i < len(doc.text)
46
  )
47
 
48
  return scorer
@@ -50,10 +58,36 @@ def create_bib_item_start_scorer_for_doc(doc):
50
 
51
  nlp_blank = spacy.blank("en")
52
  nlp_blank.tokenizer = create_references_tokenizer()(nlp_blank)
53
 
54
 
55
  def split_up_references(
56
- references: str, is_eol_mode=False, ner=True, nlp=nlp, nlp_blank=nlp_blank
57
  ):
58
  """
59
  Args:
@@ -62,6 +96,12 @@ def split_up_references(
62
  nlp_blank - a blank nlp with the same tokenizer/language
63
  """
64

65
  target_doc = nlp_blank(references)
66
  target_tokens_idx = {
67
  offset: t.i for t in target_doc for offset in range(t.idx, t.idx + len(t))
@@ -84,6 +124,12 @@ def split_up_references(
84
  # extremely useful spaCy API for aligning the normalized and target (created from the unmodified input) docs
85
  example = Example(target_doc, norm_doc)
86

87
  if is_eol_mode:
88
  alignment_data = example.alignment.y2x.data
89
 
@@ -92,9 +138,21 @@ def split_up_references(
92
  for i, t in enumerate(target_doc):
93
  t.is_sent_start = i == 0
94
 
95
- char_offset = 0
96
  token_scorer = create_bib_item_start_scorer_for_doc(norm_doc)
97
  threshold = 0.5
98
  for line_num, line in enumerate(lines):
99
  if not line.strip():
100
  # ignore empty line
@@ -109,39 +167,120 @@ def split_up_references(
109
  ):
110
  token_index_in_target_doc += 1
111
 
112
- index_in_norm_doc = np.where(alignment_data == token_index_in_target_doc)
113
- if type(index_in_norm_doc) == tuple:
114
- index_in_norm_doc = index_in_norm_doc[0] # depends on numpy version...
115
-
116
- if index_in_norm_doc.size > 0:
117
- index_in_norm_doc = index_in_norm_doc[0].item()
118
- span, score = token_scorer(index_in_norm_doc)
119
- print(span, score, index_in_norm_doc)
120
- if score > threshold:
121
- target_doc[target_tokens_idx[char_offset]].is_sent_start = True
122
 
123
  char_offset += len(line)
 
 
124
  else:
125
  # copy SentenceRecognizer annotations from doc without '\n' to the target doc
126
  sent_start = example.get_aligned("SENT_START")
127
  for i, t in enumerate(target_doc):
128
  target_doc[i].is_sent_start = sent_start[i] == 1
129
 
130
- # copy ner annotations:
131
- for label in tags_ent:
132
- target_doc.vocab[label]
133
- target_doc.ents = example.get_aligned_spans_y2x(norm_doc.ents)
134
-
135
  return target_doc
136
 
137
 
138
- def text_analysis(text, is_eol_mode):
139
 
140
  if not text or not text.strip():
141
  return "<div style='max-width:100%; overflow:auto; color:grey'><p>Unparsed Bibliography Section is empty</p></div>"
142
 
143
  doc_with_linebreaks = split_up_references(
144
- text, is_eol_mode=is_eol_mode, nlp=nlp, nlp_blank=nlp_blank
145
  )
146
 
147
  html = ""
@@ -198,15 +337,27 @@ with demo:
198
  placeholder="Enter bibliography here...",
199
  lines=20,
200
  )
201
- is_eol_mode = gr.components.Checkbox(
202
- label="a line does not contain more than one bibitem (Multiline bibitems are supported regardless of this choice)"
203
  )
204
  html = gr.components.HTML(label="Parsed Bib Items")
205
- textbox.change(fn=text_analysis, inputs=[textbox, is_eol_mode], outputs=[html])
206
- is_eol_mode.change(fn=text_analysis, inputs=[textbox, is_eol_mode], outputs=[html])
207
 
208
  gr.Examples(
209
  examples=[
210
  [
211
  """[1] B. Foxman, R. Barlow, H. D'Arcy, B. Gillespie, and J. D. Sobel, "Urinary tract infection: self-reported incidence and associated costs," Ann Epidemiol, vol. 10, pp. 509-515, 2000. [2] B. Foxman, "Epidemiology of urinary tract infections: incidence, morbidity, and economic costs," Am J Med, vol. 113, pp. 5-13, 2002. [3] L. Nicolle, "Urinary tract infections in the elderly," Clin Geriatr Med, vol. 25, pp. 423-436, 2009."""
212
  ],
@@ -228,43 +379,82 @@ CFR
228
  (4) Holzinger, E.R. et al. Genome-wide association study of plasma efavirenz pharmacokinetics in AIDS Clinical Trials Group protocols implicates several CYP2B6 variants. Pharmacogenet Genomics 22, 858-67 (2012).
229
  """
230
  ],
231
- [
232
- """[Ein05] Albert Einstein. Zur Elektrodynamik bewegter K ̈orper. (German)
233
- [On the electrodynamics of moving bodies]. Annalen der Physik,
234
- 322(10):891–921, 1905.
235
- [GMS93] Michel Goossens, Frank Mittelbach, and Alexander Samarin. The LATEX Companion. Addison-Wesley, Reading, Massachusetts, 1993.
236
- [Knu] Donald Knuth. Knuth: Computers and typesetting."""
237
  ],
238
- [
239
- """References.
240
- Bartkiewicz, A., Szymczak, M., Cohen, R. J., & Richards, A. M. S. 2005, MN- RAS, 361, 623
241
- Bartkiewicz, A., Szymczak, M., & van Langevelde, H. J. 2016, A&A, 587, A104
242
- Benjamin, R. A., Churchwell, E., Babler, B. L., et al. 2003, PASP, 115, 953
243
- Beuther, H., Mottram, J. C., Ahmadi, A., et al. 2018, A&A, 617, A100
244
- Beuther, H., Walsh, A. J., Thorwirth, S., et al. 2007, A&A, 466, 989
245
- Brogan, C. L., Hunter, T. R., Cyganowski, C. J., et al. 2011, ApJ, 739, L16
246
- Brown, A. T., Little, L. T., MacDonald, G. H., Riley, P. W., & Matheson, D. N.
247
- 1981, MNRAS, 195, 607
248
- Brown, R. D. & Cragg, D. M. 1991, ApJ, 378, 445
249
- Carrasco-González, C., Sanna, A., Rodríguez-Kamenetzky, A., et al. 2021, ApJ,
250
- 914, L1
251
- Cesaroni, R., Walmsley, C. M., & Churchwell, E. 1992, A&A, 256, 618
252
- Cheung, A. C., Rank, D. M., Townes, C. H., Thornton, D. D., & Welch, W. J.
253
- 1968, Phys. Rev. Lett., 21, 1701
254
- Churchwell, E., Babler, B. L., Meade, M. R., et al. 2009, PASP, 121, 213
255
- Cohen, R. J. & Brebner, G. C. 1985, MNRAS, 216, 51P
256
- Comito, C., Schilke, P., Endesfelder, U., Jiménez-Serra, I., & Martín-Pintado, J.
257
- 2007, A&A, 469, 207
258
- Curiel, S., Ho, P. T. P., Patel, N. A., et al. 2006, ApJ, 638, 878
259
- Danby, G., Flower, D. R., Valiron, P., Schilke, P., & Walmsley, C. M. 1988,
260
- MNRAS, 235, 229
261
- De Buizer, J. M., Liu, M., Tan, J. C., et al. 2017, ApJ, 843, 33
262
- De Buizer, J. M., Radomski, J. T., Telesco, C. M., & Piña, R. K. 2003, ApJ, 598,
263
- 1127
264
- Dzib, S., Loinard, L., Rodríguez, L. F., Mioduszewski, A. J., & Torres, R. M.
265
- 2011, ApJ, 733, 71
266
- Flower, D. R., Offer, A., & Schilke, P. 1990, MNRAS, 244, 4P
267
- Galván-Madrid, R., Keto, E., Zhang, Q., et al. 2009, ApJ, 706, 1036"""
268
  ],
269
  ],
270
  inputs=textbox,
 
1
  import io
2
+ import logging
3
+ import timeit
4
+ from typing import Optional
5
 
6
  import gradio as gr
7
  import numpy as np
8
  import spacy
9
  from spacy import displacy
10
+ from spacy.matcher import Matcher
11
  from spacy.training import Example
12
 
13
  from bib_tokenizers import create_references_tokenizer
14
  from schema import spankey_sentence_start, tags_ent
15
 
16
 
17
+ logging.basicConfig(level=logging.INFO)
18
+ log = logging.getLogger(__name__)
19
+ _LOG_STR_LEN = 16
20
+
21
  nlp = spacy.load("en_bib_references_trf")
22
  # return score for each token:
23
  # with threshold set to zero each suggested span is returned, and span == token,
 
26
  # @misc = "spacy.ngram_suggester.v1"
27
  # sizes = [1]
28
  nlp.get_pipe("spancat").cfg["threshold"] = 0.0 # see )
29
+ log.info("spancat config: %s", nlp.get_pipe("spancat").cfg)
30
 
31
 
32
  def create_bib_item_start_scorer_for_doc(doc):
 
50
  return span, max(
51
  span_group.attrs["scores"][i]
52
  for i in range(i - fuzzy_in_tokens[0], i + fuzzy_in_tokens[1] + 1)
53
+ if i >= 0 and i < len(doc)
54
  )
55
 
56
  return scorer
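A short usage sketch of the returned scorer, with names taken from the calls further down in this diff (token_index here stands for any token index into norm_doc; the fuzzy window size is assumed to be set inside the full function definition, which this hunk does not show):

    token_scorer = create_bib_item_start_scorer_for_doc(norm_doc)
    span, score = token_scorer(token_index)  # max spancat score in a small window around the token
    if score > 0.5:  # same threshold split_up_references uses below
        pass         # treat this token as the start of a new reference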
 
58
 
59
  nlp_blank = spacy.blank("en")
60
  nlp_blank.tokenizer = create_references_tokenizer()(nlp_blank)
61
+ # nlp_blank.tokenizer = nlp.tokenizer
62
+
63
+
64
+ def _tokenize_test(nlp):
65
+ _text = """MNRAS, 216, 51P
66
+ Comito"""
67
+ tokens = [f"'{t}'" for t in nlp(_text)]
68
+ log.info("tokens: %s", tokens)
69
+ return tokens
70
+
71
+
72
+ assert len(_tokenize_test(nlp)) == len(
73
+ _tokenize_test(nlp_blank)
74
+ ), "Check that the same tokenizer is used for both: trained model (in its config) and nlp_blank"
75
+
76
+
77
+ def _token_index_in_norm_doc(
78
+ token_index_in_target_doc: int, alignment_data: np.ndarray
79
+ ) -> Optional[int]:
80
+
81
+ index_in_norm_doc = np.where(alignment_data == token_index_in_target_doc)
82
+ if type(index_in_norm_doc) == tuple:
83
+ index_in_norm_doc = index_in_norm_doc[0] # depends on numpy version...
84
+
85
+ if index_in_norm_doc.size > 0:
86
+ return index_in_norm_doc[0].item()
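The helper above inverts spaCy's token alignment: given a token index in the target doc (the one that still contains the newline tokens), it returns the index of the aligned token in the normalized doc. A minimal sketch of the idea, assuming mostly one-to-one alignments (the same assumption the np.where lookup makes):

    example = Example(target_doc, norm_doc)  # x = target doc, y = normalized doc
    y2x = example.alignment.y2x.data         # flattened target indices (about one per norm token when alignment is one-to-one)
    hits = np.where(y2x == token_index_in_target_doc)[0]
    index_in_norm_doc = hits[0].item() if hits.size else None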
87
 
88
 
89
  def split_up_references(
90
+ references: str, is_eol_mode=True, ner=True, nlp=nlp, nlp_blank=nlp_blank
91
  ):
92
  """
93
  Args:
 
96
  nlp_blank - a blank nlp with the same tokenizer/language
97
  """
98
 
99
+ _timeit_start = timeit.default_timer()
100
+ log.info(
101
+ "start processing: '%s...'",
102
+ references[:_LOG_STR_LEN],
103
+ )
104
+
105
  target_doc = nlp_blank(references)
106
  target_tokens_idx = {
107
  offset: t.i for t in target_doc for offset in range(t.idx, t.idx + len(t))
 
124
  # extremely useful spaCy API for aligning the normalized and target (created from the unmodified input) docs
125
  example = Example(target_doc, norm_doc)
126
 
127
+ # copy ner annotations:
128
+ for label in tags_ent:
129
+ target_doc.vocab[label]
130
+ target_doc.ents = example.get_aligned_spans_y2x(norm_doc.ents)
131
+
132
+ # set senter annotations
133
  if is_eol_mode:
134
  alignment_data = example.alignment.y2x.data
135
 
 
138
  for i, t in enumerate(target_doc):
139
  t.is_sent_start = i == 0
140

141
  token_scorer = create_bib_item_start_scorer_for_doc(norm_doc)
142
+
143
+ def target_doc_token_scorer(token_index_in_target_doc):
144
+ index_in_norm_doc = _token_index_in_norm_doc(
145
+ token_index_in_target_doc, alignment_data
146
+ )
147
+ if index_in_norm_doc is not None:
148
+ span, score = token_scorer(index_in_norm_doc)
149
+ # print(span, score, index_in_norm_doc)
150
+ return score
151
+ return 0.0
152
+
153
  threshold = 0.5
154
+
155
+ char_offset = 0
156
  for line_num, line in enumerate(lines):
157
  if not line.strip():
158
  # ignore empty line
 
167
  ):
168
  token_index_in_target_doc += 1
169
 
170
+ score = target_doc_token_scorer(token_index_in_target_doc)
171
+ if score > threshold:
172
+ target_doc[target_tokens_idx[char_offset]].is_sent_start = True
173
 
174
  char_offset += len(line)
175
+
176
+ _level_off_references(target_doc, target_doc_token_scorer)
177
  else:
178
  # copy SentenceRecognizer annotations from doc without '\n' to the target doc
179
  sent_start = example.get_aligned("SENT_START")
180
  for i, t in enumerate(target_doc):
181
  target_doc[i].is_sent_start = sent_start[i] == 1
182
 
183
+ log.info(
184
+ "done: '%s...', elapsed: %s",
185
+ references[:_LOG_STR_LEN],
186
+ timeit.default_timer() - _timeit_start,
187
+ )
188
  return target_doc
189
 
190
 
191
+ def _level_off_references(doc, token_scorer):
192
+ """
193
+ Problem:
194
+ if a model that predicts reference boundaries were 99% accurate,
195
+ the success rate for real papers would still be relatively low,
196
+ given that a typical bibliography consists of dozens of references.
197
+
198
+ This function attempts to detect references that contain more lines than
199
+ the others and split them up heuristically. The result will not necessarily be better.
200
+ """
201
+
202
+ lengths = np.array([len(ref.text.strip().split("\n")) for ref in doc.sents])
203
+ median = np.median(lengths)
204
+ mean = np.mean(lengths)
205
+ sigma = np.std(
206
+ lengths
207
+ ) # read this: https://stackoverflow.com/questions/27600207/why-does-numpy-std-give-a-different-result-to-matlab-std
208
+
209
+ log.info("median: %s, mean: %s, sigma: %s", median, mean, sigma)
210
+ if sigma == 0.0:
211
+ return
212
+
213
+ sent_starts = []
214
+ matcher = Matcher(nlp.vocab)
215
+ pattern = [
216
+ # {"TEXT": {"REGEX": "^(.*)(\\n)+(.*)$"}, "IS_SPACE": True},
217
+ {"TEXT": {"REGEX": "^(.*\\n.*)+$"}, "IS_SPACE": True},
218
+ {"IS_SPACE": True, "OP": "*"},
219
+ {"IS_SPACE": False},
220
+ ]
221
+ matcher.add("line_start", [pattern])
222
+ for n, ref in enumerate(doc.sents):
223
+ # print([f"'{t}'" for t in ref])
224
+ surprising = (lengths[n] - mean) / sigma
225
+ if surprising > 1.6:
226
+ log.info("surprising: %s: %s", surprising, ref.text[:_LOG_STR_LEN])
227
+ scores = [token_scorer(t.i) for t in ref]
228
+ median_score = np.median(scores)
229
+ # check the first non-space token on each line
230
+ start = None  # start of the next reference if we decide to split up the ref span
231
+ for _, eol, token_i_after_eol in matcher(ref):
232
+ i = token_i_after_eol - 1
233
+ # using the predicted spancat score
234
+ log.info(
235
+ "line start: token=%s, score=%s, median_score=%s, ahead=%s",
236
+ ref[i],
237
+ scores[i],
238
+ median_score,
239
+ len(ref[token_i_after_eol:]),
240
+ )
241
+ # TODO: play with softmax temperature: find a way to get activations:
242
+ # here we have an activated neuron in the softmax input, but the corresponding softmax output is still too low
243
+ if scores[i] > 10 * median_score and len(ref[token_i_after_eol:]) > 10:
244
+ sent_starts.append(ref[i])
245
+ start = i
246
+ continue
247
+
248
+ # using ner output:
249
+ # an edge case: the next line starts with a citation number or names and
250
+ # the previous lines already contain names and a title
251
+ before_eol_ents = [
252
+ ent.label_ for ent in ref[0 if start is None else start : eol].ents
253
+ ]
254
+ # 2 entities after eol, if any
255
+ after_eol_ents = [ent.label_ for ent in ref[eol:].ents][:2]
256
+ if (
257
+ set(before_eol_ents) & set(["issued", "title", "container-title"])
258
+ and set(before_eol_ents) & set(["family", "given"])
259
+ and set(after_eol_ents)
260
+ & set(
261
+ [
262
+ "family",
263
+ "given",
264
+ "citation-number",
265
+ "citation-label",
266
+ ]
267
+ )
268
+ ):
269
+ log.info("splitting up using NER predictions: %s", ref[i])
270
+ sent_starts.append(ref[i])
271
+ start = i
272
+
273
+ for t in sent_starts:
274
+ t.is_sent_start = True
275
+
276
+
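A worked example of the outlier test above: with per-reference line counts of [1, 1, 1, 1, 5], the mean is 1.8 and the population standard deviation (numpy's default) is 1.6, so the five-line reference scores (5 - 1.8) / 1.6 = 2.0, above the 1.6 cut-off, and only that reference is inspected for splitting:

    import numpy as np
    lengths = np.array([1, 1, 1, 1, 5])          # lines per detected reference
    mean, sigma = lengths.mean(), lengths.std()  # 1.8 and 1.6
    print((lengths - mean) / sigma)              # [-0.5 -0.5 -0.5 -0.5  2. ]; only the last exceeds 1.6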
277
+ def text_analysis(text: str, more_than_one_ref_per_line: bool):
278
 
279
  if not text or not text.strip():
280
  return "<div style='max-width:100%; overflow:auto; color:grey'><p>Unparsed Bibliography Section is empty</p></div>"
281
 
282
  doc_with_linebreaks = split_up_references(
283
+ text, is_eol_mode=not more_than_one_ref_per_line, nlp=nlp, nlp_blank=nlp_blank
284
  )
285
 
286
  html = ""
 
337
  placeholder="Enter bibliography here...",
338
  lines=20,
339
  )
340
+ more_than_one_ref_per_line = gr.components.Checkbox(
341
+ value=False,
342
+ label="My bibliography may contain more than one reference per line - the model will make a prediction for each token: more predictions, more chances to make a mistake",
343
  )
344
  html = gr.components.HTML(label="Parsed Bib Items")
345
+ textbox.change(
346
+ fn=text_analysis, inputs=[textbox, more_than_one_ref_per_line], outputs=[html]
347
+ )
348
+ more_than_one_ref_per_line.change(
349
+ fn=text_analysis, inputs=[textbox, more_than_one_ref_per_line], outputs=[html]
350
+ )
351
 
352
  gr.Examples(
353
  examples=[
354
+ [
355
+ """[Ein05] Albert Einstein. Zur Elektrodynamik bewegter K ̈orper. (German)
356
+ [On the electrodynamics of moving bodies]. Annalen der Physik,
357
+ 322(10):891–921, 1905.
358
+ [GMS93] Michel Goossens, Frank Mittelbach, and Alexander Samarin. The LATEX Companion. Addison-Wesley, Reading, Massachusetts, 1993.
359
+ [Knu] Donald Knuth. Knuth: Computers and typesetting."""
360
+ ],
361
  [
362
  """[1] B. Foxman, R. Barlow, H. D'Arcy, B. Gillespie, and J. D. Sobel, "Urinary tract infection: self-reported incidence and associated costs," Ann Epidemiol, vol. 10, pp. 509-515, 2000. [2] B. Foxman, "Epidemiology of urinary tract infections: incidence, morbidity, and economic costs," Am J Med, vol. 113, pp. 5-13, 2002. [3] L. Nicolle, "Urinary tract infections in the elderly," Clin Geriatr Med, vol. 25, pp. 423-436, 2009."""
363
  ],
 
379
  (4) Holzinger, E.R. et al. Genome-wide association study of plasma efavirenz pharmacokinetics in AIDS Clinical Trials Group protocols implicates several CYP2B6 variants. Pharmacogenet Genomics 22, 858-67 (2012).
380
  """
381
  ],
382
+ [ # https://arxiv.org/pdf/1910.01108v4.pdf
383
+ """Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In NAACL-HLT, 2018.
384
+ Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. Language models are unsupervised multitask learners. 2019.
385
+ Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar S. Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke S. Zettlemoyer, and Veselin Stoyanov. Roberta: A robustly optimized bert pretraining approach. ArXiv, abs/1907.11692, 2019.
386
+ Roy Schwartz, Jesse Dodge, Noah A. Smith, and Oren Etzioni. Green ai. ArXiv, abs/1907.10597, 2019. Emma Strubell, Ananya Ganesh, and Andrew McCallum. Energy and policy considerations for deep learning in
387
+ nlp. In ACL, 2019.
388
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser,
389
+ and Illia Polosukhin. Attention is all you need. In NIPS, 2017.
390
+ Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, and Jamie Brew. Transformers: State-of-the-art natural language processing, 2019.
391
+ Cristian Bucila, Rich Caruana, and Alexandru Niculescu-Mizil. Model compression. In KDD, 2006.
392
+ Geoffrey E. Hinton, Oriol Vinyals, and Jeffrey Dean. Distilling the knowledge in a neural network. ArXiv,
393
+ abs/1503.02531, 2015.
394
+ Yukun Zhu, Ryan Kiros, Richard S. Zemel, Ruslan Salakhutdinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. 2015 IEEE International Conference on Computer Vision (ICCV), pages 19–27, 2015.
395
+ Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. Glue: A multi-task benchmark and analysis platform for natural language understanding. In ICLR, 2018.
396
+ Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. Deep contextualized word representations. In NAACL, 2018.
397
+ Alex Wang, Ian F. Tenney, Yada Pruksachatkun, Katherin Yu, Jan Hula, Patrick Xia, Raghu Pappagari, Shuning Jin, R. Thomas McCoy, Roma Patel, Yinghui Huang, Jason Phang, Edouard Grave, Najoung Kim, Phu Mon Htut, Thibault F’evry, Berlin Chen, Nikita Nangia, Haokun Liu, Anhad Mohananey, Shikha Bordia, Nicolas Patry, Ellie Pavlick, and Samuel R. Bowman. jiant 1.1: A software toolkit for research on general-purpose text understanding models. http://jiant.info/, 2019.
398
+ Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng, and Christopher Potts. Learning word vectors for sentiment analysis. In ACL, 2011.
399
+ Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. Squad: 100, 000+ questions for machine comprehension of text. In EMNLP, 2016."""
400
  ],
401
+ [ # https://isg.beel.org/blog/2019/12/10/giant-the-1-billion-annotated-synthetic-bibliographic-reference-string-dataset-for-deep-citation-parsing-pre-print/
402
+ """Crossref, https://www.crossref.org
403
+ A JavaScript implementation of the Citation Style Language (CSL),
404
+ https://github.com/Juris-M/citeproc-js
405
+ Official repository for Citation Style Language (CSL),
406
+ https://github.com/citation-style-language/styles
407
+ Anzaroot, S., McCallum, A.: A New Dataset for fine-Grained Citation field Extraction (2013)
408
+ Councill, I.G., Giles, C.L., Kan, M.Y.: Parscit: an open-source crf reference string parsing package. In: LREC. vol. 8, pp. 661–667 (2008)
409
+ Fedoryszak, M., Tkaczyk, D., Bolikowski, L.: Large scale citation matching using apache hadoop. In: International Conference on Theory and Practice of Digital Libraries. pp. 362–365. Springer (2013)
410
+ Hetzner, E.: A simple method for citation metadata extraction using hidden markov models. In: Proceedings of the 8th ACM/IEEE-CS joint conference on Digital libraries. pp. 280–284. ACM (2008)
411
+ Lample, G., Ballesteros, M., Subramanian, S., Kawakami, K., Dyer, C.: Neural architectures for named entity recognition. arXiv preprint arXiv:1603.01360 (2016)
412
+ Lopez, P.: Grobid: Combining automatic bibliographic data recognition and term extraction for scholarship publications. In: International conference on theory and practice of digital libraries. pp. 473–474. Springer (2009)
413
+ Ma, X., Hovy, E.: End-to-end sequence labeling via bi-directional lstm-cnns-crf. arXiv preprint arXiv:1603.01354 (2016)
414
+ Mikolov, T., Sutskever, I., Chen, K., Corrado, G.S., Dean, J.: Distributed representations of words and phrases and their compositionality. In: Advances in neural information processing systems. pp. 3111–3119 (2013)
415
+ Ojokoh, B., Zhang, M., Tang, J.: A trigram hidden markov model for metadata extraction from heterogeneous references. Information Sciences 181(9), 1538–1551
416
+ (2011)
417
+ Okada, T., Takasu, A., Adachi, J.: Bibliographic component extraction using support vector machines and hidden markov models. In: International Conference on
418
+ Theory and Practice of Digital Libraries. pp. 501–512. Springer (2004)
419
+ Prasad, A., Kaur, M., Kan, M.Y.: Neural parscit: a deep learning-based reference string parser. International Journal on Digital Libraries 19(4), 323–337 (2018)
420
+ Rodrigues Alves, D., Colavizza, G., Kaplan, F.: Deep reference mining from scholarly literature in the arts and humanities. Frontiers in Research Metrics and Analytics 3, 21 (2018)
421
+ Tkaczyk, D., Collins, A., Sheridan, P., Beel, J.: Machine learning vs. rules and out-of-the-box vs. retrained: An evaluation of open-source bibliographic reference and citation parsers. In: Proceedings of the 18th ACM/IEEE on joint conference on digital libraries. pp. 99–108. ACM (2018)
422
+ Tkaczyk, D., Szostek, P., Dendek, P.J., Fedoryszak, M., Bolikowski, L.: Cermine– automatic extraction of metadata and references from scientific literature. In: 2014 11th IAPR International Workshop on Document Analysis Systems. pp. 217–221. IEEE (2014)
423
+ Yin, P., Zhang, M., Deng, Z., Yang, D.: Metadata extraction from bibliographies using bigram hmm. In: International Conference on Asian Digital Libraries. pp.
424
+ 310–319. Springer (2004)
425
+ Zhang, X., Zou, J., Le, D.X., Thoma, G.R.: A structural svm approach for reference parsing. BMC bioinformatics 12(3), S7 (2011)"""
426
+ ],
427
+ [ # https://arxiv.org/pdf/1706.03762.pdf
428
+ """[28] Romain Paulus, Caiming Xiong, and Richard Socher. A deep reinforced model for abstractive
429
+ summarization. arXiv preprint arXiv:1705.04304, 2017.
430
+ [29] Slav Petrov, Leon Barrett, Romain Thibaux, and Dan Klein. Learning accurate, compact,
431
+ and interpretable tree annotation. In Proceedings of the 21st International Conference on
432
+ Computational Linguistics and 44th Annual Meeting of the ACL, pages 433–440. ACL, July
433
+ 2006.
434
+ [30] Ofir Press and Lior Wolf. Using the output embedding to improve language models. arXiv preprint
435
+ arXiv:1608.05859, 2016.
436
+ [31] Rico Sennrich, Barry Haddow, and Alexandra Birch. Neural machine translation of rare words
437
+ with subword units. arXiv preprint arXiv:1508.07909, 2015.
438
+ [32] Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton,
439
+ and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixture-of-experts
440
+ layer. arXiv preprint arXiv:1701.06538, 2017.
441
+ [33] Nitish Srivastava, Geoffrey E Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdi-
442
+ nov. Dropout: a simple way to prevent neural networks from overfitting. Journal of Machine
443
+ Learning Research, 15(1):1929–1958, 2014.
444
+ [34] Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, and Rob Fergus. End-to-end memory
445
+ networks. In C. Cortes, N. D. Lawrence, D. D. Lee, M. Sugiyama, and R. Garnett, editors,
446
+ Advances in Neural Information Processing Systems 28, pages 2440–2448. Curran Associates,
447
+ Inc., 2015.
448
+ [35] Ilya Sutskever, Oriol Vinyals, and Quoc VV Le. Sequence to sequence learning with neural
449
+ networks. In Advances in Neural Information Processing Systems, pages 3104–3112, 2014.
450
+ [36] Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, and Zbigniew Wojna.
451
+ Rethinking the inception architecture for computer vision. CoRR, abs/1512.00567, 2015.
452
+ [37] Vinyals & Kaiser, Koo, Petrov, Sutskever, and Hinton. Grammar as a foreign language. In
453
+ Advances in Neural Information Processing Systems, 2015.
454
+ [38] Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang
455
+ Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. Google’s neural machine
456
+ translation system: Bridging the gap between human and machine translation. arXiv preprint
457
+ arXiv:1609.08144, 2016."""
458
  ],
459
  ],
460
  inputs=textbox,