MHoubre committed on
Commit
8763e71
1 Parent(s): 87941da

getting prmu and reordering present keyphrases

Files changed (4)
  1. .gitattributes +1 -0
  2. data.jsonl +2 -2
  3. prmu.py +131 -0
  4. pubmed.py +4 -2
.gitattributes CHANGED
@@ -35,3 +35,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.mp3 filter=lfs diff=lfs merge=lfs -text
 *.ogg filter=lfs diff=lfs merge=lfs -text
 *.wav filter=lfs diff=lfs merge=lfs -text
+data.jsonl filter=lfs diff=lfs merge=lfs -text
data.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a03b0b004ab77c9ada8e4fcc6912d023ef85905cf80b2415a1bc032fdcfdf888
-size 40569656
+oid sha256:56e6b432c7c37f32067d8df68eddeff90eca027448d9b80038d5d2ea7f685065
+size 40486751
prmu.py ADDED
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# In[1]:
+
+
+from datasets import load_dataset, load_from_disk
+import spacy
+import re
+# from spacy.lang.en import English
+from spacy.tokenizer import _get_regex_pattern
+from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
+from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS
+from spacy.util import compile_infix_regex
+from nltk.stem.snowball import SnowballStemmer as Stemmer
+import numpy as np
+import sys
+
+# In[2]:
+
+print("LOADING DATASET")
+dataset = load_dataset("json", data_files={"test": "data.jsonl"})
+
+
+# In[3]:
+
+# Keep intra-word hyphens so compounds like "machine-learning" stay single tokens.
+nlp = spacy.load("en_core_web_sm")
+re_token_match = _get_regex_pattern(nlp.Defaults.token_match)
+re_token_match = rf"({re_token_match}|\w+-\w+)"
+nlp.tokenizer.token_match = re.compile(re_token_match).match
+
+
+# Modify tokenizer infix patterns
+infixes = (
+    LIST_ELLIPSES
+    + LIST_ICONS
+    + [
+        r"(?<=[0-9])[+\-\*^](?=[0-9-])",
+        r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
+            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
+        ),
+        r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
+        # ✅ Commented out regex that splits on hyphens between letters:
+        # r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
+        r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
+    ]
+)
+
+infix_re = compile_infix_regex(infixes)
+nlp.tokenizer.infix_finditer = infix_re.finditer
+
+
+# In[5]:
+
+
+def contains(subseq, inseq):
+    """Return True if subseq occurs as a contiguous subsequence of inseq."""
+    return any(inseq[pos:pos + len(subseq)] == subseq for pos in range(0, len(inseq) - len(subseq) + 1))
+
+
+def find_prmu(tok_title, tok_text, tok_kp):
+    """Find the PRMU category of a given keyphrase."""
+
+    # if kp is present
+    if contains(tok_kp, tok_title) or contains(tok_kp, tok_text):
+        return "P"
+
+    # if kp is considered as absent
+    else:
+        # find present and absent words
+        present_words = [w for w in tok_kp if w in tok_title or w in tok_text]
+
+        # if "all" words are present
+        if len(present_words) == len(tok_kp):
+            return "R"
+        # if "some" words are present
+        elif len(present_words) > 0:
+            return "M"
+        # if "no" words are present
+        else:
+            return "U"
+
+
+def tokenize(dataset):
+    """Tokenize and stem the keyphrases of a document."""
+    keyphrases_stems = []
+    for keyphrase in dataset["keyphrases"]:
+        keyphrase_spacy = nlp(keyphrase)
+        keyphrase_tokens = [token.text for token in keyphrase_spacy]
+        keyphrase_stems = [Stemmer('porter').stem(w.lower()) for w in keyphrase_tokens]
+        keyphrase_stems = " ".join(keyphrase_stems)
+        keyphrases_stems.append(keyphrase_stems)
+
+    dataset["tokenized_keyphrases"] = keyphrases_stems
+    return dataset
+
+
+def prmu_dataset(dataset):
+    """
+    Tokenize and stem the title and text of a document,
+    then run the PRMU algorithm on its stemmed keyphrases.
+    """
+    title_spacy = nlp(dataset['title'])
+    abstract_spacy = nlp(dataset['text'])
+
+    title_tokens = [token.text for token in title_spacy]
+    abstract_tokens = [token.text for token in abstract_spacy]
+
+    title_stems = [Stemmer('porter').stem(w.lower()) for w in title_tokens]
+    abstract_stems = [Stemmer('porter').stem(w.lower()) for w in abstract_tokens]
+
+    # keyphrase stems are stored as space-joined strings; split them back into token lists
+    prmu = [find_prmu(title_stems, abstract_stems, kp.split()) for kp in dataset["tokenized_keyphrases"]]
+
+    dataset['prmu'] = prmu
+
+    return dataset
+
+
+# In[6]:
+
+
+print("TOKENIZATION")
+dataset = dataset.map(tokenize, num_proc=int(sys.argv[1]))
+
+print("GETTING PRMU")
+dataset = dataset.map(prmu_dataset, num_proc=int(sys.argv[1]))
+
+dataset["test"].to_json("data.jsonl")
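
For context, prmu.py takes the number of datasets.map worker processes as its only command-line argument (passed as num_proc), so a run would look like `python prmu.py 8`, rewriting data.jsonl in place. Below is a minimal, self-contained sketch of the PRMU labelling the script applies; the token lists are hypothetical, already-stemmed examples, and the two helpers merely mirror `contains` and `find_prmu` above rather than importing them (the script loads the dataset at import time).

def contains(subseq, inseq):
    # True if subseq occurs contiguously inside inseq
    return any(inseq[i:i + len(subseq)] == subseq
               for i in range(len(inseq) - len(subseq) + 1))

def prmu_label(tok_title, tok_text, tok_kp):
    if contains(tok_kp, tok_title) or contains(tok_kp, tok_text):
        return "P"  # Present: contiguous match in the title or text
    present = [w for w in tok_kp if w in tok_title or w in tok_text]
    if len(present) == len(tok_kp):
        return "R"  # Reordered: every word present, but not contiguously
    if present:
        return "M"  # Mixed: only some words present
    return "U"      # Unseen: no word present

# Hypothetical, already-stemmed token lists
title = ["keyphras", "gener"]
text = ["studi", "of", "keyphras", "gener", "from", "abstract"]

print(prmu_label(title, text, ["keyphras", "gener"]))        # P
print(prmu_label(title, text, ["gener", "of", "keyphras"]))  # R
print(prmu_label(title, text, ["keyphras", "extract"]))      # M
print(prmu_label(title, text, ["topic", "model"]))           # U
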
pubmed.py CHANGED
@@ -63,9 +63,10 @@ class Pubmed(datasets.GeneratorBasedBuilder):
         features = datasets.Features(
             {
                 "id": datasets.Value("string"),
+                "title": datasets.Value("string"),
                 "text": datasets.Value("string"),
                 "keyphrases": datasets.features.Sequence(datasets.Value("string")),
-                "stemmed_keyphrases": datasets.features.Sequence(datasets.Value("string"))
+                "prmu": datasets.features.Sequence(datasets.Value("string"))
             }
         )
         return datasets.DatasetInfo(
@@ -116,8 +117,9 @@ class Pubmed(datasets.GeneratorBasedBuilder):
             # Yields examples as (key, example) tuples
             yield key, {
                 "id": data["id"],
+                "title": data["title"],
                 "text": data["text"],
                 "keyphrases": data["keyphrases"],
-                "stemmed_keyphrases": data["stemmed_keyphrases"]
+                "prmu": data["prmu"]
             }
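
As a usage sketch of the regenerated data: after this commit, each record carries a prmu label per keyphrase. Loading the file the same way prmu.py does (field names are taken from the loader above; the first record shown is arbitrary):

from datasets import load_dataset

# Same load call as prmu.py above, reading the regenerated file.
dataset = load_dataset("json", data_files={"test": "data.jsonl"})

sample = dataset["test"][0]
# "prmu" is parallel to "keyphrases": one of "P", "R", "M" or "U" per keyphrase.
for kp, label in zip(sample["keyphrases"], sample["prmu"]):
    print(f"{label}\t{kp}")
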