Datasets:
Multilinguality: multilingual
Size Categories: 1K<n<10K
Language Creators: unknown
Annotations Creators: unknown
Tags:
License:
boudinfl committed
Commit 94d34e8
1 Parent(s): 9067336

Adding stats + prmu scripts

Files changed (4)
  1. .gitignore +2 -1
  2. README.md +2 -2
  3. prmu.py +103 -0
  4. stats.ipynb +241 -0
.gitignore CHANGED
@@ -1,3 +1,4 @@
 src/
 .DS_Store
-.idea/
+.idea/
+.ipynb_checkpoints/
README.md CHANGED
@@ -5,7 +5,7 @@
 TALN-Archives is a dataset for benchmarking keyphrase extraction and generation models.
 The dataset is composed of 1207 abstracts of scientific papers in French collected from the [TALN Archives](http://talnarchives.atala.org/).
 Keyphrases were annotated by authors in an uncontrolled setting (that is, not limited to thesaurus entries).
-English translations of title/abstract/keyphrases are also available for a subset of the documents, allowing to experiment with cross-lingual / multilingual keyphrase generation.
+English translations of title/abstract/keyphrases are also available for a subset of the documents (456 fully- and 719 partially-translated documents), allowing to experiment with cross-lingual / multilingual keyphrase generation.
 Details about the dataset can be found in the original paper [(Boudin, 2013)][boudin-2013].
 
 Reference (indexer-assigned) keyphrases are also categorized under the PRMU (<u>P</u>resent-<u>R</u>eordered-<u>M</u>ixed-<u>U</u>nseen) scheme as proposed in [(Boudin and Gallina, 2021)][boudin-2021].
@@ -20,7 +20,7 @@ The dataset contains the following test split:
 
 | Split | # documents | #words | # keyphrases | % Present | % Reordered | % Mixed | % Unseen |
 | :--------- | ----------: | -----: | -----------: | --------: | ----------: | ------: | -------: |
-| Test | 1207 | - | - | - | - | - | - |
+| Test | 1207 | 138.3 | 4.12 | 53.83 | 12.32 | 21.69 | 12.16 |
 
 The following data fields are available :
 
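For readers unfamiliar with the PRMU scheme mentioned in the README, here is a minimal illustrative sketch of the four categories on a toy, pre-tokenized document. The function name prmu_category and the example strings are made up for illustration; the actual categorization is performed by the prmu.py script added below, which tokenizes with spaCy, stems with NLTK's French Snowball stemmer, and checks the title and abstract separately.

def prmu_category(kp_tokens, doc_tokens):
    """Toy PRMU classifier over pre-tokenized (ideally stemmed) sequences."""
    # Present: the keyphrase occurs as a contiguous subsequence of the document
    if any(doc_tokens[i:i + len(kp_tokens)] == kp_tokens
           for i in range(len(doc_tokens) - len(kp_tokens) + 1)):
        return "P"
    present = [w for w in kp_tokens if w in doc_tokens]
    if len(present) == len(kp_tokens):
        return "R"  # Reordered: all words occur, but not contiguously in this order
    if present:
        return "M"  # Mixed: only some of the words occur
    return "U"      # Unseen: none of the words occur

doc = "analyse syntaxique du français par transitions".split()
for kp in ["analyse syntaxique", "syntaxique analyse",
           "analyse sémantique", "traduction automatique"]:
    print(kp, "->", prmu_category(kp.split(), doc))
# analyse syntaxique -> P, syntaxique analyse -> R,
# analyse sémantique -> M, traduction automatique -> U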
prmu.py ADDED
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+import sys
+import json
+import spacy
+
+from nltk.stem.snowball import SnowballStemmer as Stemmer
+
+nlp = spacy.load("fr_core_news_sm")
+
+# https://spacy.io/usage/linguistic-features#native-tokenizer-additions
+
+from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
+from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS
+from spacy.util import compile_infix_regex
+
+# Modify tokenizer infix patterns
+infixes = (
+    LIST_ELLIPSES
+    + LIST_ICONS
+    + [
+        r"(?<=[0-9])[+\-\*^](?=[0-9-])",
+        r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
+            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
+        ),
+        r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
+        # ✅ Commented out regex that splits on hyphens between letters:
+        # r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
+        r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
+    ]
+)
+
+infix_re = compile_infix_regex(infixes)
+nlp.tokenizer.infix_finditer = infix_re.finditer
+
+
+def contains(subseq, inseq):
+    return any(inseq[pos:pos + len(subseq)] == subseq for pos in range(0, len(inseq) - len(subseq) + 1))
+
+
+def find_pmru(tok_title, tok_text, tok_kp):
+    """Find PRMU category of a given keyphrase."""
+
+    # if kp is present
+    if contains(tok_kp, tok_title) or contains(tok_kp, tok_text):
+        return "P"
+
+    # if kp is considered as absent
+    else:
+
+        # find present and absent words
+        present_words = [w for w in tok_kp if w in tok_title or w in tok_text]
+
+        # if "all" words are present
+        if len(present_words) == len(tok_kp):
+            return "R"
+        # if "some" words are present
+        elif len(present_words) > 0:
+            return "M"
+        # if "no" words are present
+        else:
+            return "U"
+
+
+if __name__ == '__main__':
+
+    data = []
+
+    # read the dataset
+    with open(sys.argv[1], 'r') as f:
+        # loop through the documents
+        for line in f:
+            doc = json.loads(line.strip())
+
+            print(doc['id'])
+
+            title_spacy = nlp(doc['title'])
+            abstract_spacy = nlp(doc['abstract'])
+
+            title_tokens = [token.text for token in title_spacy]
+            abstract_tokens = [token.text for token in abstract_spacy]
+
+            title_stems = [Stemmer('french').stem(w.lower()) for w in title_tokens]
+            abstract_stems = [Stemmer('french').stem(w.lower()) for w in abstract_tokens]
+
+            keyphrases_stems = []
+            for keyphrase in doc['keyphrases']:
+                keyphrase_spacy = nlp(keyphrase)
+                keyphrase_tokens = [token.text for token in keyphrase_spacy]
+                keyphrase_stems = [Stemmer('french').stem(w.lower()) for w in keyphrase_tokens]
+                keyphrases_stems.append(keyphrase_stems)
+
+            prmu = [find_pmru(title_stems, abstract_stems, kp) for kp in keyphrases_stems]
+
+            if doc['prmu'] != prmu:
+                print("PRMU categories are not identical!")
+
+            doc['prmu'] = prmu
+            data.append(json.dumps(doc))
+
+    # write the json
+    with open(sys.argv[2], 'w') as o:
+        o.write("\n".join(data))
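As a usage sketch for the script above (the file names are placeholders, not taken from the repository): assuming the French spaCy model has been installed with python -m spacy download fr_core_news_sm, and that the dataset is available as a JSON-lines file where each document carries the id, title, abstract, keyphrases and prmu fields read by the script, it can be run as python prmu.py input.jsonl output.jsonl. It recomputes the PRMU category of every reference keyphrase, warns when the recomputed categories differ from the stored ones, and writes the updated documents to the output file.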
stats.ipynb ADDED
@@ -0,0 +1,241 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "eba2ee81",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "No config specified, defaulting to: taln_archives/raw\n",
+      "Reusing dataset taln_archives (/Users/boudin-f/.cache/huggingface/datasets/taln-ls2n___taln_archives/raw/1.0.0/edf5310cc76b9c9758d5785dec61311aadfc728038d62dcb784086552dce4173)\n"
+     ]
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "79acb44f1fca4c428c183d6762f7bb94",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "  0%|          | 0/1 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "from datasets import load_dataset\n",
+    "\n",
+    "dataset = load_dataset('taln-ls2n/taln-archives')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "4ba72244",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "c6b22792bf51480bac2b203ef7549c3c",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "  0%|          | 0/1207 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "statistics for test\n",
+      "# keyphrases: 4.12\n",
+      "% P: 53.83\n",
+      "% R: 12.32\n",
+      "% M: 21.69\n",
+      "% U: 12.16\n"
+     ]
+    }
+   ],
+   "source": [
+    "from tqdm.notebook import tqdm\n",
+    "\n",
+    "for split in ['test']:\n",
+    "    \n",
+    "    P, R, M, U, nb_kps = [], [], [], [], []\n",
+    "    \n",
+    "    for sample in tqdm(dataset[split]):\n",
+    "        nb_kps.append(len(sample[\"keyphrases\"]))\n",
+    "        P.append(sample[\"prmu\"].count(\"P\") / nb_kps[-1])\n",
+    "        R.append(sample[\"prmu\"].count(\"R\") / nb_kps[-1])\n",
+    "        M.append(sample[\"prmu\"].count(\"M\") / nb_kps[-1])\n",
+    "        U.append(sample[\"prmu\"].count(\"U\") / nb_kps[-1])\n",
+    "    \n",
+    "    print(\"statistics for {}\".format(split))\n",
+    "    print(\"# keyphrases: {:.2f}\".format(sum(nb_kps)/len(nb_kps)))\n",
+    "    print(\"% P: {:.2f}\".format(sum(P)/len(P)*100))\n",
+    "    print(\"% R: {:.2f}\".format(sum(R)/len(R)*100))\n",
+    "    print(\"% M: {:.2f}\".format(sum(M)/len(M)*100))\n",
+    "    print(\"% U: {:.2f}\".format(sum(U)/len(U)*100))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "52dda817",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import spacy\n",
+    "\n",
+    "nlp = spacy.load(\"fr_core_news_sm\")\n",
+    "\n",
+    "# https://spacy.io/usage/linguistic-features#native-tokenizer-additions\n",
+    "\n",
+    "from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER\n",
+    "from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS\n",
+    "from spacy.util import compile_infix_regex\n",
+    "\n",
+    "# Modify tokenizer infix patterns\n",
+    "infixes = (\n",
+    "    LIST_ELLIPSES\n",
+    "    + LIST_ICONS\n",
+    "    + [\n",
+    "        r\"(?<=[0-9])[+\\-\\*^](?=[0-9-])\",\n",
+    "        r\"(?<=[{al}{q}])\\.(?=[{au}{q}])\".format(\n",
+    "            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES\n",
+    "        ),\n",
+    "        r\"(?<=[{a}]),(?=[{a}])\".format(a=ALPHA),\n",
+    "        # ✅ Commented out regex that splits on hyphens between letters:\n",
+    "        # r\"(?<=[{a}])(?:{h})(?=[{a}])\".format(a=ALPHA, h=HYPHENS),\n",
+    "        r\"(?<=[{a}0-9])[:<>=/](?=[{a}])\".format(a=ALPHA),\n",
+    "    ]\n",
+    ")\n",
+    "\n",
+    "infix_re = compile_infix_regex(infixes)\n",
+    "nlp.tokenizer.infix_finditer = infix_re.finditer"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "047ab1cc",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "806cc5748b4a40b490a138d38a3590fb",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "  0%|          | 0/1207 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "statistics for test\n",
+      "avg doc len: 138.3\n"
+     ]
+    }
+   ],
+   "source": [
+    "for split in ['test']:\n",
+    "    doc_len = []\n",
+    "    for sample in tqdm(dataset[split]):\n",
+    "        doc_len.append(len(nlp(sample[\"title\"])) + len(nlp(sample[\"abstract\"])))\n",
+    "    \n",
+    "    print(\"statistics for {}\".format(split))\n",
+    "    print(\"avg doc len: {:.1f}\".format(sum(doc_len)/len(doc_len)))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "id": "03a61ca7",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "035722c1a78f42e6be7bd8333df9b124",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "  0%|          | 0/1207 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "fully_translated: 456 / 0.38\n",
+      "partially_translated: 719 / 0.60\n"
+     ]
+    }
+   ],
+   "source": [
+    "fully_translated = 0\n",
+    "partially_translated = 0\n",
+    "for sample in tqdm(dataset['test']):\n",
+    "    title, abstract, keywords = sample[\"translation\"]\n",
+    "    if len(title) and len(abstract) and len(keywords):\n",
+    "        fully_translated += 1\n",
+    "    elif len(title) or len(abstract) or len(keywords):\n",
+    "        partially_translated += 1\n",
+    "print(\"fully_translated: {} / {:.2f}\".format(fully_translated, fully_translated/len(dataset['test'])))\n",
+    "print(\"partially_translated: {} / {:.2f}\".format(partially_translated, partially_translated/len(dataset['test'])))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3f8dec4a",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
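The statistics cells above can also be folded into a single standalone sketch (assuming the same taln-ls2n/taln-archives dataset and the title, abstract, keyphrases and prmu fields used in the notebook; this version keeps spaCy's default French tokenizer rather than the modified infix rules, so the average word count may differ slightly from the 138.3 reported above):

from datasets import load_dataset
import spacy

nlp = spacy.load("fr_core_news_sm")
dataset = load_dataset("taln-ls2n/taln-archives", split="test")

doc_len, nb_kps = [], []
ratios = {c: [] for c in "PRMU"}
for sample in dataset:
    # document length in tokens (title + abstract)
    doc_len.append(len(nlp(sample["title"])) + len(nlp(sample["abstract"])))
    # per-document share of each PRMU category
    nb_kps.append(len(sample["keyphrases"]))
    for c in "PRMU":
        ratios[c].append(sample["prmu"].count(c) / nb_kps[-1])

print("# documents: {}".format(len(dataset)))
print("avg # words: {:.1f}".format(sum(doc_len) / len(doc_len)))
print("avg # keyphrases: {:.2f}".format(sum(nb_kps) / len(nb_kps)))
for c in "PRMU":
    print("% {}: {:.2f}".format(c, sum(ratios[c]) / len(ratios[c]) * 100))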