Languages: French
Multilinguality: monolingual
Size Categories: n<1K
Language Creators: unknown
Annotations Creators: unknown
boudinfl committed
Commit 34c0079
1 Parent(s): 45d173d

Add stats and prmu script

Files changed (4):
  1. .gitignore +2 -1
  2. README.md +1 -1
  3. prmu.py +103 -0
  4. stats.ipynb +192 -0
.gitignore CHANGED
@@ -1,4 +1,5 @@
 
 **.DS_Store
 .idea
-src/
+src/
+.ipynb_checkpoints/
README.md CHANGED
@@ -19,7 +19,7 @@ The dataset is divided into the following three splits:
 
 | Split | # documents | # words | # keyphrases | % Present | % Reordered | % Mixed | % Unseen |
 | :--------- | ----------: | -----: | -----------: | --------: | ----------: | ------: | -------: |
-| Test | 100 | | | | | | |
+| Test | 100 | 306.9 | 9.64 | 95.91 | 1.40 | 0.85 | 1.84 |
 
 The following data fields are available:
 
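(For context: the four rightmost columns report the PRMU categories computed by the prmu.py script added below. A keyphrase is Present if its stemmed form occurs contiguously in the title or abstract, Reordered if all of its stemmed words occur but not contiguously, Mixed if only some of them occur, and Unseen if none do.)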
prmu.py ADDED
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+import sys
+import json
+import spacy
+
+from nltk.stem.snowball import SnowballStemmer as Stemmer
+
+nlp = spacy.load("fr_core_news_sm")
+
+# https://spacy.io/usage/linguistic-features#native-tokenizer-additions
+
+from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
+from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS
+from spacy.util import compile_infix_regex
+
+# Modify tokenizer infix patterns
+infixes = (
+    LIST_ELLIPSES
+    + LIST_ICONS
+    + [
+        r"(?<=[0-9])[+\-\*^](?=[0-9-])",
+        r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
+            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
+        ),
+        r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
+        # ✅ Commented out regex that splits on hyphens between letters:
+        # r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
+        r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
+    ]
+)
+
+infix_re = compile_infix_regex(infixes)
+nlp.tokenizer.infix_finditer = infix_re.finditer
+
+
+def contains(subseq, inseq):
+    return any(inseq[pos:pos + len(subseq)] == subseq for pos in range(0, len(inseq) - len(subseq) + 1))
+
+
+def find_prmu(tok_title, tok_text, tok_kp):
+    """Find PRMU category of a given keyphrase."""
+
+    # if kp is present
+    if contains(tok_kp, tok_title) or contains(tok_kp, tok_text):
+        return "P"
+
+    # if kp is considered as absent
+    else:
+
+        # find present and absent words
+        present_words = [w for w in tok_kp if w in tok_title or w in tok_text]
+
+        # if "all" words are present
+        if len(present_words) == len(tok_kp):
+            return "R"
+        # if "some" words are present
+        elif len(present_words) > 0:
+            return "M"
+        # if "no" words are present
+        else:
+            return "U"
+
+
+if __name__ == '__main__':
+
+    data = []
+
+    # read the dataset
+    with open(sys.argv[1], 'r') as f:
+        # loop through the documents
+        for line in f:
+            doc = json.loads(line.strip())
+
+            print(doc['id'])
+
+            title_spacy = nlp(doc['title'])
+            abstract_spacy = nlp(doc['abstract'])
+
+            title_tokens = [token.text for token in title_spacy]
+            abstract_tokens = [token.text for token in abstract_spacy]
+
+            title_stems = [Stemmer('french').stem(w.lower()) for w in title_tokens]
+            abstract_stems = [Stemmer('french').stem(w.lower()) for w in abstract_tokens]
+
+            keyphrases_stems = []
+            for keyphrase in doc['keyphrases']:
+                keyphrase_spacy = nlp(keyphrase)
+                keyphrase_tokens = [token.text for token in keyphrase_spacy]
+                keyphrase_stems = [Stemmer('french').stem(w.lower()) for w in keyphrase_tokens]
+                keyphrases_stems.append(keyphrase_stems)
+
+            prmu = [find_prmu(title_stems, abstract_stems, kp) for kp in keyphrases_stems]
+
+            if doc['prmu'] != prmu:
+                print("PRMU categories are not identical!")
+
+            doc['prmu'] = prmu
+            data.append(json.dumps(doc))
+
+    # write the json
+    with open(sys.argv[2], 'w') as o:
+        o.write("\n".join(data))
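The infix customisation above keeps hyphenated words together (spaCy's default rule that splits on hyphens between letters is commented out), so keyphrases like "porte-parole" survive tokenisation intact. As a quick sanity check of the categorisation logic, here is a minimal sketch over invented stem sequences; importing prmu requires the fr_core_news_sm model to be installed, since the module loads it at import time:

from prmu import find_prmu

# toy stem sequences, invented for illustration only
title = ["grev", "general"]                    # stemmed title tokens
text = ["la", "grev", "paralys", "le", "pay"]  # stemmed abstract tokens

assert find_prmu(title, text, ["grev", "general"]) == "P"   # contiguous match -> Present
assert find_prmu(title, text, ["general", "grev"]) == "R"   # all words present, order differs -> Reordered
assert find_prmu(title, text, ["grev", "national"]) == "M"  # only some words present -> Mixed
assert find_prmu(title, text, ["manifest"]) == "U"          # no word present -> Unseen

The script itself is meant to be run as python prmu.py <input.jsonl> <output.jsonl>: it re-computes the prmu field of every document and prints a warning whenever the recomputed categories differ from the stored ones.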
stats.ipynb ADDED
@@ -0,0 +1,192 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "eba2ee81",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "No config specified, defaulting to: inspec/raw\n",
+      "Reusing dataset inspec (/Users/boudin-f/.cache/huggingface/datasets/taln-ls2n___inspec/raw/1.0.0/0980ea60c840383eb282b6272baba681a578ed092f61438b008254c70d20f32b)\n"
+     ]
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "2ad1b39fd3294bcfabe57a9acf24986e",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "  0%|          | 0/1 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "from datasets import load_dataset\n",
+    "\n",
+    "dataset = load_dataset('taln-ls2n/wikinews-fr-100')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "4ba72244",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "9bded16e4b0a43ad8907144bce073d0c",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "  0%|          | 0/100 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "statistics for test\n",
+      "# keyphrases: 9.64\n",
+      "% P: 95.91\n",
+      "% R: 1.40\n",
+      "% M: 0.85\n",
+      "% U: 1.84\n"
+     ]
+    }
+   ],
+   "source": [
+    "from tqdm.notebook import tqdm\n",
+    "\n",
+    "for split in ['test']:\n",
+    "    \n",
+    "    P, R, M, U, nb_kps = [], [], [], [], []\n",
+    "    \n",
+    "    for sample in tqdm(dataset[split]):\n",
+    "        nb_kps.append(len(sample[\"keyphrases\"]))\n",
+    "        P.append(sample[\"prmu\"].count(\"P\") / nb_kps[-1])\n",
+    "        R.append(sample[\"prmu\"].count(\"R\") / nb_kps[-1])\n",
+    "        M.append(sample[\"prmu\"].count(\"M\") / nb_kps[-1])\n",
+    "        U.append(sample[\"prmu\"].count(\"U\") / nb_kps[-1])\n",
+    "    \n",
+    "    print(\"statistics for {}\".format(split))\n",
+    "    print(\"# keyphrases: {:.2f}\".format(sum(nb_kps)/len(nb_kps)))\n",
+    "    print(\"% P: {:.2f}\".format(sum(P)/len(P)*100))\n",
+    "    print(\"% R: {:.2f}\".format(sum(R)/len(R)*100))\n",
+    "    print(\"% M: {:.2f}\".format(sum(M)/len(M)*100))\n",
+    "    print(\"% U: {:.2f}\".format(sum(U)/len(U)*100))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "52dda817",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import spacy\n",
+    "\n",
+    "nlp = spacy.load(\"fr_core_news_sm\")\n",
+    "\n",
+    "# https://spacy.io/usage/linguistic-features#native-tokenizer-additions\n",
+    "\n",
+    "from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER\n",
+    "from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS\n",
+    "from spacy.util import compile_infix_regex\n",
+    "\n",
+    "# Modify tokenizer infix patterns\n",
+    "infixes = (\n",
+    "    LIST_ELLIPSES\n",
+    "    + LIST_ICONS\n",
+    "    + [\n",
+    "        r\"(?<=[0-9])[+\\-\\*^](?=[0-9-])\",\n",
+    "        r\"(?<=[{al}{q}])\\.(?=[{au}{q}])\".format(\n",
+    "            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES\n",
+    "        ),\n",
+    "        r\"(?<=[{a}]),(?=[{a}])\".format(a=ALPHA),\n",
+    "        # ✅ Commented out regex that splits on hyphens between letters:\n",
+    "        # r\"(?<=[{a}])(?:{h})(?=[{a}])\".format(a=ALPHA, h=HYPHENS),\n",
+    "        r\"(?<=[{a}0-9])[:<>=/](?=[{a}])\".format(a=ALPHA),\n",
+    "    ]\n",
+    ")\n",
+    "\n",
+    "infix_re = compile_infix_regex(infixes)\n",
+    "nlp.tokenizer.infix_finditer = infix_re.finditer"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "047ab1cc",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "135b8cd19d054319a445df200d82cc65",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "  0%|          | 0/100 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "statistics for test\n",
+      "avg doc len: 306.9\n"
+     ]
+    }
+   ],
+   "source": [
+    "for split in ['test']:\n",
+    "    doc_len = []\n",
+    "    for sample in tqdm(dataset[split]):\n",
+    "        doc_len.append(len(nlp(sample[\"title\"])) + len(nlp(sample[\"abstract\"])))\n",
+    "    \n",
+    "    print(\"statistics for {}\".format(split))\n",
+    "    print(\"avg doc len: {:.1f}\".format(sum(doc_len)/len(doc_len)))\n",
+    "    "
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
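For reference, the notebook's keyphrase-statistics cell boils down to a short standalone script (a sketch assuming only the datasets library; the document-length average additionally needs the customised spaCy tokenizer from the third cell). Note that the category percentages are macro-averaged: each document's proportions weigh equally, regardless of how many keyphrases it has.

from datasets import load_dataset

dataset = load_dataset("taln-ls2n/wikinews-fr-100")

for split in dataset:
    samples = list(dataset[split])
    # average number of gold keyphrases per document
    kp_counts = [len(s["keyphrases"]) for s in samples]
    print("statistics for {}".format(split))
    print("# keyphrases: {:.2f}".format(sum(kp_counts) / len(kp_counts)))
    # macro-averaged share of each PRMU category
    for cat in "PRMU":
        props = [s["prmu"].count(cat) / len(s["keyphrases"]) for s in samples]
        print("% {}: {:.2f}".format(cat, sum(props) / len(props) * 100))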