Languages: English
Multilinguality: monolingual
Size Categories: n<1K
boudinfl committed
Commit 70c2b11
1 Parent(s): 49c1926

Adding stats

Files changed (3)
  1. .gitignore +1 -0
  2. README.md +2 -2
  3. stats.ipynb +184 -0
.gitignore CHANGED
@@ -2,3 +2,4 @@
  .idea/
  src/
  **.DS_Store
+ .ipynb_checkpoints/
README.md CHANGED
@@ -57,8 +57,8 @@ The dataset is divided into the following two splits:
 
  | Split | # documents | #words | # keyphrases | % Present | % Reordered | % Mixed | % Unseen |
  | :--------- |------------:|-------:|-------------:|----------:|------------:|--------:|---------:|
- | Train | 144 | - | - | - | - | - | - |
- | Test | 100 | - | - | - | - | - | - |
+ | Train | 144 | 184.6 | 15.44 | 42.16 | 7.36 | 26.85 | 23.63 |
+ | Test | 100 | 203.1 | 14.66 | 40.11 | 8.34 | 27.12 | 24.43 |
 
  Statistics (#words, PRMU distributions) are computed using the title/abstract and not the full text of scientific papers.
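The % Present / % Reordered / % Mixed / % Unseen columns follow the PRMU scheme: each gold keyphrase is categorized by how it appears in the source text. Roughly, a keyphrase is Present when it occurs contiguously in the text, Reordered when all of its words occur but not contiguously, Mixed when only some of its words occur, and Unseen when none do. A minimal sketch of the idea (the helper below is hypothetical, for illustration only; the dataset ships precomputed `prmu` labels, and the reference implementation typically also stems words before matching):

```python
# Hypothetical sketch of the PRMU categories; not the repository's code.
def prmu_category(keyphrase, text):
    kp, words = keyphrase.split(), text.split()
    # Present: the keyphrase occurs as a contiguous subsequence of the text.
    if any(words[i:i + len(kp)] == kp for i in range(len(words) - len(kp) + 1)):
        return "P"
    seen = sum(w in words for w in kp)
    if seen == len(kp):
        return "R"  # Reordered: every word occurs, but not contiguously
    if seen > 0:
        return "M"  # Mixed: only some of the words occur
    return "U"      # Unseen: no word of the keyphrase occurs

prmu_category("grid services", "grid services for distributed system integration")  # 'P'
```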
stats.ipynb ADDED
@@ -0,0 +1,184 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "eba2ee81",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "No config specified, defaulting to: sem_eval/raw\n",
+ "Reusing dataset sem_eval (/Users/boudin-f/.cache/huggingface/datasets/taln-ls2n___sem_eval/raw/1.0.0/b40e008b5c96137733e24d9d244d70aa1fe6353ee65e180d8f6948af4027fbe4)\n"
+ ]
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "9379b6f5f5d1483ab184db7486ac67b5",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "  0%|          | 0/2 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "from datasets import load_dataset\n",
+ "\n",
+ "dataset = load_dataset('taln-ls2n/semeval-2010-pre')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "4ba72244",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "c14c3725089d4b5284e36df4cf90d3da",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "  0%|          | 0/100 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "# keyphrases: 14.66\n",
+ "% P: 40.11\n",
+ "% R: 8.34\n",
+ "% M: 27.12\n",
+ "% U: 24.43\n"
+ ]
+ }
+ ],
+ "source": [
+ "from tqdm.notebook import tqdm\n",
+ "\n",
+ "P, R, M, U, nb_kps = [], [], [], [], []\n",
+ " \n",
+ "for sample in tqdm(dataset['test']):\n",
+ "    nb_kps.append(len(sample[\"keyphrases\"]))\n",
+ "    P.append(sample[\"prmu\"].count(\"P\") / nb_kps[-1])\n",
+ "    R.append(sample[\"prmu\"].count(\"R\") / nb_kps[-1])\n",
+ "    M.append(sample[\"prmu\"].count(\"M\") / nb_kps[-1])\n",
+ "    U.append(sample[\"prmu\"].count(\"U\") / nb_kps[-1])\n",
+ " \n",
+ "print(\"# keyphrases: {:.2f}\".format(sum(nb_kps)/len(nb_kps)))\n",
+ "print(\"% P: {:.2f}\".format(sum(P)/len(P)*100))\n",
+ "print(\"% R: {:.2f}\".format(sum(R)/len(R)*100))\n",
+ "print(\"% M: {:.2f}\".format(sum(M)/len(M)*100))\n",
+ "print(\"% U: {:.2f}\".format(sum(U)/len(U)*100))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "52dda817",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import spacy\n",
+ "\n",
+ "nlp = spacy.load(\"en_core_web_sm\")\n",
+ "\n",
+ "# https://spacy.io/usage/linguistic-features#native-tokenizer-additions\n",
+ "\n",
+ "from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER\n",
+ "from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS\n",
+ "from spacy.util import compile_infix_regex\n",
+ "\n",
+ "# Modify tokenizer infix patterns\n",
+ "infixes = (\n",
+ "    LIST_ELLIPSES\n",
+ "    + LIST_ICONS\n",
+ "    + [\n",
+ "        r\"(?<=[0-9])[+\\-\\*^](?=[0-9-])\",\n",
+ "        r\"(?<=[{al}{q}])\\.(?=[{au}{q}])\".format(\n",
+ "            al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES\n",
+ "        ),\n",
+ "        r\"(?<=[{a}]),(?=[{a}])\".format(a=ALPHA),\n",
+ "        # ✅ Commented out regex that splits on hyphens between letters:\n",
+ "        # r\"(?<=[{a}])(?:{h})(?=[{a}])\".format(a=ALPHA, h=HYPHENS),\n",
+ "        r\"(?<=[{a}0-9])[:<>=/](?=[{a}])\".format(a=ALPHA),\n",
+ "    ]\n",
+ ")\n",
+ "\n",
+ "infix_re = compile_infix_regex(infixes)\n",
+ "nlp.tokenizer.infix_finditer = infix_re.finditer"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "047ab1cc",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "209e7faf7c454aeabc936c07919ac1fe",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "  0%|          | 0/100 [00:00<?, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "avg doc len: 203.1\n"
+ ]
+ }
+ ],
+ "source": [
+ "doc_len = []\n",
+ "for sample in tqdm(dataset['test']):\n",
+ "    doc_len.append(len(nlp(sample[\"title\"])) + len(nlp(sample[\"abstract\"])))\n",
+ " \n",
+ "print(\"avg doc len: {:.1f}\".format(sum(doc_len)/len(doc_len)))"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.10"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
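
The notebook only recomputes the Test row; the Train row in the README table was presumably produced by running the same loops over `dataset['train']`. A minimal sketch of that analogous computation (same fields as in the notebook; reproducing the 184.6 #words figure would also require the hyphen-preserving tokenizer customization shown above):

```python
from datasets import load_dataset
from tqdm import tqdm

dataset = load_dataset('taln-ls2n/semeval-2010-pre')

# Same keyphrase/PRMU statistics as in stats.ipynb, over the train split.
P, R, M, U, nb_kps = [], [], [], [], []
for sample in tqdm(dataset['train']):
    nb_kps.append(len(sample["keyphrases"]))
    P.append(sample["prmu"].count("P") / nb_kps[-1])
    R.append(sample["prmu"].count("R") / nb_kps[-1])
    M.append(sample["prmu"].count("M") / nb_kps[-1])
    U.append(sample["prmu"].count("U") / nb_kps[-1])

print("# keyphrases: {:.2f}".format(sum(nb_kps) / len(nb_kps)))
for name, dist in zip("PRMU", (P, R, M, U)):
    print("% {}: {:.2f}".format(name, sum(dist) / len(dist) * 100))
```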