boudinfl committed
Commit 2dce2d1
Parent: 92d5020

adding statistics

Files changed (2):
  1. README.md +6 -6
  2. stats.ipynb +177 -0
README.md CHANGED
@@ -4,7 +4,7 @@

 Inspec is a dataset for benchmarking keyphrase extraction and generation models.
 The dataset is composed of 2,000 abstracts of scientific papers collected from the [Inspec database](https://www.theiet.org/resources/inspec/).
-Keyphrases were annotated by professional indexers in an uncontrolled setting, that is, they are not limited to entries in a thesaurus.
+Keyphrases were annotated by professional indexers in an uncontrolled setting (that is, not limited to thesaurus entries).
 Details about the inspec dataset can be found in the original paper:
 - Anette Hulth. 2003.
   [Improved automatic keyword extraction given more linguistic knowledge](https://aclanthology.org/W03-1028).
@@ -19,11 +19,11 @@ Reference (indexer-assigned) keyphrases are also categorized under the PRMU (<u>

 The dataset is divided into the following three splits:

-| Split      | # documents |
-| :--------- | ----------: |
-| Train      | 1,000 |
-| Test       | 500 |
-| Validation | 500 |
+| Split      | # documents | Avg. # keyphrases | % Present | % Reordered | % Mixed | % Unseen |
+| :--------- | ----------: | ----------------: | --------: | ----------: | ------: | -------: |
+| Train      | 1,000 | 9.79 | 77.83 | 9.90 | 6.30 | 5.98 |
+| Test       | 500 | 9.83 | 78.49 | 9.82 | 6.76 | 4.92 |
+| Validation | 500 | 9.15 | 77.90 | 9.82 | 6.74 | 5.54 |

 The following data fields are available :

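The percentage columns in the new table are macro-averages over documents: for each abstract, the share of its reference keyphrases carrying a given PRMU label is computed from the `prmu` field, and these per-document shares are averaged over the split. A minimal sketch of that computation for the `% Present` column of the test split (condensing the loop in stats.ipynb below; it assumes the `datasets` library is installed):

```python
from datasets import load_dataset

# Load the dataset from the Hugging Face Hub (same call as in stats.ipynb).
dataset = load_dataset("taln-ls2n/inspec")

# Per-document share of reference keyphrases labelled "P" (present) in the `prmu` field.
shares = [
    sample["prmu"].count("P") / len(sample["keyphrases"])
    for sample in dataset["test"]
]

# Macro-average over documents; given the notebook output below, this should print roughly 78.49.
print("% Present (test): {:.2f}".format(100 * sum(shares) / len(shares)))
```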
stats.ipynb ADDED
@@ -0,0 +1,177 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "eba2ee81",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "No config specified, defaulting to: inspec/raw\n",
+      "Reusing dataset inspec (/Users/boudin-f/.cache/huggingface/datasets/taln-ls2n___inspec/raw/1.1.0/156f7172f117a205a845b4378fa15df490952487673e2d2fff33b39c1173b661)\n"
+     ]
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "1ee1af5876804725adcd149763bd27b8",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       " 0%| | 0/3 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "from datasets import load_dataset\n",
+    "\n",
+    "dataset = load_dataset('taln-ls2n/inspec')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "id": "4ba72244",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "a14738b9d72b45d29d24cd764e272fd3",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       " 0%| | 0/1000 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "statistics for train\n",
+      "# keyphrases: 9.79\n",
+      "% P: 77.83\n",
+      "% R: 9.90\n",
+      "% M: 6.30\n",
+      "% U: 5.98\n"
+     ]
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "59a4352f94d84ada80beb34607d63425",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       " 0%| | 0/500 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "statistics for validation\n",
+      "# keyphrases: 9.15\n",
+      "% P: 77.90\n",
+      "% R: 9.82\n",
+      "% M: 6.74\n",
+      "% U: 5.54\n"
+     ]
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "77346b747c5248fb9566d8665c7017bb",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       " 0%| | 0/500 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "statistics for test\n",
+      "# keyphrases: 9.83\n",
+      "% P: 78.49\n",
+      "% R: 9.82\n",
+      "% M: 6.76\n",
+      "% U: 4.92\n"
+     ]
+    }
+   ],
+   "source": [
+    "from tqdm.notebook import tqdm\n",
+    "\n",
+    "\n",
+    "\n",
+    "for split in ['train', 'validation', 'test']:\n",
+    "    \n",
+    "    P, R, M, U, nb_kps = [], [], [], [], []\n",
+    "    \n",
+    "    for sample in tqdm(dataset[split]):\n",
+    "        nb_kps.append(len(sample[\"keyphrases\"]))\n",
+    "        P.append(sample[\"prmu\"].count(\"P\") / nb_kps[-1])\n",
+    "        R.append(sample[\"prmu\"].count(\"R\") / nb_kps[-1])\n",
+    "        M.append(sample[\"prmu\"].count(\"M\") / nb_kps[-1])\n",
+    "        U.append(sample[\"prmu\"].count(\"U\") / nb_kps[-1])\n",
+    "    \n",
+    "    print(\"statistics for {}\".format(split))\n",
+    "    print(\"# keyphrases: {:.2f}\".format(sum(nb_kps)/len(nb_kps)))\n",
+    "    print(\"% P: {:.2f}\".format(sum(P)/len(P)*100))\n",
+    "    print(\"% R: {:.2f}\".format(sum(R)/len(R)*100))\n",
+    "    print(\"% M: {:.2f}\".format(sum(M)/len(M)*100))\n",
+    "    print(\"% U: {:.2f}\".format(sum(U)/len(U)*100))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4e4dcdab",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
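Note on the statistics above: the PRMU percentages are macro-averaged, i.e. each document contributes the proportion of its own keyphrases per label with equal weight, rather than pooling all keyphrases of a split together. Because each reference keyphrase carries a single PRMU label, the four percentage columns sum to roughly 100 for every split.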