stefan-it committed on
Commit
64e8aea
1 Parent(s): 946103b

notebook: add example for creating the GermEval 2014 dataset with Wikipedia examples filtered out

Browse files
Files changed (1) hide show
  1. CreateDataset.ipynb +530 -0
CreateDataset.ipynb ADDED
@@ -0,0 +1,530 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "1a6ca1fd-66c6-4da6-af7f-35879fb663ba",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stdout",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "Requirement already satisfied: tldextract in /home/stefan/.venvs/flair/lib/python3.12/site-packages (5.1.2)\n",
14
+ "Requirement already satisfied: idna in /home/stefan/.venvs/flair/lib/python3.12/site-packages (from tldextract) (3.7)\n",
15
+ "Requirement already satisfied: requests>=2.1.0 in /home/stefan/.venvs/flair/lib/python3.12/site-packages (from tldextract) (2.32.2)\n",
16
+ "Requirement already satisfied: requests-file>=1.4 in /home/stefan/.venvs/flair/lib/python3.12/site-packages (from tldextract) (2.1.0)\n",
17
+ "Requirement already satisfied: filelock>=3.0.8 in /home/stefan/.venvs/flair/lib/python3.12/site-packages (from tldextract) (3.13.1)\n",
18
+ "Requirement already satisfied: charset-normalizer<4,>=2 in /home/stefan/.venvs/flair/lib/python3.12/site-packages (from requests>=2.1.0->tldextract) (3.3.2)\n",
19
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in /home/stefan/.venvs/flair/lib/python3.12/site-packages (from requests>=2.1.0->tldextract) (1.26.18)\n",
20
+ "Requirement already satisfied: certifi>=2017.4.17 in /home/stefan/.venvs/flair/lib/python3.12/site-packages (from requests>=2.1.0->tldextract) (2024.2.2)\n"
21
+ ]
22
+ }
23
+ ],
24
+ "source": [
25
+ "!pip3 install tldextract"
26
+ ]
27
+ },
28
+ {
29
+ "cell_type": "code",
30
+ "execution_count": 2,
31
+ "id": "4906d36b-6a06-4987-b032-4de6a11bfc65",
32
+ "metadata": {},
33
+ "outputs": [],
34
+ "source": [
35
+ "import random\n",
36
+ "import tldextract\n",
37
+ "\n",
38
+ "from collections import Counter\n",
39
+ "from tabulate import tabulate"
40
+ ]
41
+ },
42
+ {
43
+ "cell_type": "code",
44
+ "execution_count": 3,
45
+ "id": "0107c076-c40a-41ae-bf51-92ccddaf2e61",
46
+ "metadata": {},
47
+ "outputs": [],
48
+ "source": [
49
+ "dataset_splits = {\n",
50
+ " \"train\": \"./original/NER-de-train.tsv\",\n",
51
+ " \"dev\": \"./original/NER-de-dev.tsv\",\n",
52
+ " \"test\": \"./original/NER-de-test.tsv\",\n",
53
+ "}"
54
+ ]
55
+ },
56
+ {
57
+ "cell_type": "markdown",
58
+ "id": "b11a81f2-0e71-4bbc-9538-b1f3295c4b38",
59
+ "metadata": {},
60
+ "source": [
61
+ "# Dataset Stats"
62
+ ]
63
+ },
64
+ {
65
+ "cell_type": "code",
66
+ "execution_count": 4,
67
+ "id": "530c2634-19ee-4814-842e-b834024fa8c9",
68
+ "metadata": {},
69
+ "outputs": [
70
+ {
71
+ "name": "stdout",
72
+ "output_type": "stream",
73
+ "text": [
74
+ "GermEval 2014 Dataset Stats for train split:\n",
75
+ "| TLD | Number of examples (Percentage) |\n",
76
+ "|----------------------|-----------------------------------|\n",
77
+ "| wikipedia.org | 12007 (50.03%) |\n",
78
+ "| welt.de | 662 (2.76%) |\n",
79
+ "| spiegel.de | 512 (2.13%) |\n",
80
+ "| tagesspiegel.de | 424 (1.77%) |\n",
81
+ "| handelsblatt.com | 369 (1.54%) |\n",
82
+ "| fr-aktuell.de | 344 (1.43%) |\n",
83
+ "| sueddeutsche.de | 308 (1.28%) |\n",
84
+ "| abendblatt.de | 283 (1.18%) |\n",
85
+ "| berlinonline.de | 255 (1.06%) |\n",
86
+ "| szon.de | 249 (1.04%) |\n",
87
+ "| n-tv.de | 195 (0.81%) |\n",
88
+ "| yahoo.com | 192 (0.8%) |\n",
89
+ "| feedsportal.com | 173 (0.72%) |\n",
90
+ "| ngz-online.de | 173 (0.72%) |\n",
91
+ "| faz.net | 156 (0.65%) |\n",
92
+ "| nzz.ch | 146 (0.61%) |\n",
93
+ "| morgenweb.de | 134 (0.56%) |\n",
94
+ "| rp-online.de | 132 (0.55%) |\n",
95
+ "| gea.de | 131 (0.55%) |\n",
96
+ "| sat1.de | 126 (0.53%) |\n",
97
+ "| tagesschau.de | 124 (0.52%) |\n",
98
+ "| pnp.de | 101 (0.42%) |\n",
99
+ "| orf.at | 98 (0.41%) |\n",
100
+ "| n24.de | 98 (0.41%) |\n",
101
+ "| finanznachrichten.de | 91 (0.38%) |\n",
102
+ "\n",
103
+ "GermEval 2014 Dataset Stats for dev split:\n",
104
+ "| TLD | Number of examples (Percentage) |\n",
105
+ "|----------------------|-----------------------------------|\n",
106
+ "| wikipedia.org | 1119 (50.86%) |\n",
107
+ "| welt.de | 46 (2.09%) |\n",
108
+ "| spiegel.de | 43 (1.95%) |\n",
109
+ "| fr-aktuell.de | 38 (1.73%) |\n",
110
+ "| tagesspiegel.de | 37 (1.68%) |\n",
111
+ "| handelsblatt.com | 35 (1.59%) |\n",
112
+ "| sueddeutsche.de | 28 (1.27%) |\n",
113
+ "| szon.de | 25 (1.14%) |\n",
114
+ "| feedsportal.com | 24 (1.09%) |\n",
115
+ "| berlinonline.de | 22 (1.0%) |\n",
116
+ "| rp-online.de | 21 (0.95%) |\n",
117
+ "| abendblatt.de | 20 (0.91%) |\n",
118
+ "| ngz-online.de | 19 (0.86%) |\n",
119
+ "| n-tv.de | 18 (0.82%) |\n",
120
+ "| yahoo.com | 15 (0.68%) |\n",
121
+ "| sat1.de | 15 (0.68%) |\n",
122
+ "| orf.at | 13 (0.59%) |\n",
123
+ "| finanznachrichten.de | 13 (0.59%) |\n",
124
+ "| tagesschau.de | 13 (0.59%) |\n",
125
+ "| nzz.ch | 12 (0.55%) |\n",
126
+ "| faz.net | 12 (0.55%) |\n",
127
+ "| morgenweb.de | 12 (0.55%) |\n",
128
+ "| 20min.ch | 11 (0.5%) |\n",
129
+ "| pnp.de | 11 (0.5%) |\n",
130
+ "| focus.de | 10 (0.45%) |\n",
131
+ "\n",
132
+ "GermEval 2014 Dataset Stats for test split:\n",
133
+ "| TLD | Number of examples (Percentage) |\n",
134
+ "|----------------------|-----------------------------------|\n",
135
+ "| wikipedia.org | 2547 (49.94%) |\n",
136
+ "| welt.de | 139 (2.73%) |\n",
137
+ "| spiegel.de | 88 (1.73%) |\n",
138
+ "| tagesspiegel.de | 86 (1.69%) |\n",
139
+ "| handelsblatt.com | 84 (1.65%) |\n",
140
+ "| sueddeutsche.de | 78 (1.53%) |\n",
141
+ "| abendblatt.de | 72 (1.41%) |\n",
142
+ "| fr-aktuell.de | 62 (1.22%) |\n",
143
+ "| berlinonline.de | 59 (1.16%) |\n",
144
+ "| szon.de | 57 (1.12%) |\n",
145
+ "| feedsportal.com | 52 (1.02%) |\n",
146
+ "| n-tv.de | 47 (0.92%) |\n",
147
+ "| sat1.de | 42 (0.82%) |\n",
148
+ "| nzz.ch | 39 (0.76%) |\n",
149
+ "| yahoo.com | 38 (0.75%) |\n",
150
+ "| ngz-online.de | 37 (0.73%) |\n",
151
+ "| faz.net | 37 (0.73%) |\n",
152
+ "| morgenweb.de | 36 (0.71%) |\n",
153
+ "| taz.de | 28 (0.55%) |\n",
154
+ "| finanznachrichten.de | 25 (0.49%) |\n",
155
+ "| tagesschau.de | 24 (0.47%) |\n",
156
+ "| gea.de | 24 (0.47%) |\n",
157
+ "| bernerzeitung.ch | 23 (0.45%) |\n",
158
+ "| ftd.de | 22 (0.43%) |\n",
159
+ "| orf.at | 21 (0.41%) |\n",
160
+ "\n"
161
+ ]
162
+ }
163
+ ],
164
+ "source": [
165
+ "def print_stats(dataset_split, dataset_path, limit=25):\n",
166
+ " hostname_counter = Counter()\n",
167
+ "\n",
168
+ " with open(dataset_path, \"rt\") as f_p:\n",
169
+ " for line in f_p:\n",
170
+ " if not line.startswith(\"#\"):\n",
171
+ " continue\n",
172
+ " \n",
173
+ " current_url = line.split(\"\\t\")[1].split(\" \")[0]\n",
174
+ " \n",
175
+ " ext = tldextract.extract(current_url)\n",
176
+ " \n",
177
+ " hostname = ext.registered_domain\n",
178
+ " \n",
179
+ " hostname_counter[hostname] += 1\n",
180
+ "\n",
181
+ " # Print nice table\n",
182
+ " headers = [\"TLD\", \"Number of examples (Percentage)\"]\n",
183
+ "\n",
184
+ " table = []\n",
185
+ "\n",
186
+ " total_examples = sum(hostname_counter.values())\n",
187
+ " \n",
188
+ " for tld_name, examples in hostname_counter.most_common(limit):\n",
189
+ " current_percentage = round(examples / total_examples * 100, 2)\n",
190
+ " table.append([tld_name, f\"{examples} ({current_percentage}%)\"])\n",
191
+ "\n",
192
+ " print(tabulate(table, headers=headers, tablefmt=\"github\"))\n",
193
+ "\n",
194
+ "for dataset_split in dataset_splits.keys():\n",
195
+ " print(f\"GermEval 2014 Dataset Stats for {dataset_split} split:\")\n",
196
+ " print_stats(dataset_split, dataset_splits[dataset_split])\n",
197
+ " print(\"\")"
198
+ ]
199
+ },
200
+ {
201
+ "cell_type": "markdown",
202
+ "id": "d97e3e86-ae0e-4a54-af15-de195608a071",
203
+ "metadata": {},
204
+ "source": [
205
+ "# Generate New Dataset Split without Wikipedia"
206
+ ]
207
+ },
208
+ {
209
+ "cell_type": "code",
210
+ "execution_count": 5,
211
+ "id": "4a081af0-f2fe-4e1a-ae56-da10b2bd7be1",
212
+ "metadata": {},
213
+ "outputs": [
214
+ {
215
+ "name": "stdout",
216
+ "output_type": "stream",
217
+ "text": [
218
+ "Writing out train split...\n",
219
+ "Writing out dev split...\n",
220
+ "Writing out test split...\n"
221
+ ]
222
+ }
223
+ ],
224
+ "source": [
225
+ "def filter_out_wikipedia(dataset_split):\n",
226
+ " with open(dataset_splits[dataset_split], \"rt\") as f_p:\n",
227
+ " all_sentences = []\n",
228
+ " current_sentence = []\n",
229
+ " for line in f_p:\n",
230
+ " line = line.strip()\n",
231
+ " if not line:\n",
232
+ " # We found new sentence, yeah!\n",
233
+ "\n",
234
+ " if len(current_sentence) == 0:\n",
235
+ " continue\n",
236
+ " \n",
237
+ " all_sentences.append(current_sentence)\n",
238
+ " current_sentence = []\n",
239
+ " continue\n",
240
+ "\n",
241
+ " current_sentence.append(line)\n",
242
+ "\n",
243
+ " if len(current_sentence) > 0:\n",
244
+ " all_sentences.append(current_sentence)\n",
245
+ " \n",
246
+ " with open(f\"NER-de-without-wikipedia-{dataset_split}.tsv\", \"wt\") as f_out:\n",
247
+ " for sentence in all_sentences:\n",
248
+ "\n",
249
+ " header = sentence[0]\n",
250
+ " assert header.startswith(\"#\")\n",
251
+ "\n",
252
+ " current_url = header.split(\"\\t\")[1].split(\" \")[0]\n",
253
+ " ext = tldextract.extract(current_url)\n",
254
+ " hostname = ext.registered_domain\n",
255
+ "\n",
256
+ " if hostname == \"wikipedia.org\":\n",
257
+ " continue\n",
258
+ "\n",
259
+ " f_out.write(\"\\n\".join(sentence) + \"\\n\\n\")\n",
260
+ "\n",
261
+ "for dataset_split in dataset_splits.keys():\n",
262
+ " print(f\"Writing out {dataset_split} split...\")\n",
263
+ " filter_out_wikipedia(dataset_split)"
264
+ ]
265
+ },
266
+ {
267
+ "cell_type": "markdown",
268
+ "id": "84321c99-7085-467d-8d9c-7966bddd21de",
269
+ "metadata": {},
270
+ "source": [
271
+ "# Load with Flair"
272
+ ]
273
+ },
274
+ {
275
+ "cell_type": "code",
276
+ "execution_count": 6,
277
+ "id": "be671ee3-e562-43a5-8734-144b3a129993",
278
+ "metadata": {},
279
+ "outputs": [
280
+ {
281
+ "name": "stderr",
282
+ "output_type": "stream",
283
+ "text": [
284
+ "2024-05-29 17:08:26.593572: I external/local_tsl/tsl/cuda/cudart_stub.cc:32] Could not find cuda drivers on your machine, GPU will not be used.\n",
285
+ "2024-05-29 17:08:26.597226: I external/local_tsl/tsl/cuda/cudart_stub.cc:32] Could not find cuda drivers on your machine, GPU will not be used.\n",
286
+ "2024-05-29 17:08:26.644331: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
287
+ "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
288
+ "2024-05-29 17:08:27.578466: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n"
289
+ ]
290
+ }
291
+ ],
292
+ "source": [
293
+ "import flair\n",
294
+ "\n",
295
+ "from flair.datasets import NER_GERMAN_GERMEVAL\n",
296
+ "from flair.datasets.sequence_labeling import ColumnCorpus\n",
297
+ "from flair.file_utils import cached_path\n",
298
+ "\n",
299
+ "from pathlib import Path\n",
300
+ "from typing import Optional, Union\n",
301
+ "\n",
302
+ "\n",
303
+ "class NER_GERMEVAL_2014_NO_WIKIPEDIA(ColumnCorpus):\n",
304
+ " def __init__(\n",
305
+ " self,\n",
306
+ " base_path: Optional[Union[str, Path]] = None,\n",
307
+ " in_memory: bool = True,\n",
308
+ " **corpusargs,\n",
309
+ " ) -> None:\n",
310
+ " base_path = flair.cache_root / \"datasets\" if not base_path else Path(base_path)\n",
311
+ " dataset_name = self.__class__.__name__.lower()\n",
312
+ " data_folder = base_path / dataset_name\n",
313
+ " data_path = flair.cache_root / \"datasets\" / dataset_name\n",
314
+ "\n",
315
+ " column_format = {1: \"text\", 2: \"ner\"}\n",
316
+ "\n",
317
+ " #hf_download_path = \"https://huggingface.co/datasets/stefan-it/germeval14_no_wikipedia/resolve/main\"\n",
318
+ "\n",
319
+ " #for split in [\"train\", \"dev\", \"test\"]:\n",
320
+ " # cached_path(f\"{hf_download_path}/NER-de-without-wikipedia-{split}.tsv\", data_path)\n",
321
+ " \n",
322
+ " super().__init__(\n",
323
+ " \"./\", #data_folder,\n",
324
+ " column_format = {0: \"text\", 1: \"ner\"},\n",
325
+ " column_delimiter=\"\\t\",\n",
326
+ " document_separator_token=\"-DOCSTART-\",\n",
327
+ " in_memory=in_memory,\n",
328
+ " comment_symbol=\"# \",\n",
329
+ " **corpusargs,\n",
330
+ " )"
331
+ ]
332
+ },
333
+ {
334
+ "cell_type": "markdown",
335
+ "id": "7c9d0275-dc4e-4ca0-9a49-b582a42d6bca",
336
+ "metadata": {},
337
+ "source": [
338
+ "# New Corpus Stats"
339
+ ]
340
+ },
341
+ {
342
+ "cell_type": "code",
343
+ "execution_count": 7,
344
+ "id": "8b1a34ab-42c5-4e72-a63e-823f7f2b3cbe",
345
+ "metadata": {},
346
+ "outputs": [
347
+ {
348
+ "name": "stdout",
349
+ "output_type": "stream",
350
+ "text": [
351
+ "2024-05-29 17:08:30,120 Reading data from .\n",
352
+ "2024-05-29 17:08:30,121 Train: NER-de-without-wikipedia-train.tsv\n",
353
+ "2024-05-29 17:08:30,123 Dev: NER-de-without-wikipedia-dev.tsv\n",
354
+ "2024-05-29 17:08:30,124 Test: NER-de-without-wikipedia-test.tsv\n",
355
+ "2024-05-29 17:08:34,319 Reading data from /home/stefan/.flair/datasets/ner_german_germeval\n",
356
+ "2024-05-29 17:08:34,320 Train: /home/stefan/.flair/datasets/ner_german_germeval/train.tsv\n",
357
+ "2024-05-29 17:08:34,321 Dev: /home/stefan/.flair/datasets/ner_german_germeval/dev.tsv\n",
358
+ "2024-05-29 17:08:34,321 Test: /home/stefan/.flair/datasets/ner_german_germeval/test.tsv\n"
359
+ ]
360
+ }
361
+ ],
362
+ "source": [
363
+ "corpus = NER_GERMEVAL_2014_NO_WIKIPEDIA()\n",
364
+ "original_corpus = NER_GERMAN_GERMEVAL()"
365
+ ]
366
+ },
367
+ {
368
+ "cell_type": "code",
369
+ "execution_count": 8,
370
+ "id": "86047123-c882-433b-8d19-92626e878afd",
371
+ "metadata": {},
372
+ "outputs": [
373
+ {
374
+ "name": "stdout",
375
+ "output_type": "stream",
376
+ "text": [
377
+ "Original GermEval 2014 stats: Corpus: 24000 train + 2200 dev + 5100 test sentences\n",
378
+ "Filtered-out GermEval 2014 stats: Corpus: 11993 train + 1081 dev + 2553 test sentences\n"
379
+ ]
380
+ }
381
+ ],
382
+ "source": [
383
+ "print(\"Original GermEval 2014 stats:\", str(original_corpus))\n",
384
+ "print(\"Filtered-out GermEval 2014 stats:\", str(corpus))"
385
+ ]
386
+ },
387
+ {
388
+ "cell_type": "code",
389
+ "execution_count": 9,
390
+ "id": "9e6ffb09-b1c0-4559-b954-df8373492876",
391
+ "metadata": {},
392
+ "outputs": [],
393
+ "source": [
394
+ "new_dataset_splits = {\n",
395
+ " \"train\": \"./NER-de-without-wikipedia-train.tsv\",\n",
396
+ " \"dev\": \"./NER-de-without-wikipedia-dev.tsv\",\n",
397
+ " \"test\": \"./NER-de-without-wikipedia-test.tsv\",\n",
398
+ "}"
399
+ ]
400
+ },
401
+ {
402
+ "cell_type": "code",
403
+ "execution_count": 11,
404
+ "id": "9ea60c74-70e2-473c-96c7-b6cdfc601297",
405
+ "metadata": {},
406
+ "outputs": [
407
+ {
408
+ "name": "stdout",
409
+ "output_type": "stream",
410
+ "text": [
411
+ "New GermEval 2014 Dataset Stats for train split:\n",
412
+ "| TLD | Number of examples (Percentage) |\n",
413
+ "|----------------------|-----------------------------------|\n",
414
+ "| welt.de | 662 (5.52%) |\n",
415
+ "| spiegel.de | 512 (4.27%) |\n",
416
+ "| tagesspiegel.de | 424 (3.54%) |\n",
417
+ "| handelsblatt.com | 369 (3.08%) |\n",
418
+ "| fr-aktuell.de | 344 (2.87%) |\n",
419
+ "| sueddeutsche.de | 308 (2.57%) |\n",
420
+ "| abendblatt.de | 283 (2.36%) |\n",
421
+ "| berlinonline.de | 255 (2.13%) |\n",
422
+ "| szon.de | 249 (2.08%) |\n",
423
+ "| n-tv.de | 195 (1.63%) |\n",
424
+ "| yahoo.com | 192 (1.6%) |\n",
425
+ "| feedsportal.com | 173 (1.44%) |\n",
426
+ "| ngz-online.de | 173 (1.44%) |\n",
427
+ "| faz.net | 156 (1.3%) |\n",
428
+ "| nzz.ch | 146 (1.22%) |\n",
429
+ "| morgenweb.de | 134 (1.12%) |\n",
430
+ "| rp-online.de | 132 (1.1%) |\n",
431
+ "| gea.de | 131 (1.09%) |\n",
432
+ "| sat1.de | 126 (1.05%) |\n",
433
+ "| tagesschau.de | 124 (1.03%) |\n",
434
+ "| pnp.de | 101 (0.84%) |\n",
435
+ "| orf.at | 98 (0.82%) |\n",
436
+ "| n24.de | 98 (0.82%) |\n",
437
+ "| finanznachrichten.de | 91 (0.76%) |\n",
438
+ "| taz.de | 91 (0.76%) |\n",
439
+ "\n",
440
+ "New GermEval 2014 Dataset Stats for dev split:\n",
441
+ "| TLD | Number of examples (Percentage) |\n",
442
+ "|----------------------|-----------------------------------|\n",
443
+ "| welt.de | 46 (4.26%) |\n",
444
+ "| spiegel.de | 43 (3.98%) |\n",
445
+ "| fr-aktuell.de | 38 (3.52%) |\n",
446
+ "| tagesspiegel.de | 37 (3.42%) |\n",
447
+ "| handelsblatt.com | 35 (3.24%) |\n",
448
+ "| sueddeutsche.de | 28 (2.59%) |\n",
449
+ "| szon.de | 25 (2.31%) |\n",
450
+ "| feedsportal.com | 24 (2.22%) |\n",
451
+ "| berlinonline.de | 22 (2.04%) |\n",
452
+ "| rp-online.de | 21 (1.94%) |\n",
453
+ "| abendblatt.de | 20 (1.85%) |\n",
454
+ "| ngz-online.de | 19 (1.76%) |\n",
455
+ "| n-tv.de | 18 (1.67%) |\n",
456
+ "| yahoo.com | 15 (1.39%) |\n",
457
+ "| sat1.de | 15 (1.39%) |\n",
458
+ "| orf.at | 13 (1.2%) |\n",
459
+ "| finanznachrichten.de | 13 (1.2%) |\n",
460
+ "| tagesschau.de | 13 (1.2%) |\n",
461
+ "| nzz.ch | 12 (1.11%) |\n",
462
+ "| faz.net | 12 (1.11%) |\n",
463
+ "| morgenweb.de | 12 (1.11%) |\n",
464
+ "| 20min.ch | 11 (1.02%) |\n",
465
+ "| pnp.de | 11 (1.02%) |\n",
466
+ "| focus.de | 10 (0.93%) |\n",
467
+ "| ftd.de | 9 (0.83%) |\n",
468
+ "\n",
469
+ "New GermEval 2014 Dataset Stats for test split:\n",
470
+ "| TLD | Number of examples (Percentage) |\n",
471
+ "|----------------------|-----------------------------------|\n",
472
+ "| welt.de | 139 (5.44%) |\n",
473
+ "| spiegel.de | 88 (3.45%) |\n",
474
+ "| tagesspiegel.de | 86 (3.37%) |\n",
475
+ "| handelsblatt.com | 84 (3.29%) |\n",
476
+ "| sueddeutsche.de | 78 (3.06%) |\n",
477
+ "| abendblatt.de | 72 (2.82%) |\n",
478
+ "| fr-aktuell.de | 62 (2.43%) |\n",
479
+ "| berlinonline.de | 59 (2.31%) |\n",
480
+ "| szon.de | 57 (2.23%) |\n",
481
+ "| feedsportal.com | 52 (2.04%) |\n",
482
+ "| n-tv.de | 47 (1.84%) |\n",
483
+ "| sat1.de | 42 (1.65%) |\n",
484
+ "| nzz.ch | 39 (1.53%) |\n",
485
+ "| yahoo.com | 38 (1.49%) |\n",
486
+ "| ngz-online.de | 37 (1.45%) |\n",
487
+ "| faz.net | 37 (1.45%) |\n",
488
+ "| morgenweb.de | 36 (1.41%) |\n",
489
+ "| taz.de | 28 (1.1%) |\n",
490
+ "| finanznachrichten.de | 25 (0.98%) |\n",
491
+ "| tagesschau.de | 24 (0.94%) |\n",
492
+ "| gea.de | 24 (0.94%) |\n",
493
+ "| bernerzeitung.ch | 23 (0.9%) |\n",
494
+ "| ftd.de | 22 (0.86%) |\n",
495
+ "| orf.at | 21 (0.82%) |\n",
496
+ "| rp-online.de | 21 (0.82%) |\n",
497
+ "\n"
498
+ ]
499
+ }
500
+ ],
501
+ "source": [
502
+ "for dataset_split in new_dataset_splits.keys():\n",
503
+ " print(f\"New GermEval 2014 Dataset Stats for {dataset_split} split:\")\n",
504
+ " print_stats(dataset_split, new_dataset_splits[dataset_split])\n",
505
+ " print(\"\")"
506
+ ]
507
+ }
508
+ ],
509
+ "metadata": {
510
+ "kernelspec": {
511
+ "display_name": "Python 3 (ipykernel)",
512
+ "language": "python",
513
+ "name": "python3"
514
+ },
515
+ "language_info": {
516
+ "codemirror_mode": {
517
+ "name": "ipython",
518
+ "version": 3
519
+ },
520
+ "file_extension": ".py",
521
+ "mimetype": "text/x-python",
522
+ "name": "python",
523
+ "nbconvert_exporter": "python",
524
+ "pygments_lexer": "ipython3",
525
+ "version": "3.12.3"
526
+ }
527
+ },
528
+ "nbformat": 4,
529
+ "nbformat_minor": 5
530
+ }