ju-bezdek committed on
Commit
bb7591d
1 Parent(s): 60459c8

Initial commit

.gitattributes CHANGED
@@ -25,3 +25,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ data/train.json filter=lfs diff=lfs merge=lfs -text
+ data/valid.json filter=lfs diff=lfs merge=lfs -text
+ data/test.json filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
+ .DS_Store
conll2003-sk-ner.py ADDED
@@ -0,0 +1,103 @@
+ # coding=utf-8
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Introduction to the CoNLL-2003-SK-NER Shared Task: Slovak Named Entity Recognition"""
+
+ import datasets
+ import json
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+
+
+ _DESCRIPTION = """\
+ This is translated version of the original CONLL2003 dataset (translated from English to Slovak via Google translate) Annotation was done mostly automatically with word matching scripts. Records where some tags were not matched, were annotated manually (10%) Unlike the original Conll2003 dataset, this one contains only NER tags
+ """
+
+ _URL="/data/"
+ #_URL = "https://github.com/ju-bezdek/conll2003-sk-ner/raw/master/data/"
+ _TRAINING_FILE = "train.json"
+ _DEV_FILE = "valid.json"
+ _TEST_FILE = "test.json"
+
+
+ class Conll2003_SK_NER_Config(datasets.BuilderConfig):
+
+
+     def __init__(self, **kwargs):
+         super(Conll2003_SK_NER_Config, self).__init__(**kwargs)
+
+
+ class Conll2003(datasets.GeneratorBasedBuilder):
+     """Conll2003 dataset."""
+
+     BUILDER_CONFIGS = [
+         Conll2003_SK_NER_Config(name="conll2003-SK-NER", version=datasets.Version("1.0.0"), description="Conll2003-SK-NER dataset"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-PER",
+                                 "I-PER",
+                                 "B-ORG",
+                                 "I-ORG",
+                                 "B-LOC",
+                                 "I-LOC",
+                                 "B-MISC",
+                                 "I-MISC",
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         urls_to_download = {
+             "train": f"{_URL}{_TRAINING_FILE}",
+             "dev": f"{_URL}{_DEV_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         logger.info("⏳ Generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             guid = 0
+             for line in f:
+                 record = json.loads(line)
+                 yield guid, record
+                 guid += 1
+
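Editorial note, not part of the commit: a minimal sketch of how a loading script like the one above is typically consumed with `datasets.load_dataset`. The repo id `ju-bezdek/conll2003-sk-ner` is assumed from the author and script name.

```python
# Editorial sketch (not part of the commit): loading the dataset defined by the
# script above via the Hugging Face `datasets` library.
from datasets import load_dataset

# Repo id assumed from the author and script name; adjust if the dataset lives elsewhere.
dataset = load_dataset("ju-bezdek/conll2003-sk-ner")

print(dataset)                      # DatasetDict with train / validation / test splits
example = dataset["train"][0]
print(example["tokens"])            # list of Slovak tokens
print(example["ner_tags"])          # parallel list of integer class labels (0..8)
```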
data/test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0280959ea836976f2f0c8db8fdf5cb1b680f5e872cc4b9716339de8dc2a02ff
+ size 650684
data/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f4fe71debdfc0db1f3485bbcdb97b84086711956e235b5cd2c6d9a82fdfc5db6
+ size 2680237
data/valid.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea36d887933c6b0420399f940a878fbcc49b190ff4e1cade785c700c59d2ec5c
+ size 702155
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"conll2003-SK-NER": {"description": "This is translated version of the original CONLL2003 dataset (translated from English to Slovak via Google translate) Annotation was done mostly automatically with word matching scripts. Records where some tags were not matched, were annotated manually (10%) Unlike the original Conll2003 dataset, this one contains only NER tags\n", "citation": "", "homepage": "", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "ner_tags": {"feature": {"num_classes": 9, "names": ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "B-MISC", "I-MISC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "conll2003", "config_name": "conll2003-SK-NER", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3333648, "num_examples": 14041, "dataset_name": "conll2003"}, "validation": {"name": "validation", "num_bytes": 927447, "num_examples": 3250, "dataset_name": "conll2003"}, "test": {"name": "test", "num_bytes": 853542, "num_examples": 3453, "dataset_name": "conll2003"}}, "download_checksums": {"https://github.com/ju-bezdek/conll2003-sk-ner/raw/master/data/train.json": {"num_bytes": 2680237, "checksum": "f4fe71debdfc0db1f3485bbcdb97b84086711956e235b5cd2c6d9a82fdfc5db6"}, "https://github.com/ju-bezdek/conll2003-sk-ner/raw/master/data/valid.json": {"num_bytes": 702155, "checksum": "ea36d887933c6b0420399f940a878fbcc49b190ff4e1cade785c700c59d2ec5c"}, "https://github.com/ju-bezdek/conll2003-sk-ner/raw/master/data/test.json": {"num_bytes": 650684, "checksum": "b0280959ea836976f2f0c8db8fdf5cb1b680f5e872cc4b9716339de8dc2a02ff"}}, "download_size": 4033076, "post_processing_size": null, "dataset_size": 5114637, "size_in_bytes": 9147713}}
readme.md ADDED
@@ -0,0 +1,86 @@
+ ---
+ annotations_creators:
+ - machine-generated
+ - expert-generated
+ language_creators:
+ - found
+ languages:
+ - sk
+ licenses:
+ - unknown
+ multilinguality:
+ - monolingual
+ pretty_name: 'conll-2003-sk-ner'
+ size_categories:
+ - 10K<n<100K
+ source_datasets:
+ - extended|conll2003
+ task_categories:
+ - structure-prediction
+ task_ids:
+ - named-entity-recognition
+ - part-of-speech-tagging
+ ---
+
+ # Dataset Card for conll2003-sk-ner
+
+ ## Table of Contents
+ - [Dataset Card for conll2003-sk-ner](#dataset-card-for-conll2003-sk-ner)
+   - [Table of Contents](#table-of-contents)
+   - [Dataset Description](#dataset-description)
+     - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+     - [Languages](#languages)
+   - [Dataset Structure](#dataset-structure)
+     - [Data Splits](#data-splits)
+   - [Dataset Creation](#dataset-creation)
+     - [Source Data](#source-data)
+     - [Annotations](#annotations)
+       - [Annotation process](#annotation-process)
+
+
+ ## Dataset Description
+
+ This is a translated version of the original CoNLL-2003 dataset (translated from English to Slovak via Google Translate). Annotation was done mostly automatically with word-matching scripts; records where some tags could not be matched were annotated manually (10%). Unlike the original CoNLL-2003 dataset, this one contains only NER tags.
+
+ - **Point of Contact:** [@ju-bezdek](https://github.com/ju-bezdek)
+
+
+ ### Supported Tasks and Leaderboards
+
+ Named entity recognition (NER)
+
+ Labels (see the mapping sketch after this list):
+
+ - 0: O
+ - 1: B-PER
+ - 2: I-PER
+ - 3: B-ORG
+ - 4: I-ORG
+ - 5: B-LOC
+ - 6: I-LOC
+ - 7: B-MISC
+ - 8: I-MISC
+
+
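(Editorial sketch, not part of the committed card.) The integers stored in `ner_tags` map to the names above through the `ClassLabel` feature; a minimal example, again assuming the repo id `ju-bezdek/conll2003-sk-ner`:

```python
# Editorial sketch: converting integer ner_tags back to their string labels.
from datasets import load_dataset

dataset = load_dataset("ju-bezdek/conll2003-sk-ner", split="train")  # repo id assumed
ner_feature = dataset.features["ner_tags"].feature                   # ClassLabel with the 9 names above

first = dataset[0]
for token, tag in zip(first["tokens"], first["ner_tags"]):
    print(token, ner_feature.int2str(tag))                           # e.g. "O", "B-PER", ...
```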
+ ### Languages
+
+ Slovak (sk)
+
+ ## Dataset Structure
+
+ ### Data Splits
+
+ train, validation, test
+
+ ## Dataset Creation
+
+ ### Source Data
+ https://huggingface.co/datasets/conll2003
+
+ ### Annotations
+
+ #### Annotation process
+
+ - Machine translation of the original English text to Slovak
+ - Automatic pairing of tags via reverse translation and hard-coded rules (including phrase regex matching, etc.)
+ - Manual annotation of records that could not be matched automatically