bileldh committed on
Commit e36a405
1 Parent(s): 76aa662

Delete conll2003.py

Files changed (1)
conll2003.py +0 -146
conll2003.py DELETED
@@ -1,146 +0,0 @@
- # -*- coding: utf-8 -*-
- """
- Created on Fri Jan 6 23:02:55 2023
-
- @author: bilel
- """
-
- import os
- import datasets
-
-
- logger = datasets.logging.get_logger(__name__)
-
-
- _CITATION = """\
- @inproceedings{tjong-kim-sang-de-meulder-2003-introduction,
-     title = "Introduction to the {C}o{NLL}-2003 Shared Task: Language-Independent Named Entity Recognition",
-     author = "Tjong Kim Sang, Erik F. and
-       De Meulder, Fien",
-     booktitle = "Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003",
-     year = "2003",
-     url = "https://www.aclweb.org/anthology/W03-0419",
-     pages = "142--147",
- }
- """
-
- _DESCRIPTION = """\
- The shared task of CoNLL-2003 concerns language-independent named entity recognition. We will concentrate on
- four types of named entities: persons, locations, organizations and names of miscellaneous entities that do
- not belong to the previous three groups.
- The CoNLL-2003 shared task data files contain four columns separated by a single space. Each word has been put on
- a separate line and there is an empty line after each sentence. The first item on each line is a word, the second
- a part-of-speech (POS) tag, the third a syntactic chunk tag and the fourth the named entity tag. The chunk tags
- and the named entity tags have the format I-TYPE which means that the word is inside a phrase of type TYPE. Only
- if two phrases of the same type immediately follow each other, the first word of the second phrase will have tag
- B-TYPE to show that it starts a new phrase. A word with tag O is not part of a phrase. Note the dataset uses IOB2
- tagging scheme, whereas the original dataset uses IOB1.
- For more details see https://www.clips.uantwerpen.be/conll2003/ner/ and https://www.aclweb.org/anthology/W03-0419
- """
-
- _URL = "https://huggingface.co/datasets/bileldh/conll2003/resolve/main/conll2003.rar"
- _TRAINING_FILE = "train.txt"
- _DEV_FILE = "valid.txt"
- _TEST_FILE = "test.txt"
-
-
- class Conll2003Config(datasets.BuilderConfig):
-     """BuilderConfig for Conll2003"""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for Conll2003.
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(Conll2003Config, self).__init__(**kwargs)
-
-
- class Conll2003(datasets.GeneratorBasedBuilder):
-     """Conll2003 dataset."""
-
-     BUILDER_CONFIGS = [
-         Conll2003Config(name="conll2003", version=datasets.Version("1.0.0"), description="Conll2003 dataset"),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "tokens": datasets.Sequence(datasets.Value("string")),
-                     "ner_tags": datasets.Sequence(
-                         datasets.features.ClassLabel(
-                             names=[
-                                 "O",
-                                 "B-MET",
-                                 "I-MET",
-                                 "B-CONT",
-                                 "I-CONT",
-                                 "B-EDU",
-                                 "I-EDU",
-                                 "B-SAL",
-                                 "I-SAL",
-                                 "B-CER",
-                                 "I-CER",
-                                 "B-EXP",
-                                 "I-EXP",
-                                 "B-LOC",
-                                 "I-LOC",
-                                 "B-ORG",
-                                 "I-ORG",
-                             ]
-                         )
-                     ),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="https://www.aclweb.org/anthology/W03-0419/",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         downloaded_file = dl_manager.download_and_extract(_URL)
-         data_files = {
-             "train": os.path.join(downloaded_file, _TRAINING_FILE),
-             "dev": os.path.join(downloaded_file, _DEV_FILE),
-             "test": os.path.join(downloaded_file, _TEST_FILE),
-         }
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
-         ]
-
-     def _generate_examples(self, filepath):
-         logger.info("⏳ Generating examples from = %s", filepath)
-         with open(filepath, encoding="utf-8") as f:
-             guid = 0
-             tokens = []
-             ner_tags = []
-             for line in f:
-                 if line.startswith("-DOCSTART-") or line == "" or line == "\n":
-                     if tokens:
-                         yield guid, {
-                             "id": str(guid),
-                             "tokens": tokens,
-                             "ner_tags": ner_tags,
-                         }
-                         guid += 1
-                         tokens = []
-                         ner_tags = []
-                 else:
-                     # conll2003 tokens are space separated
-                     splits = line.split(" ")
-                     tokens.append(splits[0])
-                     ner_tags.append(splits[1].rstrip())
-             # last example
-             if tokens:
-                 yield guid, {
-                     "id": str(guid),
-                     "tokens": tokens,
-                     "ner_tags": ner_tags,
-                 }
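
For reference, the deleted script parses two-column, space-separated files: although the CoNLL-2003 description above mentions four columns, `_generate_examples` takes the word from `splits[0]` and the NER tag from `splits[1]`, the second column. Below is a minimal sketch of that parsing loop under that assumption; the sample text and the `parse_conll` helper are illustrative stand-ins, not part of the deleted script.

# Minimal sketch of the loop in _generate_examples, assuming the
# two-column "WORD TAG" layout the script actually reads.
# The sample text and parse_conll are hypothetical, for illustration only.
sample = (
    "-DOCSTART- O\n"
    "\n"
    "John B-EXP\n"
    "Doe I-EXP\n"
    "\n"
    "Paris B-LOC\n"
)

def parse_conll(lines):
    """Yield (tokens, ner_tags) per sentence, mirroring _generate_examples."""
    tokens, ner_tags = [], []
    for line in lines:
        # A -DOCSTART- delimiter or a blank line closes the current sentence.
        if line.startswith("-DOCSTART-") or line.strip() == "":
            if tokens:
                yield tokens, ner_tags
                tokens, ner_tags = [], []
        else:
            splits = line.split(" ")             # tokens are space separated
            tokens.append(splits[0])             # first column: the word
            ner_tags.append(splits[1].rstrip())  # second column: the NER tag
    if tokens:  # flush the last sentence
        yield tokens, ner_tags

for toks, tags in parse_conll(sample.splitlines(keepends=True)):
    print(toks, tags)
# ['John', 'Doe'] ['B-EXP', 'I-EXP']
# ['Paris'] ['B-LOC']

Before this deletion, the dataset would presumably have been loaded with `datasets.load_dataset("bileldh/conll2003")`, which downloads `conll2003.rar` from `_URL` and runs this script (extracting a `.rar` with `datasets` typically requires the third-party `rarfile` package). With the script removed, that loading path no longer applies.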