Kriyans committed on
Commit 930884e
1 Parent(s): d171325

Update indian_names.py

Files changed (1)
  1. indian_names.py +42 -126
indian_names.py CHANGED
@@ -1,88 +1,29 @@
-# coding=utf-8
-# Copyright 2020 HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""The WNUT 17 Emerging Entities Dataset."""
-
-
 import datasets


 logger = datasets.logging.get_logger(__name__)


-_CITATION = """\
-@inproceedings{derczynski-etal-2017-results,
-    title = "Results of the {WNUT}2017 Shared Task on Novel and Emerging Entity Recognition",
-    author = "Derczynski, Leon and
-      Nichols, Eric and
-      van Erp, Marieke and
-      Limsopatham, Nut",
-    booktitle = "Proceedings of the 3rd Workshop on Noisy User-generated Text",
-    month = sep,
-    year = "2017",
-    address = "Copenhagen, Denmark",
-    publisher = "Association for Computational Linguistics",
-    url = "https://www.aclweb.org/anthology/W17-4418",
-    doi = "10.18653/v1/W17-4418",
-    pages = "140--147",
-    abstract = "This shared task focuses on identifying unusual, previously-unseen entities in the context of emerging discussions.
-    Named entities form the basis of many modern approaches to other tasks (like event clustering and summarization),
-    but recall on them is a real problem in noisy text - even among annotators.
-    This drop tends to be due to novel entities and surface forms.
-    Take for example the tweet {``}so.. kktny in 30 mins?!{''} {--} even human experts find the entity {`}kktny{'}
-    hard to detect and resolve. The goal of this task is to provide a definition of emerging and of rare entities,
-    and based on that, also datasets for detecting these entities. The task as described in this paper evaluated the
-    ability of participating entries to detect and classify novel and emerging named entities in noisy text.",
-}
-"""

-_DESCRIPTION = """\
-WNUT 17: Emerging and Rare entity recognition
-This shared task focuses on identifying unusual, previously-unseen entities in the context of emerging discussions.
-Named entities form the basis of many modern approaches to other tasks (like event clustering and summarisation),
-but recall on them is a real problem in noisy text - even among annotators. This drop tends to be due to novel entities and surface forms.
-Take for example the tweet “so.. kktny in 30 mins?” - even human experts find entity kktny hard to detect and resolve.
-This task will evaluate the ability to detect and classify novel, emerging, singleton named entities in noisy text.
-The goal of this task is to provide a definition of emerging and of rare entities, and based on that, also datasets for detecting these entities.
-"""

-_URL = "https://github.com/Kriyansparsana/demorepo/blob/main/"
-_TRAINING_FILE = "wnut17train.conll"
-_DEV_FILE = "emerging.dev%20(1).conll"
-_TEST_FILE = "emerging.test.annotated"
-
-
-class indian_namesConfig(datasets.BuilderConfig):
-    """The WNUT 17 Emerging Entities Dataset."""

     def __init__(self, **kwargs):
-        """BuilderConfig for WNUT 17.
         Args:
           **kwargs: keyword arguments forwarded to super.
         """
-        super(indian_namesConfig, self).__init__(**kwargs)


-class indian_names(datasets.GeneratorBasedBuilder):
-    """The WNUT 17 Emerging Entities Dataset."""

     BUILDER_CONFIGS = [
-        indian_namesConfig(
-            name="indian_names", version=datasets.Version("1.0.0"), description="The indian_names Emerging Entities Dataset"
-        ),
     ]

     def _info(self):
@@ -95,80 +36,55 @@ class indian_names(datasets.GeneratorBasedBuilder):
                     "ner_tags": datasets.Sequence(
                         datasets.features.ClassLabel(
                             names=[
-                                "O",
-                                "B-corporation",
-                                "I-corporation",
-                                "B-creative-work",
-                                "I-creative-work",
-                                "B-group",
-                                "I-group",
-                                "B-location",
-                                "I-location",
-                                "B-person",
-                                "I-person",
-                                "B-product",
-                                "I-product",
                             ]
                         )
                     ),
                 }
             ),
             supervised_keys=None,
-            homepage="http://noisy-text.github.io/2017/emerging-rare-entities.html",
-            citation=_CITATION,
         )

     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        urls_to_download = {
-            "train": f"{_URL}{_TRAINING_FILE}",
-            "dev": f"{_URL}{_DEV_FILE}",
-            "test": f"{_URL}{_TEST_FILE}",
-
         }
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)

         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-
         ]

     def _generate_examples(self, filepath):
         logger.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as f:
-            current_tokens = []
-            current_labels = []
-            sentence_counter = 0
-            for row in f:
-                row = row.rstrip()
-                if row:
-                    token, label = row.split("\t")
-                    current_tokens.append(token)
-                    current_labels.append(label)
                 else:
-                    # New sentence
-                    if not current_tokens:
-                        # Consecutive empty lines will cause empty sentences
-                        continue
-                    assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
-                    sentence = (
-                        sentence_counter,
-                        {
-                            "id": str(sentence_counter),
-                            "tokens": current_tokens,
-                            "ner_tags": current_labels,
-                        },
-                    )
-                    sentence_counter += 1
-                    current_tokens = []
-                    current_labels = []
-                    yield sentence
-            # Don't forget last sentence in dataset 🧐
-            if current_tokens:
-                yield sentence_counter, {
-                    "id": str(sentence_counter),
-                    "tokens": current_tokens,
-                    "ner_tags": current_labels,
-                }
 
+import os
 import datasets


 logger = datasets.logging.get_logger(__name__)


+_URL = "https://github.com/Kriyansparsana/demorepo/blob/main/wnut17train.conll"


+class Conll2003Config(datasets.BuilderConfig):
+    """BuilderConfig for Conll2003"""

     def __init__(self, **kwargs):
+        """BuilderConfig for Conll2003.
         Args:
           **kwargs: keyword arguments forwarded to super.
         """
+        super(Conll2003Config, self).__init__(**kwargs)


+class Conll2003(datasets.GeneratorBasedBuilder):
+    """Conll2003 dataset."""

     BUILDER_CONFIGS = [
+        Conll2003Config(name="conll2003", version=datasets.Version("1.0.0"), description="Conll2003 dataset"),
     ]

     def _info(self):

                     "ner_tags": datasets.Sequence(
                         datasets.features.ClassLabel(
                             names=[
+                                "B-PER",
+                                "B-ORG",
+
                             ]
                         )
                     ),
                 }
             ),
             supervised_keys=None,
         )

     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
+        downloaded_file = dl_manager.download_and_extract(_URL)
+        data_files = {
+            "train": os.path.join(downloaded_file),
         }

         return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
         ]

     def _generate_examples(self, filepath):
         logger.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as f:
+            guid = 0
+            tokens = []
+            pos_tags = []
+            chunk_tags = []
+            ner_tags = []
+            for line in f:
+                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
+                    if tokens:
+                        yield guid, {
+                            "id": str(guid),
+                            "tokens": tokens,
+                            "ner_tags": ner_tags,
+                        }
+                        guid += 1
+                        ner_tags = []
                 else:
+                    # conll2003 tokens are space separated
+                    splits = line.split(" ")
+                    tokens.append(splits[0])
+                    ner_tags.append(splits[3].rstrip())
+            # last example
+            if tokens:
+                yield guid, {
+                    "id": str(guid),
+                    "tokens": tokens,
+                    "ner_tags": ner_tags,
+                }
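
For reference, a minimal usage sketch of the updated loader; this is not part of the commit. It assumes the script is checked out locally at an illustrative path, that the installed `datasets` version still supports loading a local script-based builder, and that `_URL` resolves to the raw space-separated .conll file whose fourth column holds the NER tag, as `_generate_examples` expects:

import datasets

# Hypothetical local path to this script; adjust to wherever it is checked out.
dataset = datasets.load_dataset("./indian_names.py", split="train")

# Each example exposes the features declared in _info(); "ner_tags" values are
# returned as ClassLabel indices into the declared names ("B-PER", "B-ORG").
example = dataset[0]
print(example["tokens"])
print(example["ner_tags"])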