Kriyans committed on
Commit 5eedf2d
1 Parent(s): 693a88c

Update indian_names.py

Files changed (1)
indian_names.py +138 -48
indian_names.py CHANGED
@@ -2,7 +2,7 @@ import datasets
 
 logger = datasets.logging.get_logger(__name__)
 
-_URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/wnut17train%20(1).conll"
+_URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/train.txt"
 
 class indian_namesConfig(datasets.BuilderConfig):
     """The WNUT 17 Emerging Entities Dataset."""
@@ -23,78 +23,168 @@ class indian_names(datasets.GeneratorBasedBuilder):
         ),
     ]
 
-    def _info(self):
+    def _info(self):
         return datasets.DatasetInfo(
+            description=_DESCRIPTION,
             features=datasets.Features(
                 {
                     "id": datasets.Value("string"),
                     "tokens": datasets.Sequence(datasets.Value("string")),
+                    "pos_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                '"',
+                                "''",
+                                "#",
+                                "$",
+                                "(",
+                                ")",
+                                ",",
+                                ".",
+                                ":",
+                                "``",
+                                "CC",
+                                "CD",
+                                "DT",
+                                "EX",
+                                "FW",
+                                "IN",
+                                "JJ",
+                                "JJR",
+                                "JJS",
+                                "LS",
+                                "MD",
+                                "NN",
+                                "NNP",
+                                "NNPS",
+                                "NNS",
+                                "NN|SYM",
+                                "PDT",
+                                "POS",
+                                "PRP",
+                                "PRP$",
+                                "RB",
+                                "RBR",
+                                "RBS",
+                                "RP",
+                                "SYM",
+                                "TO",
+                                "UH",
+                                "VB",
+                                "VBD",
+                                "VBG",
+                                "VBN",
+                                "VBP",
+                                "VBZ",
+                                "WDT",
+                                "WP",
+                                "WP$",
+                                "WRB",
+                            ]
+                        )
+                    ),
+                    "chunk_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                "O",
+                                "B-ADJP",
+                                "I-ADJP",
+                                "B-ADVP",
+                                "I-ADVP",
+                                "B-CONJP",
+                                "I-CONJP",
+                                "B-INTJ",
+                                "I-INTJ",
+                                "B-LST",
+                                "I-LST",
+                                "B-NP",
+                                "I-NP",
+                                "B-PP",
+                                "I-PP",
+                                "B-PRT",
+                                "I-PRT",
+                                "B-SBAR",
+                                "I-SBAR",
+                                "B-UCP",
+                                "I-UCP",
+                                "B-VP",
+                                "I-VP",
+                            ]
+                        )
+                    ),
                     "ner_tags": datasets.Sequence(
                         datasets.features.ClassLabel(
                             names=[
                                 "O",
-                                "B-corporation",
-                                "I-corporation",
-                                "B-person",
-                                "I-person"
+                                "B-PER",
+                                "I-PER",
+                                "B-ORG",
+                                "I-ORG",
+                                "B-LOC",
+                                "I-LOC",
+                                "B-MISC",
+                                "I-MISC",
                             ]
                         )
                     ),
                 }
             ),
             supervised_keys=None,
+            homepage="https://www.aclweb.org/anthology/W03-0419/",
+            citation=_CITATION,
         )
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        urls_to_download = {
-            "train": f"{_URL}",
+        downloaded_file = dl_manager.download_and_extract(_URL)
+        data_files = {
+            "train": os.path.join(downloaded_file, _TRAINING_FILE),
+            "dev": os.path.join(downloaded_file, _DEV_FILE),
+            "test": os.path.join(downloaded_file, _TEST_FILE),
         }
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
         ]
-
+
     def _generate_examples(self, filepath):
         logger.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as f:
-            current_tokens = []
-            current_labels = []
-            sentence_counter = 0
-            for row in f:
-                row = row.rstrip()
-                if row:
-                    if "\t" in row:
-                        token, label = row.split("\t")
-                        current_tokens.append(token)
-                        current_labels.append(label)
-                    else:
-                        # Handle cases where the delimiter is missing
-                        # You can choose to skip these rows or handle them differently
-                        logger.warning(f"Delimiter missing in row: {row}")
+            guid = 0
+            tokens = []
+            pos_tags = []
+            chunk_tags = []
+            ner_tags = []
+            for line in f:
+                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
+                    if tokens:
+                        yield guid, {
+                            "id": str(guid),
+                            "tokens": tokens,
+                            "pos_tags": pos_tags,
+                            "chunk_tags": chunk_tags,
+                            "ner_tags": ner_tags,
+                        }
+                        guid += 1
+                        tokens = []
+                        pos_tags = []
+                        chunk_tags = []
+                        ner_tags = []
                 else:
-                    # New sentence
-                    if not current_tokens:
-                        # Consecutive empty lines will cause empty sentences
-                        continue
-                    assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
-                    sentence = (
-                        sentence_counter,
-                        {
-                            "id": str(sentence_counter),
-                            "tokens": current_tokens,
-                            "ner_tags": current_labels,
-                        },
-                    )
-                    sentence_counter += 1
-                    current_tokens = []
-                    current_labels = []
-                    yield sentence
-            # Don't forget the last sentence in the dataset 🧐
-            if current_tokens:
-                yield sentence_counter, {
-                    "id": str(sentence_counter),
-                    "tokens": current_tokens,
-                    "ner_tags": current_labels,
+                    # conll2003 tokens are space separated
+                    splits = line.split(" ")
+                    tokens.append(splits[0])
+                    pos_tags.append(splits[1])
+                    chunk_tags.append(splits[2])
+                    ner_tags.append(splits[3].rstrip())
+            # last example
+            if tokens:
+                yield guid, {
+                    "id": str(guid),
+                    "tokens": tokens,
+                    "pos_tags": pos_tags,
+                    "chunk_tags": chunk_tags,
+                    "ner_tags": ner_tags,
                 }
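
The rewritten _generate_examples reads the file in the CoNLL-2003 column layout rather than the old tab-separated WNUT layout: one token per line with space-separated POS, chunk, and NER columns, blank lines between sentences, and optional -DOCSTART- markers. Below is a minimal standalone sketch of how one such sentence maps onto the new feature columns; the sentence and tags are illustrative, not taken from train.txt.

# Illustrative only: space-separated CoNLL-2003-style rows (token POS chunk NER),
# mirroring the per-line split done by the updated _generate_examples.
sample_sentence = """Ramesh NNP B-NP B-PER
Kumar NNP I-NP I-PER
works VBZ B-VP O
at IN B-PP O
Wipro NNP B-NP B-ORG
. . O O"""

tokens, pos_tags, chunk_tags, ner_tags = [], [], [], []
for line in sample_sentence.splitlines():
    # In the real file a blank line (or a -DOCSTART- marker) ends a sentence.
    token, pos, chunk, ner = line.split(" ")
    tokens.append(token)
    pos_tags.append(pos)
    chunk_tags.append(chunk)
    ner_tags.append(ner)

print(tokens)    # ['Ramesh', 'Kumar', 'works', 'at', 'Wipro', '.']
print(ner_tags)  # ['B-PER', 'I-PER', 'O', 'O', 'B-ORG', 'O']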
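
For completeness, a hedged sketch of how the updated script would be exercised, assuming _DESCRIPTION, _CITATION, _TRAINING_FILE, _DEV_FILE, _TEST_FILE, and the os import (referenced in the added hunks but defined outside them) are present elsewhere in the file. The local script path is an assumption, not part of this commit.

from datasets import load_dataset

# Assumption: the loading script sits in the current directory; substitute the
# Hub repo id if the dataset is loaded from the Hugging Face Hub instead.
ds = load_dataset("./indian_names.py")

print(ds)                    # splits produced by _split_generators
first = ds["train"][0]
print(first["tokens"])       # token list for the first sentence
print(first["ner_tags"])     # ClassLabel ids for the CoNLL-2003 NER tags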