Kriyans committed
Commit 40a738a
1 Parent(s): e9ba955

Update indian_names.py

Files changed (1)
  1. indian_names.py +44 -28
indian_names.py CHANGED
@@ -2,7 +2,7 @@ import datasets
 
 logger = datasets.logging.get_logger(__name__)
 
-_URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/indian_names"
+_URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/wnut17train%20(1).conll"
 
 class indian_namesConfig(datasets.BuilderConfig):
     """The WNUT 17 Emerging Entities Dataset."""
@@ -32,8 +32,11 @@ class indian_names(datasets.GeneratorBasedBuilder):
                     "ner_tags": datasets.Sequence(
                         datasets.features.ClassLabel(
                             names=[
-                                "B-PER",
-                                "B-ORG",
+                                "O",
+                                "B-corporation",
+                                "I-corporation",
+                                "B-person",
+                                "I-person"
                             ]
                         )
                     ),
@@ -52,33 +55,46 @@ class indian_names(datasets.GeneratorBasedBuilder):
         return [
             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
         ]
-
+
     def _generate_examples(self, filepath):
         logger.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as f:
-            guid = 0
-            tokens = []
-            ner_tags = []
-            for line in f:
-                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
-                    if tokens:
-                        yield guid, {
-                            "id": str(guid),
-                            "tokens": tokens,
-                            "ner_tags": ner_tags,
-                        }
-                        guid += 1
-                        tokens = []
-                        ner_tags = []
+            current_tokens = []
+            current_labels = []
+            sentence_counter = 0
+            for row in f:
+                row = row.rstrip()
+                if row:
+                    if "\t" in row:
+                        token, label = row.split("\t")
+                        current_tokens.append(token)
+                        current_labels.append(label)
+                    else:
+                        # Handle cases where the delimiter is missing
+                        # You can choose to skip these rows or handle them differently
+                        logger.warning(f"Delimiter missing in row: {row}")
                 else:
-                    # conll2003 tokens are space separated
-                    splits = line.split(" ")
-                    tokens.append(splits[0])
-                    ner_tags.append(splits[1].rstrip())
-            # last example
-            if tokens:
-                yield guid, {
-                    "id": str(guid),
-                    "tokens": tokens,
-                    "ner_tags": ner_tags,
+                    # New sentence
+                    if not current_tokens:
+                        # Consecutive empty lines will cause empty sentences
+                        continue
+                    assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
+                    sentence = (
+                        sentence_counter,
+                        {
+                            "id": str(sentence_counter),
+                            "tokens": current_tokens,
+                            "ner_tags": current_labels,
+                        },
+                    )
+                    sentence_counter += 1
+                    current_tokens = []
+                    current_labels = []
+                    yield sentence
+            # Don't forget the last sentence in the dataset 🧐
+            if current_tokens:
+                yield sentence_counter, {
+                    "id": str(sentence_counter),
+                    "tokens": current_tokens,
+                    "ner_tags": current_labels,
                 }
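
Note: the rewritten _generate_examples assumes WNUT-17-style input, i.e. one "token<TAB>label" pair per line, with blank lines separating sentences. A minimal, self-contained sketch of that parsing contract; the sample rows and variable names are illustrative assumptions, not data from this commit:

# Mirrors the grouping logic of the new _generate_examples on in-memory
# sample text (the sample rows are assumed, not taken from the dataset).
sample = "Rakesh\tB-person\nSharma\tI-person\n\nInfosys\tB-corporation\n"

sentences, tokens, labels = [], [], []
for row in sample.splitlines():
    row = row.rstrip()
    if row:
        # "token<TAB>label" pairs accumulate into the current sentence
        token, label = row.split("\t")
        tokens.append(token)
        labels.append(label)
    elif tokens:
        # a blank line closes the sentence; consecutive blanks are skipped
        sentences.append((tokens, labels))
        tokens, labels = [], []
if tokens:
    # flush the final sentence, as the loader's trailing `if current_tokens:` does
    sentences.append((tokens, labels))

print(sentences)
# [(['Rakesh', 'Sharma'], ['B-person', 'I-person']),
#  (['Infosys'], ['B-corporation'])]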
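For completeness, a hedged usage sketch of how the loader might be exercised through the standard datasets entry point, assuming the script is saved locally as indian_names.py (script-based loading may additionally require trust_remote_code=True on recent datasets versions):

from datasets import load_dataset

# Load the "train" split defined by the script's SplitGenerator;
# the script downloads the .conll file from _URL on its own.
dataset = load_dataset("indian_names.py", split="train")

example = dataset[0]
print(example["tokens"])

# ner_tags come back as integer ids; decode them via the ClassLabel feature
names = dataset.features["ner_tags"].feature.names
print([names[i] for i in example["ner_tags"]])
# e.g. ['B-person', 'I-person', 'O', ...]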