Kriyans committed on
Commit
fc52e82
1 Parent(s): d474363

Update indian_names.py

Browse files
Files changed (1) hide show
  1. indian_names.py +41 -41
indian_names.py CHANGED
@@ -99,45 +99,45 @@ class indian_names(datasets.GeneratorBasedBuilder):
99
  # "ner_tags": current_labels,
100
  # }
101
 
102
- def _generate_examples(self, filepath):
103
- logger.info("⏳ Generating examples from = %s", filepath)
104
- with open(filepath, encoding="utf-8") as f:
105
- current_tokens = []
106
- current_labels = []
107
- sentence_counter = 0
108
- for row in f:
109
- row = row.rstrip()
110
- if row:
111
- if "\t" in row:
112
- token, label = row.split("\t")
113
- current_tokens.append(token)
114
- current_labels.append(label)
115
- else:
116
- # Handle cases where the delimiter is missing
117
- # You can choose to skip these rows or handle them differently
118
- logger.warning(f"Delimiter missing in row: {row}")
119
  else:
120
- # New sentence
121
- if not current_tokens:
122
- # Consecutive empty lines will cause empty sentences
123
- continue
124
- assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
125
- sentence = (
126
- sentence_counter,
127
- {
128
- "id": str(sentence_counter),
129
- "tokens": current_tokens,
130
- "ner_tags": current_labels,
131
- },
132
- )
133
- sentence_counter += 1
134
- current_tokens = []
135
- current_labels = []
136
- yield sentence
137
- # Don't forget last sentence in dataset 🧐
138
- if current_tokens:
139
- yield sentence_counter, {
140
- "id": str(sentence_counter),
141
- "tokens": current_tokens,
142
- "ner_tags": current_labels,
143
- }
 
 
 
 
 
99
  # "ner_tags": current_labels,
100
  # }
101
 
102
+ def _generate_examples(self, filepath):
103
+ logger.info("⏳ Generating examples from = %s", filepath)
104
+ with open(filepath, encoding="utf-8") as f:
105
+ current_tokens = []
106
+ current_labels = []
107
+ sentence_counter = 0
108
+ for row in f:
109
+ row = row.rstrip()
110
+ if row:
111
+ if "\t" in row:
112
+ token, label = row.split("\t")
113
+ current_tokens.append(token)
114
+ current_labels.append(label)
 
 
 
 
115
  else:
116
+ # Handle cases where the delimiter is missing
117
+ # You can choose to skip these rows or handle them differently
118
+ logger.warning(f"Delimiter missing in row: {row}")
119
+ else:
120
+ # New sentence
121
+ if not current_tokens:
122
+ # Consecutive empty lines will cause empty sentences
123
+ continue
124
+ assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
125
+ sentence = (
126
+ sentence_counter,
127
+ {
128
+ "id": str(sentence_counter),
129
+ "tokens": current_tokens,
130
+ "ner_tags": current_labels,
131
+ },
132
+ )
133
+ sentence_counter += 1
134
+ current_tokens = []
135
+ current_labels = []
136
+ yield sentence
137
+ # Don't forget the last sentence in the dataset 🧐
138
+ if current_tokens:
139
+ yield sentence_counter, {
140
+ "id": str(sentence_counter),
141
+ "tokens": current_tokens,
142
+ "ner_tags": current_labels,
143
+ }