Kriyans committed on
Commit 6217f1a • 1 Parent(s): 58dd593

Update indian_names.py

Files changed (1)
  1. indian_names.py +76 -76
indian_names.py CHANGED
@@ -58,84 +58,84 @@ class indian_names(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
         ]

-    def _generate_examples(self, filepath):
-        logger.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as f:
-            current_tokens = []
-            current_labels = []
-            sentence_counter = 0
-            for row in f:
-                row = row.rstrip()
-                if row:
-                    # Check if the delimiter ("\t") is present in the row
-                    if "\t" in row:
                         token, label = row.split("\t")
                         current_tokens.append(token)
                         current_labels.append(label)
                     else:
-                        # Handle cases where the delimiter is missing
-                        # You can choose to skip these rows or handle them differently
-                        logger.warning(f"Delimiter missing in row: {row}")
-                else:
-                    # New sentence
-                    if not current_tokens:
-                        # Consecutive empty lines will cause empty sentences
-                        continue
-                    assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
-                    sentence = (
-                        sentence_counter,
-                        {
-                            "id": str(sentence_counter),
-                            "tokens": current_tokens,
-                            "ner_tags": current_labels,
-                        },
-                    )
-                    sentence_counter += 1
-                    current_tokens = []
-                    current_labels = []
-                    yield sentence
-            # Don't forget the last sentence in the dataset 🧐
-            if current_tokens:
-                yield sentence_counter, {
-                    "id": str(sentence_counter),
-                    "tokens": current_tokens,
-                    "ner_tags": current_labels,
-                }
-
-    # def _generate_examples(self, filepath):
-    #     logger.info("⏳ Generating examples from = %s", filepath)
-    #     with open(filepath, encoding="utf-8") as f:
-    #         current_tokens = []
-    #         current_labels = []
-    #         sentence_counter = 0
-    #         for row in f:
-    #             row = row.rstrip()
-    #             if row:
-    #                 token, label = row.split("\t")
-    #                 current_tokens.append(token)
-    #                 current_labels.append(label)
-    #             else:
-    #                 # New sentence
-    #                 if not current_tokens:
-    #                     # Consecutive empty lines will cause empty sentences
-    #                     continue
-    #                 assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
-    #                 sentence = (
-    #                     sentence_counter,
-    #                     {
-    #                         "id": str(sentence_counter),
-    #                         "tokens": current_tokens,
-    #                         "ner_tags": current_labels,
-    #                     },
-    #                 )
-    #                 sentence_counter += 1
-    #                 current_tokens = []
-    #                 current_labels = []
-    #                 yield sentence
-    #         # Don't forget last sentence in dataset 🧐
-    #         if current_tokens:
-    #             yield sentence_counter, {
-    #                 "id": str(sentence_counter),
-    #                 "tokens": current_tokens,
-    #                 "ner_tags": current_labels,
-    #             }
 
             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
         ]

+    # def _generate_examples(self, filepath):
+    #     logger.info("⏳ Generating examples from = %s", filepath)
+    #     with open(filepath, encoding="utf-8") as f:
+    #         current_tokens = []
+    #         current_labels = []
+    #         sentence_counter = 0
+    #         for row in f:
+    #             row = row.rstrip()
+    #             if row:
+    #                 # Check if the delimiter ("\t") is present in the row
+    #                 if "\t" in row:
+    #                     token, label = row.split("\t")
+    #                     current_tokens.append(token)
+    #                     current_labels.append(label)
+    #                 else:
+    #                     # Handle cases where the delimiter is missing
+    #                     # You can choose to skip these rows or handle them differently
+    #                     logger.warning(f"Delimiter missing in row: {row}")
+    #             else:
+    #                 # New sentence
+    #                 if not current_tokens:
+    #                     # Consecutive empty lines will cause empty sentences
+    #                     continue
+    #                 assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
+    #                 sentence = (
+    #                     sentence_counter,
+    #                     {
+    #                         "id": str(sentence_counter),
+    #                         "tokens": current_tokens,
+    #                         "ner_tags": current_labels,
+    #                     },
+    #                 )
+    #                 sentence_counter += 1
+    #                 current_tokens = []
+    #                 current_labels = []
+    #                 yield sentence
+    #         # Don't forget the last sentence in the dataset 🧐
+    #         if current_tokens:
+    #             yield sentence_counter, {
+    #                 "id": str(sentence_counter),
+    #                 "tokens": current_tokens,
+    #                 "ner_tags": current_labels,
+    #             }
+
+    def _generate_examples(self, filepath):
+        logger.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as f:
+            current_tokens = []
+            current_labels = []
+            sentence_counter = 0
+            for row in f:
+                row = row.rstrip()
+                if row:
                     token, label = row.split("\t")
                     current_tokens.append(token)
                     current_labels.append(label)
                 else:
+                    # New sentence
+                    if not current_tokens:
+                        # Consecutive empty lines will cause empty sentences
+                        continue
+                    assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
+                    sentence = (
+                        sentence_counter,
+                        {
+                            "id": str(sentence_counter),
+                            "tokens": current_tokens,
+                            "ner_tags": current_labels,
+                        },
+                    )
+                    sentence_counter += 1
+                    current_tokens = []
+                    current_labels = []
+                    yield sentence
+            # Don't forget last sentence in dataset 🧐
+            if current_tokens:
+                yield sentence_counter, {
+                    "id": str(sentence_counter),
+                    "tokens": current_tokens,
+                    "ner_tags": current_labels,
+                }
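
Note: the version made active by this commit unpacks every non-empty row with row.split("\t"), so a row without a tab now raises ValueError instead of being logged and skipped as in the commented-out variant. Below is a minimal standalone sketch of the file format the generator expects (tab-delimited token/label rows, blank lines between sentences) and of the same parsing loop; the sample tokens and NER tags are hypothetical, not taken from the dataset.

# Standalone sketch of the parsing loop made active by this commit.
# The sample rows below are hypothetical; the real input is the train
# split file downloaded by dl_manager.
import io

sample = io.StringIO(
    "Rahul\tB-PER\n"
    "Sharma\tI-PER\n"
    "\n"  # a blank line ends a sentence
    "Priya\tB-PER\n"
)

current_tokens, current_labels, sentences = [], [], []
for row in sample:
    row = row.rstrip()
    if row:
        # Unpacking raises ValueError when the tab is missing,
        # since the delimiter guard is disabled in this commit.
        token, label = row.split("\t")
        current_tokens.append(token)
        current_labels.append(label)
    elif current_tokens:  # blank line: flush the finished sentence
        sentences.append((current_tokens, current_labels))
        current_tokens, current_labels = [], []
if current_tokens:  # the last sentence may lack a trailing blank line
    sentences.append((current_tokens, current_labels))

print(sentences)
# [(['Rahul', 'Sharma'], ['B-PER', 'I-PER']), (['Priya'], ['B-PER'])]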