Kriyans committed on
Commit 8a48f8c • 1 Parent(s): 1de13a4

Update indian_names.py

Files changed (1)
  1. indian_names.py +82 -82
indian_names.py CHANGED
@@ -47,91 +47,91 @@ class indian_names(datasets.GeneratorBasedBuilder):
             supervised_keys=None,
         )
 
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        urls_to_download = {
-            "train": f"{_URL}",
-        }
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
+    # def _split_generators(self, dl_manager):
+    #     """Returns SplitGenerators."""
+    #     urls_to_download = {
+    #         "train": f"{_URL}",
+    #     }
+    #     downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-        ]
+    #     return [
+    #         datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+    #     ]
 
-    def _generate_examples(self, filepath):
-        logger.info("⏳ Generating examples from = %s", filepath)
-        with open(filepath, encoding="utf-8") as f:
-            current_tokens = []
-            current_labels = []
-            sentence_counter = 0
-            for row in f:
-                row = row.rstrip()
-                if row:
-                    # Check if the delimiter ("\t") is present in the row
-                    if "\t" in row:
-                        token, label = row.split("\t")
-                        current_tokens.append(token)
-                        current_labels.append(label)
-                else:
-                    # New sentence
-                    if not current_tokens:
-                        # Consecutive empty lines will cause empty sentences
-                        continue
-                    assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
-                    sentence = (
-                        sentence_counter,
-                        {
-                            "id": str(sentence_counter),
-                            "tokens": current_tokens,
-                            "ner_tags": current_labels,
-                        },
-                    )
-                    sentence_counter += 1
-                    current_tokens = []
-                    current_labels = []
-                    yield sentence
-            # Don't forget the last sentence in the dataset 🧐
-            if current_tokens:
-                yield sentence_counter, {
-                    "id": str(sentence_counter),
-                    "tokens": current_tokens,
-                    "ner_tags": current_labels,
-                }
-
-    # def _generate_examples(self, filepath):
-    #     logger.info("⏳ Generating examples from = %s", filepath)
+    # def _generate_examples(self, filepath):
+    #     logger.info("⏳ Generating examples from = %s", filepath)
     #     with open(filepath, encoding="utf-8") as f:
-    #         current_tokens = []
-    #         current_labels = []
-    #         sentence_counter = 0
-    #         for row in f:
-    #             row = row.rstrip()
-    #             if row:
+    #         current_tokens = []
+    #         current_labels = []
+    #         sentence_counter = 0
+    #         for row in f:
+    #             row = row.rstrip()
+    #             if row:
+    #                 # Check if the delimiter ("\t") is present in the row
+    #                 if "\t" in row:
     #                 token, label = row.split("\t")
    #                 current_tokens.append(token)
     #                 current_labels.append(label)
-    #             else:
-    #                 # New sentence
-    #                 if not current_tokens:
-    #                     # Consecutive empty lines will cause empty sentences
-    #                     continue
-    #                 assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
-    #                 sentence = (
-    #                     sentence_counter,
-    #                     {
-    #                         "id": str(sentence_counter),
-    #                         "tokens": current_tokens,
-    #                         "ner_tags": current_labels,
-    #                     },
-    #                 )
-    #                 sentence_counter += 1
-    #                 current_tokens = []
-    #                 current_labels = []
-    #                 yield sentence
-    #         # Don't forget last sentence in dataset 🧐
-    #         if current_tokens:
-    #             yield sentence_counter, {
-    #                 "id": str(sentence_counter),
-    #                 "tokens": current_tokens,
-    #                 "ner_tags": current_labels,
-    #             }
+    #             else:
+    #                 # New sentence
+    #                 if not current_tokens:
+    #                     # Consecutive empty lines will cause empty sentences
+    #                     continue
+    #                 assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
+    #                 sentence = (
+    #                     sentence_counter,
+    #                     {
+    #                         "id": str(sentence_counter),
+    #                         "tokens": current_tokens,
+    #                         "ner_tags": current_labels,
+    #                     },
+    #                 )
+    #                 sentence_counter += 1
+    #                 current_tokens = []
+    #                 current_labels = []
+    #                 yield sentence
+    #         # Don't forget the last sentence in the dataset 🧐
+    #         if current_tokens:
+    #             yield sentence_counter, {
+    #                 "id": str(sentence_counter),
+    #                 "tokens": current_tokens,
+    #                 "ner_tags": current_labels,
+    #             }
+
+    def _generate_examples(self, filepath):
+        logger.info("⏳ Generating examples from = %s", filepath)
+        with open(filepath, encoding="utf-8") as f:
+            current_tokens = []
+            current_labels = []
+            sentence_counter = 0
+            for row in f:
+                row = row.rstrip()
+                if row:
+                    token, label = row.split("\t")
+                    current_tokens.append(token)
+                    current_labels.append(label)
+                else:
+                    # New sentence
+                    if not current_tokens:
+                        # Consecutive empty lines will cause empty sentences
+                        continue
+                    assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
+                    sentence = (
+                        sentence_counter,
+                        {
+                            "id": str(sentence_counter),
+                            "tokens": current_tokens,
+                            "ner_tags": current_labels,
+                        },
+                    )
+                    sentence_counter += 1
+                    current_tokens = []
+                    current_labels = []
+                    yield sentence
+            # Don't forget last sentence in dataset 🧐
+            if current_tokens:
+                yield sentence_counter, {
+                    "id": str(sentence_counter),
+                    "tokens": current_tokens,
+                    "ner_tags": current_labels,
+                }
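For context, here is a minimal, self-contained sketch of the parsing loop this commit makes active. The input format is inferred from the code itself (one token<TAB>label pair per line, with blank lines separating records); the sample rows and the standalone generate_examples helper name are invented for illustration. Note one behavioral consequence of the change: with the old `if "\t" in row:` guard removed, a non-empty line that lacks a tab now raises ValueError instead of being silently skipped.

import io

# Invented sample data mimicking the assumed TSV format of the train file.
sample = io.StringIO(
    "Rohan\tB-PER\n"
    "Sharma\tI-PER\n"
    "\n"
    "Priya\tB-PER\n"
)

def generate_examples(f):
    """Standalone version of the now-active _generate_examples loop."""
    current_tokens, current_labels = [], []
    sentence_counter = 0
    for row in f:
        row = row.rstrip()
        if row:
            # The commit drops the tab-presence check, so a malformed
            # line (no tab, or more than one tab) raises ValueError here.
            token, label = row.split("\t")
            current_tokens.append(token)
            current_labels.append(label)
        else:
            if not current_tokens:
                continue  # consecutive blank lines would yield empty records
            yield sentence_counter, {
                "id": str(sentence_counter),
                "tokens": current_tokens,
                "ner_tags": current_labels,
            }
            sentence_counter += 1
            current_tokens, current_labels = [], []
    if current_tokens:  # flush the final record if the file lacks a trailing blank line
        yield sentence_counter, {
            "id": str(sentence_counter),
            "tokens": current_tokens,
            "ner_tags": current_labels,
        }

for idx, example in generate_examples(sample):
    print(idx, example)
# 0 {'id': '0', 'tokens': ['Rohan', 'Sharma'], 'ner_tags': ['B-PER', 'I-PER']}
# 1 {'id': '1', 'tokens': ['Priya'], 'ner_tags': ['B-PER']}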