Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: English
Size: 1K<n<10K
License:
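For reference, a token-classification dataset that ships a loading script like the `indian_names.py` touched in the commit below is typically consumed through `datasets.load_dataset`. This is a hedged usage sketch: the repository id is a placeholder rather than this dataset's actual namespace, and `trust_remote_code=True` is only required on recent `datasets` versions that gate script-based loaders.

```python
from datasets import load_dataset

# Placeholder repository id; replace with this dataset's actual "namespace/name".
ds = load_dataset("your-namespace/indian_names", split="train", trust_remote_code=True)
print(ds[0])  # expected to expose token and NER-tag fields, per the card's task tags
```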
Update indian_names.py

indian_names.py CHANGED (+3 -1)

@@ -35,7 +35,8 @@ class indian_names(datasets.GeneratorBasedBuilder):
             supervised_keys=None,
         )
 
-    def _split_generators(self, dl_manager):
+
+    def _split_generators(self, dl_manager):
         urls_to_download = {
             "train": f"file://{csv_file_path}",
         }
@@ -45,6 +46,7 @@ class indian_names(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
         ]
 
+
     def _generate_examples(self, filepath):
         with open(filepath, encoding="utf-8") as f:
             current_tokens = []
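The diff shows only fragments of `indian_names.py`, so below is a minimal sketch of how a `datasets.GeneratorBasedBuilder` of this shape typically fits together. The feature names, the label set, the "token,tag" file format with blank lines between records, and the `csv_file_path` handling are assumptions made for illustration; they are not taken from the actual script.

```python
# Minimal sketch, not the real indian_names.py: features, labels, and the
# token,tag file format below are assumptions made for illustration.
import datasets

csv_file_path = "indian_names.csv"  # hypothetical local path


class indian_names(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # Assumed label set for a person-name NER task.
                    "ner_tags": datasets.Sequence(
                        datasets.ClassLabel(names=["O", "B-PER", "I-PER"])
                    ),
                }
            ),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        urls_to_download = {
            "train": f"file://{csv_file_path}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
        ]

    def _generate_examples(self, filepath):
        # Accumulate token/tag pairs until a blank line, then emit one example.
        with open(filepath, encoding="utf-8") as f:
            current_tokens, current_tags = [], []
            guid = 0
            for line in f:
                line = line.strip()
                if not line:
                    if current_tokens:
                        yield guid, {"tokens": current_tokens, "ner_tags": current_tags}
                        guid += 1
                        current_tokens, current_tags = [], []
                    continue
                token, tag = line.split(",")  # assumed two-column "token,tag" rows
                current_tokens.append(token)
                current_tags.append(tag)
            # Flush the last record if the file has no trailing blank line.
            if current_tokens:
                yield guid, {"tokens": current_tokens, "ner_tags": current_tags}
```

The blank-line-separated accumulation mirrors the `current_tokens = []` buffer visible at the end of the second hunk; the real script may parse its CSV differently.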