Datasets:
debug
Browse files — nbnn_language_detection.py (+10, −4 lines)
nbnn_language_detection.py
CHANGED
@@ -7,7 +7,7 @@ class NbnnLanguageDetection(DatasetBuilder):
|
|
7 |
VERSION = "0.1.0"
|
8 |
|
9 |
def _info(self):
|
10 |
-
print("
|
11 |
return DatasetInfo(
|
12 |
features=Features({
|
13 |
'text': Value('string'),
|
@@ -17,7 +17,7 @@ class NbnnLanguageDetection(DatasetBuilder):
|
|
17 |
)
|
18 |
|
19 |
def _split_generators(self, dl_manager):
|
20 |
-
print("
|
21 |
urls = {
|
22 |
'train': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/train.jsonl',
|
23 |
'dev': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/dev.jsonl',
|
@@ -25,7 +25,7 @@ class NbnnLanguageDetection(DatasetBuilder):
|
|
25 |
}
|
26 |
|
27 |
downloaded_files = dl_manager.download(urls)
|
28 |
-
print(f"Downloaded files: {downloaded_files}")
|
29 |
|
30 |
return [
|
31 |
SplitGenerator(name=split, gen_kwargs={'filepath': downloaded_files[split]})
|
@@ -33,12 +33,18 @@ class NbnnLanguageDetection(DatasetBuilder):
|
|
33 |
]
|
34 |
|
35 |
def _generate_examples(self, filepath):
|
36 |
-
print(f"
|
37 |
with open(filepath, 'r') as f:
|
38 |
for id, line in enumerate(f):
|
|
|
39 |
data = json.loads(line)
|
|
|
40 |
yield id, {
|
41 |
'text': data['text'],
|
42 |
'language': data['language']
|
43 |
}
|
44 |
|
|
|
|
|
|
|
|
|
|
7 |
VERSION = "0.1.0"
|
8 |
|
9 |
def _info(self):
|
10 |
+
print("DEBUG: Inside _info method")
|
11 |
return DatasetInfo(
|
12 |
features=Features({
|
13 |
'text': Value('string'),
|
|
|
17 |
)
|
18 |
|
19 |
def _split_generators(self, dl_manager):
|
20 |
+
print("DEBUG: Inside _split_generators method")
|
21 |
urls = {
|
22 |
'train': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/train.jsonl',
|
23 |
'dev': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/dev.jsonl',
|
|
|
25 |
}
|
26 |
|
27 |
downloaded_files = dl_manager.download(urls)
|
28 |
+
print(f"DEBUG: Downloaded files: {downloaded_files}")
|
29 |
|
30 |
return [
|
31 |
SplitGenerator(name=split, gen_kwargs={'filepath': downloaded_files[split]})
|
|
|
33 |
]
|
34 |
|
35 |
def _generate_examples(self, filepath):
|
36 |
+
print(f"DEBUG: Inside _generate_examples method with filepath: {filepath}")
|
37 |
with open(filepath, 'r') as f:
|
38 |
for id, line in enumerate(f):
|
39 |
+
print(f"DEBUG: Processing line {id}")
|
40 |
data = json.loads(line)
|
41 |
+
print(f"DEBUG: Yielding id: {id}, text: {data['text']}, language: {data['language']}")
|
42 |
yield id, {
|
43 |
'text': data['text'],
|
44 |
'language': data['language']
|
45 |
}
|
46 |
|
47 |
+
def _prepare_split(self, *args, **kwargs):
    # Debug override: announce when the builder framework invokes split
    # preparation, then delegate to the parent implementation unchanged.
    # NOTE(review): the *args/**kwargs pass-through keeps this compatible
    # with whatever signature the parent class uses — presumably
    # DatasetBuilder._prepare_split (per the diff header); confirm the
    # parent actually defines this method before relying on super().
    print("DEBUG: Inside _prepare_split method, which should not be called directly.")
    super()._prepare_split(*args, **kwargs)
|
50 |
+
|