Datasets:
Edward J. Schwartz
committed on
Commit
•
f35dde1
1
Parent(s):
a4aefde
Remove script from this dataset
Browse files- oo-method-test.py +0 -61
oo-method-test.py
DELETED
@@ -1,61 +0,0 @@
|
|
1 |
-
#!/usr/bin/python
|
2 |
-
|
3 |
-
import datasets
|
4 |
-
|
5 |
-
import pyarrow as pa
|
6 |
-
import pyarrow.parquet as pq
|
7 |
-
|
8 |
-
_DATA_FILES = ['data/combined-00009-of-00013-97a88bccf4215954.parquet',
|
9 |
-
'data/combined-00004-of-00013-119d653561443d7b.parquet',
|
10 |
-
'data/combined-00007-of-00013-ab54cce4ee6331d0.parquet',
|
11 |
-
'data/combined-00002-of-00013-149f5d0d22fe8f52.parquet',
|
12 |
-
'data/combined-00003-of-00013-426af6f6064e67dd.parquet',
|
13 |
-
'data/combined-00010-of-00013-89d7565c5f0d2e4e.parquet',
|
14 |
-
'data/combined-00000-of-00013-36d239509fb9e430.parquet',
|
15 |
-
'data/combined-00005-of-00013-363bba92a2b7f737.parquet',
|
16 |
-
'data/combined-00006-of-00013-4d4d574c9d87176e.parquet',
|
17 |
-
'data/combined-00001-of-00013-d5b44e96ad7d2927.parquet',
|
18 |
-
'data/combined-00012-of-00013-84cf41ef75dd5b76.parquet',
|
19 |
-
'data/combined-00011-of-00013-4c21766cedd5a4a0.parquet',
|
20 |
-
'data/combined-00008-of-00013-674f74b6f2288c61.parquet']
|
21 |
-
|
22 |
-
class OOMethodTestDataset(datasets.ArrowBasedBuilder):
|
23 |
-
def __init__(self, *args, **kwargs):
|
24 |
-
super().__init__(*args, **kwargs)
|
25 |
-
|
26 |
-
def _info(self):
|
27 |
-
return datasets.DatasetInfo()
|
28 |
-
|
29 |
-
def _split_generators(self, dl_manager):
|
30 |
-
files = _DATA_FILES
|
31 |
-
downloaded_files = dl_manager.download(files)
|
32 |
-
|
33 |
-
#print(files)
|
34 |
-
#print(downloaded_files)
|
35 |
-
|
36 |
-
return [
|
37 |
-
datasets.SplitGenerator(
|
38 |
-
name="combined",
|
39 |
-
gen_kwargs={
|
40 |
-
"files": downloaded_files,
|
41 |
-
},
|
42 |
-
),
|
43 |
-
]
|
44 |
-
|
45 |
-
def _generate_tables(self, files):
|
46 |
-
for file_idx, file in enumerate(files):
|
47 |
-
with open(file, "rb") as f:
|
48 |
-
parquet_file = pq.ParquetFile(f)
|
49 |
-
try:
|
50 |
-
for batch_idx, record_batch in enumerate(
|
51 |
-
parquet_file.iter_batches(batch_size=10_000)
|
52 |
-
):
|
53 |
-
pa_table = pa.Table.from_batches([record_batch])
|
54 |
-
# Uncomment for debugging (will print the Arrow table size and elements)
|
55 |
-
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
|
56 |
-
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
|
57 |
-
yield f"{file_idx}_{batch_idx}", pa_table
|
58 |
-
except ValueError as e:
|
59 |
-
#logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
|
60 |
-
raise
|
61 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|