Commit 97a3908
manu committed
Parent: a1b7f29

Delete french-bench-grammar-vocab-reading.py

french-bench-grammar-vocab-reading.py DELETED
@@ -1,95 +0,0 @@
- """FrenchBench dataset."""
- import itertools
- from dataclasses import dataclass
- from typing import List, Optional
-
- import pyarrow as pa
- import pyarrow.parquet as pq
-
- import datasets
- from datasets.table import table_cast
- from datasets import SplitInfo
-
-
- logger = datasets.utils.logging.get_logger(__name__)
-
- URLS = ["https://huggingface.co/datasets/manu/french-bench-grammar-vocab-reading/resolve/main/data/Grammar-00000-of-00001-1b3667841d70df82.parquet",
-         "https://huggingface.co/datasets/manu/french-bench-grammar-vocab-reading/resolve/main/data/Vocabulary-00000-of-00001-8b228c3d484c6cf3.parquet",
-         "https://huggingface.co/datasets/manu/french-bench-grammar-vocab-reading/resolve/main/data/Reading-00000-of-00001-6ec75512c22ddcfa.parquet"]
-
- @dataclass
- class ParquetConfig(datasets.BuilderConfig):
-     """BuilderConfig for Parquet."""
-
-     batch_size: int = 10_000
-     columns: Optional[List[str]] = None
-     features: Optional[datasets.Features] = None
-
-
- class FrenchBenchGrammarVocabReading(datasets.ArrowBasedBuilder):
-     BUILDER_CONFIG_CLASS = ParquetConfig
-
-     def _info(self):
-         splits = {'Grammar': SplitInfo(name='Grammar', num_bytes=29094, num_examples=119, shard_lengths=None, dataset_name=None), 'Vocabulary': SplitInfo(name='Vocabulary', num_bytes=30944, num_examples=119, shard_lengths=None, dataset_name=None), 'Reading': SplitInfo(name='Reading', num_bytes=115507, num_examples=71, shard_lengths=None, dataset_name=None)}
-         return datasets.DatasetInfo(features=self.config.features, splits=splits)
-
-     def _split_generators(self, dl_manager):
-         """We handle string, list and dicts in datafiles"""
-         # if not self.config.data_files:
-         #     raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
-         data_files = dl_manager.download_and_extract(URLS)
-         if isinstance(data_files, (str, list, tuple)):
-             files = data_files
-             if isinstance(files, str):
-                 files = [files]
-             # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
-             files = [dl_manager.iter_files(file) for file in files]
-             return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
-         splits = []
-         for split_name, files in data_files.items():
-             if isinstance(files, str):
-                 files = [files]
-             # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
-             files = [dl_manager.iter_files(file) for file in files]
-             # Infer features if they are stored in the arrow schema
-             if self.info.features is None:
-                 for file in itertools.chain.from_iterable(files):
-                     with open(file, "rb") as f:
-                         features = datasets.Features.from_arrow_schema(pq.read_schema(f))
-                     if self.config.columns is not None:
-                         features = datasets.Features(
-                             {col: feat for col, feat in features.items() if col in self.config.columns}
-                         )
-                     self.info.features = features
-                     break
-             splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
-         return splits
-
-     def _cast_table(self, pa_table: pa.Table) -> pa.Table:
-         if self.info.features is not None:
-             # more expensive cast to support nested features with keys in a different order
-             # allows str <-> int/float or str to Audio for example
-             pa_table = table_cast(pa_table, self.info.features.arrow_schema)
-         return pa_table
-
-     def _generate_tables(self, files):
-         if self.config.features is not None and self.config.columns is not None:
-             if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):
-                 raise ValueError(
-                     f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
-                 )
-         for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
-             with open(file, "rb") as f:
-                 parquet_file = pq.ParquetFile(f)
-                 try:
-                     for batch_idx, record_batch in enumerate(
-                         parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
-                     ):
-                         pa_table = pa.Table.from_batches([record_batch])
-                         # Uncomment for debugging (will print the Arrow table size and elements)
-                         # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
-                         # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
-                         yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
-                 except ValueError as e:
-                     logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
-                     raise
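
The deleted _split_generators inferred datasets.Features from the parquet schema of the first file rather than hard-coding them. That step reduces to a two-call pattern; a minimal sketch, where the local file name is illustrative (a downloaded copy of one shard), not something the original script referenced:

    import pyarrow.parquet as pq
    import datasets

    # Read only the schema (no row data), then map it to datasets.Features,
    # as the deleted builder did when self.info.features was None.
    with open("Grammar-00000-of-00001-1b3667841d70df82.parquet", "rb") as f:
        features = datasets.Features.from_arrow_schema(pq.read_schema(f))
    print(features)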
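
Likewise, the batched read in the deleted _generate_tables boils down to a small standalone pattern: open the parquet file, iterate fixed-size record batches, and wrap each batch in a single-batch Arrow table. A minimal sketch of that pattern; the helper name iter_parquet_tables is illustrative, not part of the original script:

    import pyarrow as pa
    import pyarrow.parquet as pq

    def iter_parquet_tables(path, batch_size=10_000, columns=None):
        """Yield (index, pyarrow.Table) chunks from a parquet file,
        mirroring the batched read in the deleted builder."""
        parquet_file = pq.ParquetFile(path)
        for batch_idx, record_batch in enumerate(
            parquet_file.iter_batches(batch_size=batch_size, columns=columns)
        ):
            # Wrap each record batch in a one-batch Table, as the builder did
            yield batch_idx, pa.Table.from_batches([record_batch])

    # Hypothetical usage with a locally downloaded shard:
    # for idx, table in iter_parquet_tables("Grammar-00000-of-00001-1b3667841d70df82.parquet"):
    #     print(idx, table.num_rows)

Reading in record batches keeps memory bounded by batch_size rows instead of materializing the whole file, which is why the builder yielded one table per batch.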
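
With the loading script deleted, the parquet shards under data/ can be read without any custom builder. A minimal sketch, assuming the repository layout is unchanged after this commit and that the Hub's built-in parquet support maps the three files to the split names from the deleted _info (an assumption about repo metadata, not something this commit shows):

    from datasets import load_dataset

    # Assumption: after this commit the dataset loads via the packaged
    # parquet builder rather than the deleted script.
    ds = load_dataset("manu/french-bench-grammar-vocab-reading")
    print(ds)  # splits expected from _info: Grammar, Vocabulary, Reading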