python_code | repo_name | file_path
---|---|---|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
"""
Get the writer_batch_size that defines the maximum row group size in the parquet files.
The default in `datasets` is 1,000 but we lower it to 100 for image datasets.
This allows optimizing random access to the parquet file, since accessing one row requires
reading its entire row group.
This can be improved to get an optimized size for querying/iterating,
but at least it matches the dataset viewer expectations on HF.
Args:
features (`datasets.Features`):
    Features of the dataset.
Returns:
writer_batch_size (`Optional[int]`):
Writer batch size to pass to a dataset builder.
If `None`, then it will use the `datasets` default.
"""
batch_size = np.inf
def set_batch_size(feature: FeatureType) -> None:
nonlocal batch_size
if isinstance(feature, Image):
batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
elif isinstance(feature, Audio):
batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
elif isinstance(feature, Value) and feature.dtype == "binary":
batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)
_visit(features, set_batch_size)
return None if batch_size is np.inf else batch_size
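# --- Hedged usage sketch (illustrative, not part of the original module) ---
# With the names imported above, features containing an Image column yield the
# reduced image row-group size from `config`, while purely scalar features fall
# back to the library default (signalled by returning None).
def _example_get_writer_batch_size() -> None:
    image_features = Features({"image": Image(), "label": Value("int64")})
    scalar_features = Features({"label": Value("int64")})
    assert get_writer_batch_size(image_features) == config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS
    assert get_writer_batch_size(scalar_features) is None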
class ParquetDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
self.builder = Parquet(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
hash=hash,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
class ParquetDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
**parquet_writer_kwargs,
):
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size or get_writer_batch_size(dataset.features)
self.parquet_writer_kwargs = parquet_writer_kwargs
def write(self) -> int:
batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with open(self.path_or_buf, "wb+") as buffer:
written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
else:
written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
return written
def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
"""Writes the pyarrow table as Parquet to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0
_ = parquet_writer_kwargs.pop("path_or_buf", None)
schema = self.dataset.features.arrow_schema
writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
for offset in logging.tqdm(
range(0, len(self.dataset), batch_size),
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating parquet from Arrow format",
):
batch = query_table(
table=self.dataset._data,
key=slice(offset, offset + batch_size),
indices=self.dataset._indices if self.dataset._indices is not None else None,
)
writer.write_table(batch)
written += batch.nbytes
writer.close()
return written
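# --- Hedged usage sketch (illustrative, not part of the original module) ---
# ParquetDatasetWriter is the helper used by `Dataset.to_parquet`; the value it
# returns is the number of bytes of Arrow data written batch by batch, not the
# final on-disk parquet size. The reader side rebuilds a Dataset from the file.
def _example_parquet_roundtrip(tmp_dir: str) -> None:
    ds = Dataset.from_dict({"text": ["a", "b", "c"]})
    num_bytes = ParquetDatasetWriter(ds, f"{tmp_dir}/data.parquet").write()
    assert num_bytes > 0
    reloaded = ParquetDatasetReader(f"{tmp_dir}/data.parquet", cache_dir=tmp_dir).read()
    assert reloaded["text"] == ["a", "b", "c"]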
| datasets-main | src/datasets/io/parquet.py |
| datasets-main | src/datasets/io/__init__.py |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
def __init__(
self,
generator: Callable,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
gen_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
self.builder = Generator(
cache_dir=cache_dir,
features=features,
generator=generator,
gen_kwargs=gen_kwargs,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split="train")
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
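# --- Hedged usage sketch (illustrative, not part of the original module) ---
# This input stream is what `Dataset.from_generator` builds on: the callable is
# run through the Generator builder and its keyword arguments are forwarded via
# `gen_kwargs`. The resulting dataset always uses the "train" split.
def _example_generator_stream() -> None:
    def squares(n: int):
        for i in range(n):
            yield {"i": i, "square": i * i}

    ds = GeneratorDatasetInputStream(generator=squares, gen_kwargs={"n": 5}).read()
    assert len(ds) == 5 and ds[4] == {"i": 4, "square": 16}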
| datasets-main | src/datasets/io/generator.py |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.csv.csv import Csv
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class CsvDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Csv(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
class CsvDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_csv_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.encoding = "utf-8"
self.to_csv_kwargs = to_csv_kwargs
def write(self) -> int:
_ = self.to_csv_kwargs.pop("path_or_buf", None)
header = self.to_csv_kwargs.pop("header", True)
index = self.to_csv_kwargs.pop("index", False)
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with open(self.path_or_buf, "wb+") as buffer:
written = self._write(file_obj=buffer, header=header, index=index, **self.to_csv_kwargs)
else:
written = self._write(file_obj=self.path_or_buf, header=header, index=index, **self.to_csv_kwargs)
return written
def _batch_csv(self, args):
offset, header, index, to_csv_kwargs = args
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
csv_str = batch.to_pandas().to_csv(
path_or_buf=None, header=header if (offset == 0) else False, index=index, **to_csv_kwargs
)
return csv_str.encode(self.encoding)
def _write(self, file_obj: BinaryIO, header, index, **to_csv_kwargs) -> int:
"""Writes the pyarrow table as CSV to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating CSV from Arrow format",
):
csv_str = self._batch_csv((offset, header, index, to_csv_kwargs))
written += file_obj.write(csv_str)
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for csv_str in logging.tqdm(
pool.imap(
self._batch_csv,
[(offset, header, index, to_csv_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating CSV from Arrow format",
):
written += file_obj.write(csv_str)
return written
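# --- Hedged usage sketch (illustrative, not part of the original module) ---
# CsvDatasetWriter is the helper behind `Dataset.to_csv`. Only the first batch
# carries the header (see `_batch_csv` above); with num_proc > 1 the batches are
# rendered in parallel and written in order.
def _example_csv_write(tmp_dir: str) -> None:
    ds = Dataset.from_dict({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    num_bytes = CsvDatasetWriter(ds, f"{tmp_dir}/data.csv").write()
    assert num_bytes > 0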
| datasets-main | src/datasets/io/csv.py |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Text(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
| datasets-main | src/datasets/io/text.py |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
"""A dataset reader that reads from a Spark DataFrame.
When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be
provided. Streaming is not currently supported.
"""
def __init__(
self,
df: pyspark.sql.DataFrame,
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
streaming: bool = True,
cache_dir: str = None,
keep_in_memory: bool = False,
working_dir: str = None,
load_from_cache_file: bool = True,
file_format: str = "arrow",
**kwargs,
):
super().__init__(
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
**kwargs,
)
self._load_from_cache_file = load_from_cache_file
self._file_format = file_format
self.builder = Spark(
df=df,
features=features,
cache_dir=cache_dir,
working_dir=working_dir,
**kwargs,
)
def read(self):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split)
download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=download_mode,
file_format=self._file_format,
)
return self.builder.as_dataset(split=self.split)
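# --- Hedged usage sketch (illustrative; requires pyspark and a SparkSession) ---
# The reader wraps an existing DataFrame; with streaming=False the rows are
# materialized into the cache via the Spark builder before a map-style Dataset
# is returned.
def _example_spark_reader(cache_dir: str) -> None:
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[*]").getOrCreate()
    df = spark.createDataFrame([(0, "a"), (1, "b")], ["id", "text"])
    ds = SparkDatasetReader(df, streaming=False, cache_dir=cache_dir).read()
    assert sorted(ds["id"]) == [0, 1]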
| datasets-main | src/datasets/io/spark.py |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
def __init__(
self,
sql: Union[str, "sqlalchemy.sql.Selectable"],
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
self.builder = Sql(
cache_dir=cache_dir,
features=features,
sql=sql,
con=con,
**kwargs,
)
def read(self):
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
)
# Build dataset for splits
dataset = self.builder.as_dataset(
split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
class SqlDatasetWriter:
def __init__(
self,
dataset: Dataset,
name: str,
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_sql_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def write(self) -> int:
_ = self.to_sql_kwargs.pop("sql", None)
_ = self.to_sql_kwargs.pop("con", None)
index = self.to_sql_kwargs.pop("index", False)
written = self._write(index=index, **self.to_sql_kwargs)
return written
def _batch_sql(self, args):
offset, index, to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
df = batch.to_pandas()
num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
return num_rows or len(df)
def _write(self, index, **to_sql_kwargs) -> int:
"""Writes the pyarrow table as SQL to a database.
Caller is responsible for opening and closing the SQL connection.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += self._batch_sql((offset, index, to_sql_kwargs))
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql,
[(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += num_rows
return written
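# --- Hedged usage sketch (illustrative, not part of the original module) ---
# SqlDatasetWriter backs `Dataset.to_sql` and SqlDatasetReader backs
# `Dataset.from_sql`. A sqlite3 connection works for writing; reading a table
# name back through a database URI relies on sqlalchemy being installed.
def _example_sql_roundtrip(tmp_dir: str) -> None:
    import sqlite3

    ds = Dataset.from_dict({"x": [1, 2, 3]})
    with sqlite3.connect(f"{tmp_dir}/data.db") as con:
        SqlDatasetWriter(ds, "my_table", con).write()
    reloaded = SqlDatasetReader("my_table", f"sqlite:///{tmp_dir}/data.db", cache_dir=tmp_dir).read()
    assert reloaded["x"] == [1, 2, 3]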
| datasets-main | src/datasets/io/sql.py |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
field: Optional[str] = None,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
self.field = field
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Json(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
field=field,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
class JsonDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_json_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.encoding = "utf-8"
self.to_json_kwargs = to_json_kwargs
def write(self) -> int:
_ = self.to_json_kwargs.pop("path_or_buf", None)
orient = self.to_json_kwargs.pop("orient", "records")
lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
if "index" not in self.to_json_kwargs and orient in ["split", "table"]:
self.to_json_kwargs["index"] = False
compression = self.to_json_kwargs.pop("compression", None)
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
written = self._write(file_obj=buffer, orient=orient, lines=lines, **self.to_json_kwargs)
else:
if compression:
raise NotImplementedError(
f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
" was passed. Please provide a local path instead."
)
written = self._write(file_obj=self.path_or_buf, orient=orient, lines=lines, **self.to_json_kwargs)
return written
def _batch_json(self, args):
offset, orient, lines, to_json_kwargs = args
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, **to_json_kwargs)
if not json_str.endswith("\n"):
json_str += "\n"
return json_str.encode(self.encoding)
def _write(
self,
file_obj: BinaryIO,
orient,
lines,
**to_json_kwargs,
) -> int:
"""Writes the pyarrow table as JSON lines to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating json from Arrow format",
):
json_str = self._batch_json((offset, orient, lines, to_json_kwargs))
written += file_obj.write(json_str)
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json,
[(offset, orient, lines, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating json from Arrow format",
):
written += file_obj.write(json_str)
return written
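# --- Hedged usage sketch (illustrative, not part of the original module) ---
# JsonDatasetWriter backs `Dataset.to_json`. With the default orient="records"
# and lines=True the output is JSON Lines, and a compression codec (e.g. "gzip")
# is forwarded to fsspec when writing to a path.
def _example_json_lines_roundtrip(tmp_dir: str) -> None:
    ds = Dataset.from_dict({"text": ["hello", "world"]})
    JsonDatasetWriter(ds, f"{tmp_dir}/data.jsonl").write()
    reloaded = JsonDatasetReader(f"{tmp_dir}/data.jsonl", cache_dir=tmp_dir).read()
    assert reloaded["text"] == ["hello", "world"]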
| datasets-main | src/datasets/io/json.py |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
def __init__(
self,
path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
self.path_or_paths = path_or_paths
self.split = split if split or isinstance(path_or_paths, dict) else "train"
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.num_proc = num_proc
self.kwargs = kwargs
@abstractmethod
def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class AbstractDatasetInputStream(ABC):
def __init__(
self,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.num_proc = num_proc
self.kwargs = kwargs
@abstractmethod
def read(self) -> Union[Dataset, IterableDataset]:
pass
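# --- Hedged sketch (illustrative, not part of the original module) ---
# A concrete input stream only has to implement `read()`; the base classes just
# store the arguments shared by every reader. The class below is a toy example.
class _InMemoryListReader(AbstractDatasetInputStream):
    def __init__(self, rows: list, **kwargs):
        super().__init__(**kwargs)
        self.rows = rows

    def read(self) -> Dataset:
        # Build a map-style dataset directly from the in-memory rows.
        return Dataset.from_list(self.rows)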
| datasets-main | src/datasets/io/abc.py |
# flake8: noqa
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| datasets-main | src/datasets/features/__init__.py |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" This class handle features definition in datasets and some utilities to display table type."""
import copy
import json
import re
import sys
from collections.abc import Iterable, Mapping
from collections.abc import Sequence as SequenceABC
from dataclasses import InitVar, dataclass, field, fields
from functools import reduce, wraps
from operator import mul
from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union
from typing import Sequence as Sequence_
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.types
from pandas.api.extensions import ExtensionArray as PandasExtensionArray
from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype
from .. import config
from ..naming import camelcase_to_snakecase, snakecase_to_camelcase
from ..table import array_cast
from ..utils import logging
from ..utils.py_utils import asdict, first_non_null_value, zip_dict
from .audio import Audio
from .image import Image, encode_pil_image
from .translation import Translation, TranslationVariableLanguages
logger = logging.get_logger(__name__)
def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str:
"""
_arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype.
In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
"""
if pyarrow.types.is_null(arrow_type):
return "null"
elif pyarrow.types.is_boolean(arrow_type):
return "bool"
elif pyarrow.types.is_int8(arrow_type):
return "int8"
elif pyarrow.types.is_int16(arrow_type):
return "int16"
elif pyarrow.types.is_int32(arrow_type):
return "int32"
elif pyarrow.types.is_int64(arrow_type):
return "int64"
elif pyarrow.types.is_uint8(arrow_type):
return "uint8"
elif pyarrow.types.is_uint16(arrow_type):
return "uint16"
elif pyarrow.types.is_uint32(arrow_type):
return "uint32"
elif pyarrow.types.is_uint64(arrow_type):
return "uint64"
elif pyarrow.types.is_float16(arrow_type):
return "float16" # pyarrow dtype is "halffloat"
elif pyarrow.types.is_float32(arrow_type):
return "float32" # pyarrow dtype is "float"
elif pyarrow.types.is_float64(arrow_type):
return "float64" # pyarrow dtype is "double"
elif pyarrow.types.is_time32(arrow_type):
return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]"
elif pyarrow.types.is_time64(arrow_type):
return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]"
elif pyarrow.types.is_timestamp(arrow_type):
if arrow_type.tz is None:
return f"timestamp[{arrow_type.unit}]"
elif arrow_type.tz:
return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]"
else:
raise ValueError(f"Unexpected timestamp object {arrow_type}.")
elif pyarrow.types.is_date32(arrow_type):
return "date32" # pyarrow dtype is "date32[day]"
elif pyarrow.types.is_date64(arrow_type):
return "date64" # pyarrow dtype is "date64[ms]"
elif pyarrow.types.is_duration(arrow_type):
return f"duration[{arrow_type.unit}]"
elif pyarrow.types.is_decimal128(arrow_type):
return f"decimal128({arrow_type.precision}, {arrow_type.scale})"
elif pyarrow.types.is_decimal256(arrow_type):
return f"decimal256({arrow_type.precision}, {arrow_type.scale})"
elif pyarrow.types.is_binary(arrow_type):
return "binary"
elif pyarrow.types.is_large_binary(arrow_type):
return "large_binary"
elif pyarrow.types.is_string(arrow_type):
return "string"
elif pyarrow.types.is_large_string(arrow_type):
return "large_string"
else:
raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.")
def string_to_arrow(datasets_dtype: str) -> pa.DataType:
"""
string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType.
In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
This is necessary because the datasets.Value() primitive type is constructed using a string dtype
Value(dtype=str)
But Features.type (via `get_nested_type()`) expects to resolve Features into a pyarrow Schema,
which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the
purpose of this function.
"""
def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None):
msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type."
if examples:
examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0]
msg += f"\nValid examples include: {examples}."
if urls:
urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0]
msg += f"\nFor more insformation, see: {urls}."
return msg
if datasets_dtype in pa.__dict__:
return pa.__dict__[datasets_dtype]()
if (datasets_dtype + "_") in pa.__dict__:
return pa.__dict__[datasets_dtype + "_"]()
timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype)
if timestamp_matches:
timestamp_internals = timestamp_matches.group(1)
internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals)
if timestamp_internals in ["s", "ms", "us", "ns"]:
return pa.timestamp(timestamp_internals)
elif internals_matches:
return pa.timestamp(internals_matches.group(1), internals_matches.group(2))
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"timestamp",
examples=["timestamp[us]", "timestamp[us, tz=America/New_York"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"],
)
)
duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype)
if duration_matches:
duration_internals = duration_matches.group(1)
if duration_internals in ["s", "ms", "us", "ns"]:
return pa.duration(duration_internals)
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"duration",
examples=["duration[s]", "duration[us]"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"],
)
)
time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype)
if time_matches:
time_internals_bits = time_matches.group(1)
if time_internals_bits == "32":
time_internals_unit = time_matches.group(2)
if time_internals_unit in ["s", "ms"]:
return pa.time32(time_internals_unit)
else:
raise ValueError(
f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)."
)
elif time_internals_bits == "64":
time_internals_unit = time_matches.group(2)
if time_internals_unit in ["us", "ns"]:
return pa.time64(time_internals_unit)
else:
raise ValueError(
f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)."
)
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"time",
examples=["time32[s]", "time64[us]"],
urls=[
"https://arrow.apache.org/docs/python/generated/pyarrow.time32.html",
"https://arrow.apache.org/docs/python/generated/pyarrow.time64.html",
],
)
)
decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype)
if decimal_matches:
decimal_internals_bits = decimal_matches.group(1)
if decimal_internals_bits == "128":
decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
if decimal_internals_precision_and_scale:
precision = decimal_internals_precision_and_scale.group(1)
scale = decimal_internals_precision_and_scale.group(2)
return pa.decimal128(int(precision), int(scale))
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"decimal128",
examples=["decimal128(10, 2)", "decimal128(4, -2)"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"],
)
)
elif decimal_internals_bits == "256":
decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
if decimal_internals_precision_and_scale:
precision = decimal_internals_precision_and_scale.group(1)
scale = decimal_internals_precision_and_scale.group(2)
return pa.decimal256(int(precision), int(scale))
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"decimal256",
examples=["decimal256(30, 2)", "decimal256(38, -4)"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"],
)
)
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"decimal",
examples=["decimal128(12, 3)", "decimal256(40, 6)"],
urls=[
"https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html",
"https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html",
],
)
)
raise ValueError(
f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. "
f"Please make sure to use a correct data type, see: "
f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions"
)
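# --- Hedged sketch (illustrative, not part of the original module) ---
# Round trip described in the docstrings above: converting an Arrow type to a
# datasets dtype string and back returns an equal Arrow type.
def _example_dtype_round_trip() -> None:
    for dt in (pa.int32(), pa.large_string(), pa.timestamp("us", tz="UTC"), pa.decimal128(10, 2)):
        assert string_to_arrow(_arrow_to_datasets_dtype(dt)) == dt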
def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]:
"""
Cast pytorch/tensorflow/pandas objects to python numpy array/lists.
It works recursively.
If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) whether the first element that is not None or empty (if it is a sequence) has to be cast.
If the first element needs to be cast, then all the elements of the list will be cast; otherwise they'll stay the same.
This trick makes it possible to cast objects that contain tokenizer outputs without iterating over every single token, for example.
Args:
obj: the object (nested struct) to cast.
only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
Indeed Arrow only supports converting 1-dimensional array values.
optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be cast
and, if it doesn't, not checking the rest of the list elements.
Returns:
casted_obj: the casted object
has_changed (bool): True if the object has been changed, False if it is identical
"""
if config.TF_AVAILABLE and "tensorflow" in sys.modules:
import tensorflow as tf
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if config.JAX_AVAILABLE and "jax" in sys.modules:
import jax.numpy as jnp
if config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(obj, np.ndarray):
if obj.ndim == 0:
return obj[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return obj, False
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in obj
],
True,
)
elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor):
if obj.ndim == 0:
return obj.detach().cpu().numpy()[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return obj.detach().cpu().numpy(), True
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in obj.detach().cpu().numpy()
],
True,
)
elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor):
if obj.ndim == 0:
return obj.numpy()[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return obj.numpy(), True
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in obj.numpy()
],
True,
)
elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray):
if obj.ndim == 0:
return np.asarray(obj)[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return np.asarray(obj), True
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in np.asarray(obj)
],
True,
)
elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image):
return encode_pil_image(obj), True
elif isinstance(obj, pd.Series):
return (
_cast_to_python_objects(
obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0],
True,
)
elif isinstance(obj, pd.DataFrame):
return (
{
key: _cast_to_python_objects(
value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for key, value in obj.to_dict("list").items()
},
True,
)
elif isinstance(obj, pd.Timestamp):
return obj.to_pydatetime(), True
elif isinstance(obj, pd.Timedelta):
return obj.to_pytimedelta(), True
elif isinstance(obj, Mapping):
has_changed = not isinstance(obj, dict)
output = {}
for k, v in obj.items():
casted_v, has_changed_v = _cast_to_python_objects(
v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)
has_changed |= has_changed_v
output[k] = casted_v
return output if has_changed else obj, has_changed
elif hasattr(obj, "__array__"):
return (
_cast_to_python_objects(
obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0],
True,
)
elif isinstance(obj, (list, tuple)):
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt):
break
casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects(
first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)
if has_changed_first_elmt or not optimize_list_casting:
return (
[
_cast_to_python_objects(
elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for elmt in obj
],
True,
)
else:
if isinstance(obj, (list, tuple)):
return obj, False
else:
return list(obj), True
else:
return obj, False
else:
return obj, False
def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any:
"""
Cast numpy/pytorch/tensorflow/pandas objects to python lists.
It works recursively.
If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) whether the first element that is not None or empty (if it is a sequence) has to be cast.
If the first element needs to be cast, then all the elements of the list will be cast; otherwise they'll stay the same.
This trick makes it possible to cast objects that contain tokenizer outputs without iterating over every single token, for example.
Args:
obj: the object (nested struct) to cast
only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
Indeed Arrow only supports converting 1-dimensional array values.
optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be cast
and, if it doesn't, not checking the rest of the list elements.
Returns:
casted_obj: the casted object
"""
return _cast_to_python_objects(
obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
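# --- Hedged sketch (illustrative, not part of the original module) ---
# Framework/pandas objects are flattened to plain Python or numpy values; with
# only_1d_for_numpy=True a 2-d array becomes a list of 1-d rows, which is the
# shape Arrow can ingest.
def _example_cast_to_python_objects() -> None:
    import datetime

    out = cast_to_python_objects(
        {"ts": pd.Timestamp("2020-01-01"), "arr": np.arange(4).reshape(2, 2)}, only_1d_for_numpy=True
    )
    assert isinstance(out["ts"], datetime.datetime) and not isinstance(out["ts"], pd.Timestamp)
    assert isinstance(out["arr"], list) and out["arr"][0].shape == (2,)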
@dataclass
class Value:
"""
The `Value` dtypes are as follows:
- `null`
- `bool`
- `int8`
- `int16`
- `int32`
- `int64`
- `uint8`
- `uint16`
- `uint32`
- `uint64`
- `float16`
- `float32` (alias float)
- `float64` (alias double)
- `time32[(s|ms)]`
- `time64[(us|ns)]`
- `timestamp[(s|ms|us|ns)]`
- `timestamp[(s|ms|us|ns), tz=(tzstring)]`
- `date32`
- `date64`
- `duration[(s|ms|us|ns)]`
- `decimal128(precision, scale)`
- `decimal256(precision, scale)`
- `binary`
- `large_binary`
- `string`
- `large_string`
Example:
```py
>>> from datasets import Features
>>> features = Features({'stars': Value(dtype='int32')})
>>> features
{'stars': Value(dtype='int32', id=None)}
```
"""
dtype: str
id: Optional[str] = None
# Automatically constructed
pa_type: ClassVar[Any] = None
_type: str = field(default="Value", init=False, repr=False)
def __post_init__(self):
if self.dtype == "double": # fix inferred type
self.dtype = "float64"
if self.dtype == "float": # fix inferred type
self.dtype = "float32"
self.pa_type = string_to_arrow(self.dtype)
def __call__(self):
return self.pa_type
def encode_example(self, value):
if pa.types.is_boolean(self.pa_type):
return bool(value)
elif pa.types.is_integer(self.pa_type):
return int(value)
elif pa.types.is_floating(self.pa_type):
return float(value)
elif pa.types.is_string(self.pa_type):
return str(value)
else:
return value
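# --- Hedged sketch (illustrative, not part of the original module) ---
# __post_init__ above normalizes the float aliases, and encode_example coerces
# scalars to the declared dtype's Python type.
def _example_value_dtypes() -> None:
    assert Value("double").dtype == "float64"
    assert Value("float").dtype == "float32"
    assert Value("string").encode_example(42) == "42"
    assert Value("int64").encode_example("7") == 7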
class _ArrayXD:
def __post_init__(self):
self.shape = tuple(self.shape)
def __call__(self):
pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype)
return pa_type
def encode_example(self, value):
return value
@dataclass
class Array2D(_ArrayXD):
"""Create a two-dimensional array.
Args:
shape (`tuple`):
The size of each dimension.
dtype (`str`):
The value of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array2D", init=False, repr=False)
@dataclass
class Array3D(_ArrayXD):
"""Create a three-dimensional array.
Args:
shape (`tuple`):
The size of each dimension.
dtype (`str`):
The value of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array3D", init=False, repr=False)
@dataclass
class Array4D(_ArrayXD):
"""Create a four-dimensional array.
Args:
shape (`tuple`):
The size of each dimension.
dtype (`str`):
The value of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array4D", init=False, repr=False)
@dataclass
class Array5D(_ArrayXD):
"""Create a five-dimensional array.
Args:
shape (`tuple`):
The size of each dimension.
dtype (`str`):
The value of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array5D", init=False, repr=False)
class _ArrayXDExtensionType(pa.PyExtensionType):
ndims: Optional[int] = None
def __init__(self, shape: tuple, dtype: str):
if self.ndims is None or self.ndims <= 1:
raise ValueError("You must instantiate an array type with a value for dim that is > 1")
if len(shape) != self.ndims:
raise ValueError(f"shape={shape} and ndims={self.ndims} don't match")
for dim in range(1, self.ndims):
if shape[dim] is None:
raise ValueError(f"Support only dynamic size on first dimension. Got: {shape}")
self.shape = tuple(shape)
self.value_type = dtype
self.storage_dtype = self._generate_dtype(self.value_type)
pa.PyExtensionType.__init__(self, self.storage_dtype)
def __reduce__(self):
return self.__class__, (
self.shape,
self.value_type,
)
def __hash__(self):
return hash((self.__class__, self.shape, self.value_type))
def __arrow_ext_class__(self):
return ArrayExtensionArray
def _generate_dtype(self, dtype):
dtype = string_to_arrow(dtype)
for d in reversed(self.shape):
dtype = pa.list_(dtype)
# Don't specify the size of the list, since fixed length list arrays have issues
# being validated after slicing in pyarrow 0.17.1
return dtype
def to_pandas_dtype(self):
return PandasArrayExtensionDtype(self.value_type)
class Array2DExtensionType(_ArrayXDExtensionType):
ndims = 2
class Array3DExtensionType(_ArrayXDExtensionType):
ndims = 3
class Array4DExtensionType(_ArrayXDExtensionType):
ndims = 4
class Array5DExtensionType(_ArrayXDExtensionType):
ndims = 5
def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool:
"""
When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not.
This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array.
# zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration)
# primitive types are types for which the physical representation in arrow and in numpy is the same
# https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821
# see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy
# and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22
"""
def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType:
if pa.types.is_list(pa_type):
return _unnest_pa_type(pa_type.value_type)
return pa_type
if unnest:
pa_type = _unnest_pa_type(pa_type)
return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type))
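# --- Hedged sketch (illustrative, not part of the original module) ---
# Zero-copy conversion to numpy is possible for primitive numeric types, but not
# for booleans or temporal types; unnest=True looks through list nesting first.
def _example_is_zero_copy_only() -> None:
    assert _is_zero_copy_only(pa.float32())
    assert not _is_zero_copy_only(pa.bool_())
    assert not _is_zero_copy_only(pa.timestamp("s"))
    assert _is_zero_copy_only(pa.list_(pa.int64()), unnest=True)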
class ArrayExtensionArray(pa.ExtensionArray):
def __array__(self):
zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
return self.to_numpy(zero_copy_only=zero_copy_only)
def __getitem__(self, i):
return self.storage[i]
def to_numpy(self, zero_copy_only=True):
storage: pa.ListArray = self.storage
null_mask = storage.is_null().to_numpy(zero_copy_only=False)
if self.type.shape[0] is not None:
size = 1
null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask))
for i in range(self.type.ndims):
size *= self.type.shape[i]
storage = storage.flatten()
numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only)
numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape)
if len(null_indices):
numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0)
else:
shape = self.type.shape
ndims = self.type.ndims
arrays = []
first_dim_offsets = np.array([off.as_py() for off in storage.offsets])
for i, is_null in enumerate(null_mask):
if is_null:
arrays.append(np.nan)
else:
storage_el = storage[i : i + 1]
first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i]
# flatten storage
for _ in range(ndims):
storage_el = storage_el.flatten()
numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only)
arrays.append(numpy_arr.reshape(first_dim, *shape[1:]))
if len(np.unique(np.diff(first_dim_offsets))) > 1:
# ragged
numpy_arr = np.empty(len(arrays), dtype=object)
numpy_arr[:] = arrays
else:
numpy_arr = np.array(arrays)
return numpy_arr
def to_pylist(self):
zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only)
if self.type.shape[0] is None and numpy_arr.dtype == object:
return [arr.tolist() for arr in numpy_arr.tolist()]
else:
return numpy_arr.tolist()
class PandasArrayExtensionDtype(PandasExtensionDtype):
_metadata = "value_type"
def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]):
self._value_type = value_type
def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
if isinstance(array, pa.ChunkedArray):
array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks]))
zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True)
numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only)
return PandasArrayExtensionArray(numpy_arr)
@classmethod
def construct_array_type(cls):
return PandasArrayExtensionArray
@property
def type(self) -> type:
return np.ndarray
@property
def kind(self) -> str:
return "O"
@property
def name(self) -> str:
return f"array[{self.value_type}]"
@property
def value_type(self) -> np.dtype:
return self._value_type
class PandasArrayExtensionArray(PandasExtensionArray):
def __init__(self, data: np.ndarray, copy: bool = False):
self._data = data if not copy else np.array(data)
self._dtype = PandasArrayExtensionDtype(data.dtype)
def __array__(self, dtype=None):
"""
Convert to NumPy Array.
Note that Pandas expects a 1D array when dtype is set to object.
But for other dtypes, the returned shape is the same as the one of ``data``.
More info about pandas 1D requirement for PandasExtensionArray here:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray
"""
if dtype == object:
out = np.empty(len(self._data), dtype=object)
for i in range(len(self._data)):
out[i] = self._data[i]
return out
if dtype is None:
return self._data
else:
return self._data.astype(dtype)
def copy(self, deep: bool = False) -> "PandasArrayExtensionArray":
return PandasArrayExtensionArray(self._data, copy=True)
@classmethod
def _from_sequence(
cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False
) -> "PandasArrayExtensionArray":
if len(scalars) > 1 and all(
isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars
):
data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy)
else:
data = np.empty(len(scalars), dtype=object)
data[:] = scalars
return cls(data, copy=copy)
@classmethod
def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray":
if len(to_concat) > 1 and all(
va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype
for va in to_concat
):
data = np.vstack([va._data for va in to_concat])
else:
data = np.empty(len(to_concat), dtype=object)
data[:] = [va._data for va in to_concat]
return cls(data, copy=False)
@property
def dtype(self) -> PandasArrayExtensionDtype:
return self._dtype
@property
def nbytes(self) -> int:
return self._data.nbytes
def isna(self) -> np.ndarray:
return np.array([pd.isna(arr).any() for arr in self._data])
def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None:
raise NotImplementedError()
def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]:
if isinstance(item, int):
return self._data[item]
return PandasArrayExtensionArray(self._data[item], copy=False)
def take(
self, indices: Sequence_[int], allow_fill: bool = False, fill_value: bool = None
) -> "PandasArrayExtensionArray":
indices: np.ndarray = np.asarray(indices, dtype=int)
if allow_fill:
fill_value = (
self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type)
)
mask = indices == -1
if (indices < -1).any():
raise ValueError("Invalid value in `indices`, must be all >= -1 for `allow_fill` is True")
elif len(self) > 0:
pass
elif not np.all(mask):
raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.")
else:
data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type)
return PandasArrayExtensionArray(data, copy=False)
took = self._data.take(indices, axis=0)
if allow_fill and mask.any():
took[mask] = [fill_value] * np.sum(mask)
return PandasArrayExtensionArray(took, copy=False)
def __len__(self) -> int:
return len(self._data)
def __eq__(self, other) -> np.ndarray:
if not isinstance(other, PandasArrayExtensionArray):
raise NotImplementedError(f"Invalid type to compare to: {type(other)}")
return (self._data == other._data).all()
def pandas_types_mapper(dtype):
if isinstance(dtype, _ArrayXDExtensionType):
return PandasArrayExtensionDtype(dtype.value_type)
@dataclass
class ClassLabel:
"""Feature type for integer class labels.
There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments:
* `num_classes`: Create 0 to (num_classes-1) labels.
* `names`: List of label strings.
* `names_file`: File containing the list of labels.
Under the hood the labels are stored as integers.
You can use negative integers to represent unknown/missing labels.
Args:
num_classes (`int`, *optional*):
Number of classes. All labels must be < `num_classes`.
names (`list` of `str`, *optional*):
String names for the integer classes.
The order in which the names are provided is kept.
names_file (`str`, *optional*):
Path to a file with names for the integer classes, one per line.
Example:
```py
>>> from datasets import Features
>>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])})
>>> features
{'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'], id=None)}
```
"""
num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
names: List[str] = None
names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "int64"
pa_type: ClassVar[Any] = pa.int64()
_str2int: ClassVar[Dict[str, int]] = None
_int2str: ClassVar[List[str]] = None
_type: str = field(default="ClassLabel", init=False, repr=False)
def __post_init__(self, num_classes, names_file):
self.num_classes = num_classes
self.names_file = names_file
if self.names_file is not None and self.names is not None:
raise ValueError("Please provide either names or names_file but not both.")
# Set self.names
if self.names is None:
if self.names_file is not None:
self.names = self._load_names_from_file(self.names_file)
elif self.num_classes is not None:
self.names = [str(i) for i in range(self.num_classes)]
else:
raise ValueError("Please provide either num_classes, names or names_file.")
elif not isinstance(self.names, SequenceABC):
raise TypeError(f"Please provide names as a list, is {type(self.names)}")
# Set self.num_classes
if self.num_classes is None:
self.num_classes = len(self.names)
elif self.num_classes != len(self.names):
raise ValueError(
"ClassLabel number of names do not match the defined num_classes. "
f"Got {len(self.names)} names VS {self.num_classes} num_classes"
)
# Prepare mappings
self._int2str = [str(name) for name in self.names]
self._str2int = {name: i for i, name in enumerate(self._int2str)}
if len(self._int2str) != len(self._str2int):
raise ValueError("Some label names are duplicated. Each label name should be unique.")
def __call__(self):
return self.pa_type
def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]:
"""Conversion class name `string` => `integer`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> ds.features["label"].str2int('neg')
0
```
"""
if not isinstance(values, str) and not isinstance(values, Iterable):
raise ValueError(
f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
)
return_list = True
if isinstance(values, str):
values = [values]
return_list = False
output = [self._strval2int(value) for value in values]
return output if return_list else output[0]
def _strval2int(self, value: str) -> int:
failed_parse = False
value = str(value)
# first attempt - raw string value
int_value = self._str2int.get(value)
if int_value is None:
# second attempt - strip whitespace
int_value = self._str2int.get(value.strip())
if int_value is None:
# third attempt - convert str to int
try:
int_value = int(value)
except ValueError:
failed_parse = True
else:
if int_value < -1 or int_value >= self.num_classes:
failed_parse = True
if failed_parse:
raise ValueError(f"Invalid string class label {value}")
return int_value
def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]:
"""Conversion `integer` => class name `string`.
Regarding unknown/missing labels: passing negative integers raises `ValueError`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> ds.features["label"].int2str(0)
'neg'
```
"""
if not isinstance(values, int) and not isinstance(values, Iterable):
raise ValueError(
f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
)
return_list = True
if isinstance(values, int):
values = [values]
return_list = False
for v in values:
if not 0 <= v < self.num_classes:
raise ValueError(f"Invalid integer class label {v:d}")
output = [self._int2str[int(v)] for v in values]
return output if return_list else output[0]
def encode_example(self, example_data):
if self.num_classes is None:
raise ValueError(
"Trying to use ClassLabel feature with undefined number of class. "
"Please set ClassLabel.names or num_classes."
)
# If a string is given, convert to associated integer
if isinstance(example_data, str):
example_data = self.str2int(example_data)
# Allowing -1 to mean no label.
if not -1 <= example_data < self.num_classes:
raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}")
return example_data
def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array:
"""Cast an Arrow array to the `ClassLabel` arrow storage type.
The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are:
- `pa.string()`
- `pa.int()`
Args:
storage (`Union[pa.StringArray, pa.IntegerArray]`):
PyArrow array to cast.
Returns:
`pa.Int64Array`: Array in the `ClassLabel` arrow storage type.
"""
if isinstance(storage, pa.IntegerArray) and len(storage) > 0:
min_max = pc.min_max(storage).as_py()
if min_max["max"] is not None and min_max["max"] >= self.num_classes:
raise ValueError(
f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}"
)
elif isinstance(storage, pa.StringArray):
storage = pa.array(
[self._strval2int(label) if label is not None else None for label in storage.to_pylist()]
)
return array_cast(storage, self.pa_type)
@staticmethod
def _load_names_from_file(names_filepath):
with open(names_filepath, encoding="utf-8") as f:
return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names
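# A minimal, illustrative usage sketch: the `_example_classlabel_roundtrip` helper below is
# hypothetical (it is not part of the upstream module and is never called by the library code);
# it only exercises the ClassLabel methods defined above on an assumed two-class label.
def _example_classlabel_roundtrip():
    labels = ClassLabel(names=["neg", "pos"])
    assert labels.str2int("pos") == 1  # class name -> integer id
    assert labels.int2str(0) == "neg"  # integer id -> class name
    assert labels.encode_example("neg") == 0  # strings are converted to ids on encoding
    assert labels.encode_example(-1) == -1  # -1 is accepted and means "no label"
    return labels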
@dataclass
class Sequence:
"""Construct a list of feature from a single type or a dict of types.
Mostly here for compatiblity with tfds.
Args:
feature:
A list of features of a single type or a dictionary of types.
length (`int`):
Length of the sequence.
Example:
```py
>>> from datasets import Features, Sequence, Value, ClassLabel
>>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})})
>>> features
{'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)}
```
"""
feature: Any
length: int = -1
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "list"
pa_type: ClassVar[Any] = None
_type: str = field(default="Sequence", init=False, repr=False)
FeatureType = Union[
dict,
list,
tuple,
Value,
ClassLabel,
Translation,
TranslationVariableLanguages,
Sequence,
Array2D,
Array3D,
Array4D,
Array5D,
Audio,
Image,
]
def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool:
"""
Check if the object is not None.
If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence.
"""
if obj is None:
return False
elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, Sequence))):
if len(obj) > 0:
if schema is None:
pass
elif isinstance(schema, (list, tuple)):
schema = schema[0]
else:
schema = schema.feature
return _check_non_null_non_empty_recursive(obj[0], schema)
else:
return False
else:
return True
def get_nested_type(schema: FeatureType) -> pa.DataType:
"""
get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of
generate_from_arrow_type().
It performs double-duty as the implementation of Features.type and handles the conversion of
datasets.Feature->pa.struct
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, Features):
return pa.struct(
{key: get_nested_type(schema[key]) for key in schema}
) # Features is subclass of dict, and dict order is deterministic since Python 3.6
elif isinstance(schema, dict):
return pa.struct(
{key: get_nested_type(schema[key]) for key in schema}
) # however don't sort on struct types since the order matters
elif isinstance(schema, (list, tuple)):
if len(schema) != 1:
raise ValueError("When defining list feature, you should just provide one example of the inner type")
value_type = get_nested_type(schema[0])
return pa.list_(value_type)
elif isinstance(schema, Sequence):
value_type = get_nested_type(schema.feature)
        # We allow converting a list of dicts into a dict of lists for compatibility with tfds
if isinstance(schema.feature, dict):
return pa.struct({f.name: pa.list_(f.type, schema.length) for f in value_type})
return pa.list_(value_type, schema.length)
# Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods)
return schema()
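# A minimal, illustrative sketch: the `_example_get_nested_type` helper below is hypothetical
# (not part of the upstream module, never called by the library code); it shows, under the
# assumption of simple Value features, how dict/list/Sequence schemas map to pyarrow types.
def _example_get_nested_type():
    schema = {"text": Value("string"), "scores": Sequence(Value("float32")), "tags": [Value("string")]}
    expected = pa.struct({"text": pa.string(), "scores": pa.list_(pa.float32()), "tags": pa.list_(pa.string())})
    assert get_nested_type(schema) == expected
    return expected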
def encode_nested_example(schema, obj, level=0):
"""Encode a nested example.
This is used since some features (in particular ClassLabel) have some logic during encoding.
To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded.
If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same.
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, dict):
if level == 0 and obj is None:
raise ValueError("Got None but expected a dictionary instead")
return (
{
k: encode_nested_example(sub_schema, sub_obj, level=level + 1)
for k, (sub_schema, sub_obj) in zip_dict(schema, obj)
}
if obj is not None
else None
)
elif isinstance(schema, (list, tuple)):
sub_schema = schema[0]
if obj is None:
return None
else:
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
break
if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt:
return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj]
return list(obj)
elif isinstance(schema, Sequence):
if obj is None:
return None
        # We allow converting a list of dicts into a dict of lists for compatibility with tfds
if isinstance(schema.feature, dict):
# dict of list to fill
list_dict = {}
if isinstance(obj, (list, tuple)):
# obj is a list of dict
for k, dict_tuples in zip_dict(schema.feature, *obj):
list_dict[k] = [encode_nested_example(dict_tuples[0], o, level=level + 1) for o in dict_tuples[1:]]
return list_dict
else:
# obj is a single dict
for k, (sub_schema, sub_objs) in zip_dict(schema.feature, obj):
list_dict[k] = [encode_nested_example(sub_schema, o, level=level + 1) for o in sub_objs]
return list_dict
# schema.feature is not a dict
if isinstance(obj, str): # don't interpret a string as a list
raise ValueError(f"Got a string but expected a list instead: '{obj}'")
else:
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
break
# be careful when comparing tensors here
if (
not isinstance(first_elmt, list)
or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt
):
return [encode_nested_example(schema.feature, o, level=level + 1) for o in obj]
return list(obj)
# Object with special encoding:
# ClassLabel will convert from string to int, TranslationVariableLanguages does some checks
elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)):
return schema.encode_example(obj) if obj is not None else None
    # Other objects should be directly convertible to a native Arrow type (like Translation)
return obj
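# A minimal, illustrative sketch: the `_example_encode_nested_example` helper below is
# hypothetical (not part of the upstream module, never called by the library code); it assumes
# a small two-column schema and shows that ClassLabel strings are converted to ids while
# plain values pass through unchanged.
def _example_encode_nested_example():
    schema = {"label": ClassLabel(names=["neg", "pos"]), "tokens": [Value("string")]}
    encoded = encode_nested_example(schema, {"label": "pos", "tokens": ["good", "movie"]})
    assert encoded == {"label": 1, "tokens": ["good", "movie"]}
    return encoded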
def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
"""Decode a nested example.
This is used since some features (in particular Audio and Image) have some logic during decoding.
To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded.
If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same.
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, dict):
return (
{k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)}
if obj is not None
else None
)
elif isinstance(schema, (list, tuple)):
sub_schema = schema[0]
if obj is None:
return None
else:
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
break
if decode_nested_example(sub_schema, first_elmt) != first_elmt:
return [decode_nested_example(sub_schema, o) for o in obj]
return list(obj)
elif isinstance(schema, Sequence):
        # We allow converting a list of dicts into a dict of lists for compatibility with tfds
if isinstance(schema.feature, dict):
return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature}
else:
return decode_nested_example([schema.feature], obj)
# Object with special decoding:
elif isinstance(schema, (Audio, Image)):
# we pass the token to read and decode files from private repositories in streaming mode
if obj is not None and schema.decode:
return schema.decode_example(obj, token_per_repo_id=token_per_repo_id)
return obj
def generate_from_dict(obj: Any):
"""Regenerate the nested feature object from a deserialized dict.
    We use the '_type' field to get the name of the feature dataclass to load.
generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax
to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to
:meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any
mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes
that :class:`Value` automatically performs.
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(obj, list):
return [generate_from_dict(value) for value in obj]
# Otherwise we have a dict or a dataclass
if "_type" not in obj or isinstance(obj["_type"], dict):
return {key: generate_from_dict(value) for key, value in obj.items()}
obj = dict(obj)
class_type = globals()[obj.pop("_type")]
if class_type == Sequence:
return Sequence(feature=generate_from_dict(obj["feature"]), length=obj.get("length", -1))
field_names = {f.name for f in fields(class_type)}
return class_type(**{k: v for k, v in obj.items() if k in field_names})
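# A minimal, illustrative sketch: the `_example_generate_from_dict` helper below is
# hypothetical (not part of the upstream module, never called by the library code); it shows
# how the "_type" key in a serialized dict selects the feature class to instantiate, assuming
# a plain Value feature.
def _example_generate_from_dict():
    assert generate_from_dict({"dtype": "int32", "_type": "Value"}) == Value("int32")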
def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType:
"""
generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for
a single field.
This is the high-level arrow->datasets type conversion and is inverted by get_nested_type().
This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the
full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema
"""
if isinstance(pa_type, pa.StructType):
return {field.name: generate_from_arrow_type(field.type) for field in pa_type}
elif isinstance(pa_type, pa.FixedSizeListType):
return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size)
elif isinstance(pa_type, pa.ListType):
feature = generate_from_arrow_type(pa_type.value_type)
if isinstance(feature, (dict, tuple, list)):
return [feature]
return Sequence(feature=feature)
elif isinstance(pa_type, _ArrayXDExtensionType):
array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims]
return array_feature(shape=pa_type.shape, dtype=pa_type.value_type)
elif isinstance(pa_type, pa.DictionaryType):
raise NotImplementedError # TODO(thom) this will need access to the dictionary as well (for labels). I.e. to the py_table
elif isinstance(pa_type, pa.DataType):
return Value(dtype=_arrow_to_datasets_dtype(pa_type))
else:
raise ValueError(f"Cannot convert {pa_type} to a Feature type.")
def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray:
"""Build a PyArrow ListArray from a multidimensional NumPy array"""
arr = np.array(arr)
values = pa.array(arr.flatten(), type=type)
for i in range(arr.ndim - 1):
n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1)
step_offsets = arr.shape[arr.ndim - i - 1]
offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32())
values = pa.ListArray.from_arrays(offsets, values)
return values
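# A minimal, illustrative sketch: the `_example_numpy_to_pyarrow_listarray` helper below is
# hypothetical (not part of the upstream module, never called by the library code); it shows
# how a (2, 3) NumPy array becomes a ListArray of two length-3 lists.
def _example_numpy_to_pyarrow_listarray():
    arr = np.arange(6, dtype=np.int32).reshape(2, 3)
    assert numpy_to_pyarrow_listarray(arr).to_pylist() == [[0, 1, 2], [3, 4, 5]]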
def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray:
null_mask = np.array([arr is None for arr in l_arr])
null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask))
l_arr = [arr for arr in l_arr if arr is not None]
offsets = np.cumsum(
[0] + [len(arr) for arr in l_arr], dtype=object
) # convert to dtype object to allow None insertion
offsets = np.insert(offsets, null_indices, None)
offsets = pa.array(offsets, type=pa.int32())
values = pa.concat_arrays(l_arr)
return pa.ListArray.from_arrays(offsets, values)
def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray:
"""Build a PyArrow ListArray from a possibly nested list of NumPy arrays"""
if len(l_arr) > 0:
return list_of_pa_arrays_to_pyarrow_listarray(
[numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr]
)
else:
return pa.array([], type=type)
def contains_any_np_array(data: Any):
"""Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray.
Args:
data (Any): Data.
Returns:
bool
"""
if isinstance(data, np.ndarray):
return True
elif isinstance(data, list):
return contains_any_np_array(first_non_null_value(data)[1])
else:
return False
def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray:
"""Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray.
Args:
data (Union[np.ndarray, List]): Data.
type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type.
Returns:
pa.ListArray
"""
if isinstance(data, np.ndarray):
return numpy_to_pyarrow_listarray(data, type=type)
elif isinstance(data, list):
return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data])
def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array:
"""Convert to PyArrow ListArray.
Args:
data (Any): Sequence, iterable, np.ndarray or pd.Series.
pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType.
Returns:
pyarrow.Array
"""
if contains_any_np_array(data):
return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type)
else:
return pa.array(data, pa_type.storage_dtype)
def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType:
"""Visit a (possibly nested) feature.
Args:
        feature (FeatureType): the feature type to be visited
        func (`Callable[[FeatureType], Optional[FeatureType]]`): the function applied to each nested feature; if it returns `None`, the feature is left unchanged
Returns:
visited feature (FeatureType)
"""
if isinstance(feature, dict):
out = func({k: _visit(f, func) for k, f in feature.items()})
elif isinstance(feature, (list, tuple)):
out = func([_visit(feature[0], func)])
elif isinstance(feature, Sequence):
out = func(Sequence(_visit(feature.feature, func), length=feature.length))
else:
out = func(feature)
return feature if out is None else out
def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool:
"""Check if a (possibly nested) feature requires decoding.
Args:
feature (FeatureType): the feature type to be checked
ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value
of the `decode` attribute of the decodable feature types.
Returns:
:obj:`bool`
"""
if isinstance(feature, dict):
return any(require_decoding(f) for f in feature.values())
elif isinstance(feature, (list, tuple)):
return require_decoding(feature[0])
elif isinstance(feature, Sequence):
return require_decoding(feature.feature)
else:
return hasattr(feature, "decode_example") and (feature.decode if not ignore_decode_attribute else True)
def require_storage_cast(feature: FeatureType) -> bool:
"""Check if a (possibly nested) feature requires storage casting.
Args:
feature (FeatureType): the feature type to be checked
Returns:
:obj:`bool`
"""
if isinstance(feature, dict):
return any(require_storage_cast(f) for f in feature.values())
elif isinstance(feature, (list, tuple)):
return require_storage_cast(feature[0])
elif isinstance(feature, Sequence):
return require_storage_cast(feature.feature)
else:
return hasattr(feature, "cast_storage")
def require_storage_embed(feature: FeatureType) -> bool:
"""Check if a (possibly nested) feature requires embedding data into storage.
Args:
feature (FeatureType): the feature type to be checked
Returns:
:obj:`bool`
"""
if isinstance(feature, dict):
return any(require_storage_cast(f) for f in feature.values())
elif isinstance(feature, (list, tuple)):
return require_storage_cast(feature[0])
elif isinstance(feature, Sequence):
return require_storage_cast(feature.feature)
else:
return hasattr(feature, "embed_storage")
def keep_features_dicts_synced(func):
"""
    Wrapper to keep the secondary dictionary of a :class:`datasets.Features` object, which tracks whether each column requires decoding,
in sync with the main dictionary.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if args:
self: "Features" = args[0]
args = args[1:]
else:
self: "Features" = kwargs.pop("self")
out = func(self, *args, **kwargs)
assert hasattr(self, "_column_requires_decoding")
self._column_requires_decoding = {col: require_decoding(feature) for col, feature in self.items()}
return out
wrapper._decorator_name_ = "_keep_dicts_synced"
return wrapper
class Features(dict):
"""A special dictionary that defines the internal structure of a dataset.
Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names,
and values are the type of that column.
`FieldType` can be one of the following:
- a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`.
- a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels
associated to them and will be stored as integers in the dataset.
- a python `dict` which specifies that the field is a nested field containing a mapping of sub-fields to sub-fields
features. It's possible to have nested fields of nested fields in an arbitrary manner.
- a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. The python
`list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature
type hosted in this list.
<Tip>
          A [`~datasets.Sequence`] with an internal dictionary feature will be automatically converted into a dictionary of
          lists. This behavior is implemented to have a compatibility layer with the TensorFlow Datasets library, but it may be
          unwanted in some cases. If you don't want this behavior, you can use a python `list` instead of the
[`~datasets.Sequence`].
</Tip>
- a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays.
- an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path
to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data.
- an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object
or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data.
- [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation.
"""
def __init__(*args, **kwargs):
# self not in the signature to allow passing self as a kwarg
if not args:
raise TypeError("descriptor '__init__' of 'Features' object needs an argument")
self, *args = args
super(Features, self).__init__(*args, **kwargs)
self._column_requires_decoding: Dict[str, bool] = {
col: require_decoding(feature) for col, feature in self.items()
}
__setitem__ = keep_features_dicts_synced(dict.__setitem__)
__delitem__ = keep_features_dicts_synced(dict.__delitem__)
update = keep_features_dicts_synced(dict.update)
setdefault = keep_features_dicts_synced(dict.setdefault)
pop = keep_features_dicts_synced(dict.pop)
popitem = keep_features_dicts_synced(dict.popitem)
clear = keep_features_dicts_synced(dict.clear)
def __reduce__(self):
return Features, (dict(self),)
@property
def type(self):
"""
Features field types.
Returns:
:obj:`pyarrow.DataType`
"""
return get_nested_type(self)
@property
def arrow_schema(self):
"""
Features schema.
Returns:
:obj:`pyarrow.Schema`
"""
hf_metadata = {"info": {"features": self.to_dict()}}
return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)})
@classmethod
def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features":
"""
Construct [`Features`] from Arrow Schema.
It also checks the schema metadata for Hugging Face Datasets features.
        Non-nullable fields are not supported and are set to nullable.
Args:
pa_schema (`pyarrow.Schema`):
Arrow Schema.
Returns:
[`Features`]
"""
# try to load features from the arrow schema metadata
if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata:
metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode())
if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None:
return Features.from_dict(metadata["info"]["features"])
obj = {field.name: generate_from_arrow_type(field.type) for field in pa_schema}
return cls(**obj)
@classmethod
def from_dict(cls, dic) -> "Features":
"""
Construct [`Features`] from dict.
Regenerate the nested feature object from a deserialized dict.
We use the `_type` key to infer the dataclass name of the feature `FieldType`.
It allows for a convenient constructor syntax
to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to
[`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require
any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive
dtypes that [`Value`] automatically performs.
Args:
dic (`dict[str, Any]`):
Python dictionary.
Returns:
`Features`
Example::
>>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}})
{'_type': Value(dtype='string', id=None)}
"""
obj = generate_from_dict(dic)
return cls(**obj)
def to_dict(self):
return asdict(self)
def _to_yaml_list(self) -> list:
# we compute the YAML list from the dict representation that is used for JSON dump
yaml_data = self.to_dict()
def simplify(feature: dict) -> dict:
if not isinstance(feature, dict):
raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
#
# sequence: -> sequence: int32
# dtype: int32 ->
#
if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]:
feature["sequence"] = feature["sequence"]["dtype"]
#
# sequence: -> sequence:
# struct: -> - name: foo
# - name: foo -> dtype: int32
# dtype: int32 ->
#
if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]:
feature["sequence"] = feature["sequence"]["struct"]
#
# list: -> list: int32
# dtype: int32 ->
#
if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]:
feature["list"] = feature["list"]["dtype"]
#
# list: -> list:
# struct: -> - name: foo
# - name: foo -> dtype: int32
# dtype: int32 ->
#
if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]:
feature["list"] = feature["list"]["struct"]
#
# class_label: -> class_label:
# names: -> names:
# - negative -> '0': negative
# - positive -> '1': positive
#
if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list):
# server-side requirement: keys must be strings
feature["class_label"]["names"] = {
str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"])
}
return feature
def to_yaml_inner(obj: Union[dict, list]) -> dict:
if isinstance(obj, dict):
_type = obj.pop("_type", None)
if _type == "Sequence":
_feature = obj.pop("feature")
return simplify({"sequence": to_yaml_inner(_feature), **obj})
elif _type == "Value":
return obj
elif _type and not obj:
return {"dtype": camelcase_to_snakecase(_type)}
elif _type:
return {"dtype": simplify({camelcase_to_snakecase(_type): obj})}
else:
return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]}
elif isinstance(obj, list):
return simplify({"list": simplify(to_yaml_inner(obj[0]))})
else:
raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
return to_yaml_inner(yaml_data)["struct"]
@classmethod
def _from_yaml_list(cls, yaml_data: list) -> "Features":
yaml_data = copy.deepcopy(yaml_data)
# we convert the list obtained from YAML data into the dict representation that is used for JSON dump
def unsimplify(feature: dict) -> dict:
if not isinstance(feature, dict):
raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
#
# sequence: int32 -> sequence:
# -> dtype: int32
#
if isinstance(feature.get("sequence"), str):
feature["sequence"] = {"dtype": feature["sequence"]}
#
# list: int32 -> list:
# -> dtype: int32
#
if isinstance(feature.get("list"), str):
feature["list"] = {"dtype": feature["list"]}
#
# class_label: -> class_label:
# names: -> names:
# '0': negative -> - negative
# '1': positive -> - positive
#
if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict):
label_ids = sorted(feature["class_label"]["names"], key=int)
if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)):
raise ValueError(
f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing."
)
feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids]
return feature
def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]:
if isinstance(obj, dict):
if not obj:
return {}
_type = next(iter(obj))
if _type == "sequence":
_feature = unsimplify(obj).pop(_type)
return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"}
if _type == "list":
return [from_yaml_inner(unsimplify(obj)[_type])]
if _type == "struct":
return from_yaml_inner(obj["struct"])
elif _type == "dtype":
if isinstance(obj["dtype"], str):
# e.g. int32, float64, string, audio, image
try:
Value(obj["dtype"])
return {**obj, "_type": "Value"}
except ValueError:
# for audio and image that are Audio and Image types, not Value
return {"_type": snakecase_to_camelcase(obj["dtype"])}
else:
return from_yaml_inner(obj["dtype"])
else:
return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]}
elif isinstance(obj, list):
names = [_feature.pop("name") for _feature in obj]
return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)}
else:
raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
return cls.from_dict(from_yaml_inner(yaml_data))
def encode_example(self, example):
"""
Encode example into a format for Arrow.
Args:
example (`dict[str, Any]`):
Data in a Dataset row.
Returns:
`dict[str, Any]`
"""
example = cast_to_python_objects(example)
return encode_nested_example(self, example)
def encode_column(self, column, column_name: str):
"""
Encode column into a format for Arrow.
Args:
column (`list[Any]`):
Data in a Dataset column.
column_name (`str`):
Dataset column name.
Returns:
`list[Any]`
"""
column = cast_to_python_objects(column)
return [encode_nested_example(self[column_name], obj) for obj in column]
def encode_batch(self, batch):
"""
Encode batch into a format for Arrow.
Args:
batch (`dict[str, list[Any]]`):
Data in a Dataset batch.
Returns:
`dict[str, list[Any]]`
"""
encoded_batch = {}
if set(batch) != set(self):
raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}")
for key, column in batch.items():
column = cast_to_python_objects(column)
encoded_batch[key] = [encode_nested_example(self[key], obj) for obj in column]
return encoded_batch
def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
"""Decode example with custom feature decoding.
Args:
example (`dict[str, Any]`):
Dataset row data.
token_per_repo_id (`dict`, *optional*):
To access and decode audio or image files from private repositories on the Hub, you can pass
a dictionary `repo_id (str) -> token (bool or str)`.
Returns:
`dict[str, Any]`
"""
return {
column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id)
if self._column_requires_decoding[column_name]
else value
for column_name, (feature, value) in zip_dict(
{key: value for key, value in self.items() if key in example}, example
)
}
def decode_column(self, column: list, column_name: str):
"""Decode column with custom feature decoding.
Args:
column (`list[Any]`):
Dataset column data.
column_name (`str`):
Dataset column name.
Returns:
`list[Any]`
"""
return (
[decode_nested_example(self[column_name], value) if value is not None else None for value in column]
if self._column_requires_decoding[column_name]
else column
)
def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
"""Decode batch with custom feature decoding.
Args:
batch (`dict[str, list[Any]]`):
Dataset batch data.
token_per_repo_id (`dict`, *optional*):
To access and decode audio or image files from private repositories on the Hub, you can pass
a dictionary repo_id (str) -> token (bool or str)
Returns:
`dict[str, list[Any]]`
"""
decoded_batch = {}
for column_name, column in batch.items():
decoded_batch[column_name] = (
[
decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id)
if value is not None
else None
for value in column
]
if self._column_requires_decoding[column_name]
else column
)
return decoded_batch
def copy(self) -> "Features":
"""
Make a deep copy of [`Features`].
Returns:
[`Features`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> copy_of_features = ds.features.copy()
>>> copy_of_features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
return copy.deepcopy(self)
def reorder_fields_as(self, other: "Features") -> "Features":
"""
Reorder Features fields to match the field order of other [`Features`].
The order of the fields is important since it matters for the underlying arrow data.
        Re-ordering the fields makes the underlying arrow data types match.
Args:
other ([`Features`]):
The other [`Features`] to align with.
Returns:
[`Features`]
Example::
>>> from datasets import Features, Sequence, Value
        >>> # let's say we have two features with a different order of nested fields (for a and b for example)
>>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})})
>>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}})
>>> assert f1.type != f2.type
        >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but makes the field order match
>>> f1.reorder_fields_as(f2)
{'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)}
>>> assert f1.reorder_fields_as(f2).type == f2.type
"""
def recursive_reorder(source, target, stack=""):
stack_position = " at " + stack[1:] if stack else ""
if isinstance(target, Sequence):
target = target.feature
if isinstance(target, dict):
target = {k: [v] for k, v in target.items()}
else:
target = [target]
if isinstance(source, Sequence):
source, id_, length = source.feature, source.id, source.length
if isinstance(source, dict):
source = {k: [v] for k, v in source.items()}
reordered = recursive_reorder(source, target, stack)
return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length)
else:
source = [source]
reordered = recursive_reorder(source, target, stack)
return Sequence(reordered[0], id=id_, length=length)
elif isinstance(source, dict):
if not isinstance(target, dict):
raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
if sorted(source) != sorted(target):
message = (
f"Keys mismatch: between {source} (source) and {target} (target).\n"
f"{source.keys()-target.keys()} are missing from target "
f"and {target.keys()-source.keys()} are missing from source" + stack_position
)
raise ValueError(message)
return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target}
elif isinstance(source, list):
if not isinstance(target, list):
raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
if len(source) != len(target):
raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position)
return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))]
else:
return source
return Features(recursive_reorder(self, other))
def flatten(self, max_depth=16) -> "Features":
"""Flatten the features. Every dictionary column is removed and is replaced by
all the subfields it contains. The new fields are named by concatenating the
name of the original column and the subfield name like this: `<original>.<subfield>`.
If a column contains nested dictionaries, then all the lower-level subfields names are
also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc.
Returns:
[`Features`]:
The flattened features.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("squad", split="train")
>>> ds.features.flatten()
{'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None),
'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None),
'context': Value(dtype='string', id=None),
'id': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None),
'title': Value(dtype='string', id=None)}
```
"""
for depth in range(1, max_depth):
no_change = True
flattened = self.copy()
for column_name, subfeature in self.items():
if isinstance(subfeature, dict):
no_change = False
flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()})
del flattened[column_name]
elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict):
no_change = False
flattened.update(
{
f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v]
for k, v in subfeature.feature.items()
}
)
del flattened[column_name]
elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature:
no_change = False
flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()})
del flattened[column_name]
self = flattened
if no_change:
break
return self
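# A minimal, illustrative sketch: the `_example_features_flatten` helper below is hypothetical
# (not part of the upstream module, never called by the library code); it assumes a single
# nested dict column and shows that flattening produces dotted column names.
def _example_features_flatten():
    features = Features({"answers": {"text": Value("string"), "answer_start": Value("int32")}})
    assert set(features.flatten()) == {"answers.text", "answers.answer_start"}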
def _align_features(features_list: List[Features]) -> List[Features]:
"""Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature."""
name2feature = {}
for features in features_list:
for k, v in features.items():
if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
name2feature[k] = v
return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list]
def _check_if_features_can_be_aligned(features_list: List[Features]):
"""Check if the dictionaries of features can be aligned.
    Two dictionaries of features can be aligned if the keys they share have the same type, or if one of them is of type `Value("null")`.
"""
name2feature = {}
for features in features_list:
for k, v in features.items():
if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
name2feature[k] = v
for features in features_list:
for k, v in features.items():
if not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v:
raise ValueError(
                    f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null")).'
)
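# A minimal, illustrative sketch: the `_example_align_features` helper below is hypothetical
# (not part of the upstream module, never called by the library code); it assumes two splits
# where one column is typed Value("null") and shows that it gets aligned to the concrete type.
def _example_align_features():
    f1 = Features({"id": Value("int64"), "meta": Value("null")})
    f2 = Features({"id": Value("int64"), "meta": Value("string")})
    aligned = _align_features([f1, f2])
    assert aligned[0]["meta"] == Value("string")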
| datasets-main | src/datasets/features/features.py |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
"""`FeatureConnector` for translations with fixed languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to string translations.
Example:
```python
>>> # At construction time:
>>> datasets.features.Translation(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': 'le chat',
... 'de': 'die katze'
... }
```
"""
languages: List[str]
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="Translation", init=False, repr=False)
def __call__(self):
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the Translation feature into a dictionary."""
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
"""`FeatureConnector` for translations with variable languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to one or more string translations.
The languages present may vary from example to example.
Returns:
- `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
Example:
```python
>>> # At construction time:
>>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
    ...     'fr': ['le chat', 'la chatte'],
... 'de': 'die katze'
... }
>>> # Tensor returned :
>>> {
... 'language': ['en', 'de', 'fr', 'fr'],
... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
... }
```
"""
languages: Optional[List] = None
num_languages: Optional[int] = None
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
def __post_init__(self):
self.languages = sorted(set(self.languages)) if self.languages else None
self.num_languages = len(self.languages) if self.languages else None
def __call__(self):
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def encode_example(self, translation_dict):
lang_set = set(self.languages)
if self.languages and set(translation_dict) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
)
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
translation_tuples = []
for lang, text in translation_dict.items():
if isinstance(text, str):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the TranslationVariableLanguages feature into a dictionary."""
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
}
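# A minimal, illustrative sketch: the `_example_translation_variable_languages` helper below is
# hypothetical (not part of the upstream module, never called by the library code); it assumes
# two languages and shows how encode_example flattens a language -> text mapping into two
# parallel lists sorted by language code.
def _example_translation_variable_languages():
    feature = TranslationVariableLanguages(languages=["en", "fr"])
    encoded = feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
    assert encoded == {"language": ("en", "fr", "fr"), "translation": ("the cat", "la chatte", "le chat")}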
| datasets-main | src/datasets/features/translation.py |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.download_config import DownloadConfig
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
"""Audio [`Feature`] to extract audio data from an audio file.
Input: The Audio feature accepts as input:
- A `str`: Absolute path to the audio file (i.e. random access is allowed).
- A `dict` with the keys:
- `path`: String with relative path of the audio file to the archive file.
- `bytes`: Bytes content of the audio file.
This is useful for archived files with sequential access.
- A `dict` with the keys:
- `path`: String with relative path of the audio file to the archive file.
- `array`: Array containing the audio sample
- `sampling_rate`: Integer corresponding to the sampling rate of the audio sample.
This is useful for archived files with sequential access.
Args:
sampling_rate (`int`, *optional*):
Target sampling rate. If `None`, the native sampling rate is used.
mono (`bool`, defaults to `True`):
Whether to convert the audio signal to mono by averaging samples across
channels.
decode (`bool`, defaults to `True`):
Whether to decode the audio data. If `False`,
returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`.
Example:
```py
>>> from datasets import load_dataset, Audio
>>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train")
>>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
>>> ds[0]["audio"]
{'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ...,
3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
'sampling_rate': 16000}
```
"""
sampling_rate: Optional[int] = None
mono: bool = True
decode: bool = True
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
_type: str = field(default="Audio", init=False, repr=False)
def __call__(self):
return self.pa_type
def encode_example(self, value: Union[str, bytes, dict]) -> dict:
"""Encode example into a format for Arrow.
Args:
value (`str` or `dict`):
Data passed as input to Audio feature.
Returns:
`dict`
"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
if isinstance(value, str):
return {"bytes": None, "path": value}
elif isinstance(value, bytes):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
buffer = BytesIO()
sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path") is not None and os.path.isfile(value["path"]):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm"):
# "PCM" only has raw audio bytes
if value.get("sampling_rate") is None:
                    # To convert raw PCM bytes to WAV bytes, the sampling rate must be known
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
if value.get("bytes"):
                    # If PCM bytes were already provided, use them directly instead of reading the file
bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
else:
bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
buffer = BytesIO(bytes())
sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path")}
elif value.get("bytes") is not None or value.get("path") is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes"), "path": value.get("path")}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
)
def decode_example(
self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
) -> dict:
"""Decode example audio file into audio data.
Args:
value (`dict`):
A dictionary with keys:
- `path`: String with relative audio file path.
- `bytes`: Bytes of the audio file.
token_per_repo_id (`dict`, *optional*):
To access and decode
audio files from private repositories on the Hub, you can pass
a dictionary repo_id (`str`) -> token (`bool` or `str`)
Returns:
`dict`
"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
)
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
)
if file is None:
token_per_repo_id = token_per_repo_id or {}
source_url = path.split("::")[-1]
pattern = (
config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL
)
try:
repo_id = string_to_dict(source_url, pattern)["repo_id"]
token = token_per_repo_id[repo_id]
except (ValueError, KeyError):
token = None
download_config = DownloadConfig(token=token)
with xopen(path, "rb", download_config=download_config) as f:
array, sampling_rate = sf.read(f)
else:
array, sampling_rate = sf.read(file)
array = array.T
if self.mono:
array = librosa.to_mono(array)
if self.sampling_rate and self.sampling_rate != sampling_rate:
array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature.")
return {
"bytes": Value("binary"),
"path": Value("string"),
}
def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
"""Cast an Arrow array to the Audio arrow storage type.
The Arrow types that can be converted to the Audio pyarrow storage type are:
- `pa.string()` - it must contain the "path" data
- `pa.binary()` - it must contain the audio bytes
- `pa.struct({"bytes": pa.binary()})`
- `pa.struct({"path": pa.string()})`
- `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
Args:
storage (`Union[pa.StringArray, pa.StructArray]`):
PyArrow array to cast.
Returns:
`pa.StructArray`: Array in the Audio arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`
"""
if pa.types.is_string(storage.type):
bytes_array = pa.array([None] * len(storage), type=pa.binary())
storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_binary(storage.type):
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("bytes") >= 0:
bytes_array = storage.field("bytes")
else:
bytes_array = pa.array([None] * len(storage), type=pa.binary())
if storage.type.get_field_index("path") >= 0:
path_array = storage.field("path")
else:
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
return array_cast(storage, self.pa_type)
def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
"""Embed audio files into the Arrow array.
Args:
storage (`pa.StructArray`):
PyArrow array to embed.
Returns:
`pa.StructArray`: Array in the Audio arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
@no_op_if_value_is_null
def path_to_bytes(path):
with xopen(path, "rb") as f:
bytes_ = f.read()
return bytes_
bytes_array = pa.array(
[
(path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],
type=pa.binary(),
)
path_array = pa.array(
[os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
type=pa.string(),
)
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
return array_cast(storage, self.pa_type)
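# A minimal, illustrative sketch: the `_example_audio_cast_storage` helper below is
# hypothetical (not part of the upstream module, never called by the library code); it uses a
# made-up file name to show that a plain string column of paths is cast to the
# {"bytes", "path"} struct storage, with nulls preserved.
def _example_audio_cast_storage():
    struct_array = Audio().cast_storage(pa.array(["call_0.wav", None]))
    assert struct_array.to_pylist() == [{"bytes": None, "path": "call_0.wav"}, None]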
| datasets-main | src/datasets/features/audio.py |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.download_config import DownloadConfig
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
"""Image [`Feature`] to read image data from an image file.
Input: The Image feature accepts as input:
- A `str`: Absolute path to the image file (i.e. random access is allowed).
- A `dict` with the keys:
- `path`: String with relative path of the image file to the archive file.
- `bytes`: Bytes of the image file.
This is useful for archived files with sequential access.
- An `np.ndarray`: NumPy array representing an image.
- A `PIL.Image.Image`: PIL image object.
Args:
decode (`bool`, defaults to `True`):
Whether to decode the image data. If `False`,
returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`.
Examples:
```py
>>> from datasets import load_dataset, Image
>>> ds = load_dataset("beans", split="train")
>>> ds.features["image"]
Image(decode=True, id=None)
>>> ds[0]["image"]
<PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x15E52E7F0>
>>> ds = ds.cast_column('image', Image(decode=False))
    >>> ds[0]["image"]
    {'bytes': None,
'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'}
```
"""
decode: bool = True
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "PIL.Image.Image"
pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
_type: str = field(default="Image", init=False, repr=False)
def __call__(self):
return self.pa_type
def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
"""Encode example into a format for Arrow.
Args:
value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`):
Data passed as input to Image feature.
Returns:
`dict` with "path" and "bytes" fields
"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
if isinstance(value, list):
value = np.array(value)
if isinstance(value, str):
return {"path": value, "bytes": None}
elif isinstance(value, bytes):
return {"path": None, "bytes": value}
elif isinstance(value, np.ndarray):
# convert the image array to PNG/TIFF bytes
return encode_np_array(value)
elif isinstance(value, PIL.Image.Image):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(value)
elif value.get("path") is not None and os.path.isfile(value["path"]):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path")}
elif value.get("bytes") is not None or value.get("path") is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes"), "path": value.get("path")}
else:
raise ValueError(
f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
)
def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
"""Decode example image file into image data.
Args:
value (`str` or `dict`):
A string with the absolute image file path, a dictionary with
keys:
- `path`: String with absolute or relative image file path.
- `bytes`: The bytes of the image file.
token_per_repo_id (`dict`, *optional*):
To access and decode
image files from private repositories on the Hub, you can pass
a dictionary repo_id (`str`) -> token (`bool` or `str`).
Returns:
`PIL.Image.Image`
"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'.")
if token_per_repo_id is None:
token_per_repo_id = {}
path, bytes_ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
else:
if is_local_path(path):
image = PIL.Image.open(path)
else:
source_url = path.split("::")[-1]
pattern = (
config.HUB_DATASETS_URL
if source_url.startswith(config.HF_ENDPOINT)
else config.HUB_DATASETS_HFFS_URL
)
try:
repo_id = string_to_dict(source_url, pattern)["repo_id"]
token = token_per_repo_id.get(repo_id)
except ValueError:
token = None
download_config = DownloadConfig(token=token)
with xopen(path, "rb", download_config=download_config) as f:
bytes_ = BytesIO(f.read())
image = PIL.Image.open(bytes_)
else:
image = PIL.Image.open(BytesIO(bytes_))
image.load() # to avoid "Too many open files" errors
return image
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary"),
"path": Value("string"),
}
)
def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
"""Cast an Arrow array to the Image arrow storage type.
The Arrow types that can be converted to the Image pyarrow storage type are:
- `pa.string()` - it must contain the "path" data
- `pa.binary()` - it must contain the image bytes
- `pa.struct({"bytes": pa.binary()})`
- `pa.struct({"path": pa.string()})`
- `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
        - `pa.list_(*)` - it must contain the image array data
Args:
storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`):
PyArrow array to cast.
Returns:
`pa.StructArray`: Array in the Image arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
if pa.types.is_string(storage.type):
bytes_array = pa.array([None] * len(storage), type=pa.binary())
storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_binary(storage.type):
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("bytes") >= 0:
bytes_array = storage.field("bytes")
else:
bytes_array = pa.array([None] * len(storage), type=pa.binary())
if storage.type.get_field_index("path") >= 0:
path_array = storage.field("path")
else:
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_list(storage.type):
bytes_array = pa.array(
[encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
type=pa.binary(),
)
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
)
return array_cast(storage, self.pa_type)
def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
"""Embed image files into the Arrow array.
Args:
storage (`pa.StructArray`):
PyArrow array to embed.
Returns:
`pa.StructArray`: Array in the Image arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
@no_op_if_value_is_null
def path_to_bytes(path):
with xopen(path, "rb") as f:
bytes_ = f.read()
return bytes_
bytes_array = pa.array(
[
(path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],
type=pa.binary(),
)
path_array = pa.array(
[os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
type=pa.string(),
)
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
"""Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
buffer = BytesIO()
if image.format in list_image_compression_formats():
format = image.format
else:
format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(buffer, format=format)
return buffer.getvalue()
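# Illustrative sketch (not part of the original module): round-trip a small in-memory RGB
# image through `image_to_bytes` to show the PNG fallback used when an image has no native
# format. The helper name `_example_image_bytes_round_trip` is hypothetical.
def _example_image_bytes_round_trip():
    from io import BytesIO

    import numpy as np
    import PIL.Image

    image = PIL.Image.fromarray(np.zeros((4, 4, 3), dtype=np.uint8))  # image.format is None
    data = image_to_bytes(image)  # falls back to PNG because the image has no source format
    decoded = PIL.Image.open(BytesIO(data))
    assert decoded.size == (4, 4)
    return decoded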
def encode_pil_image(image: "PIL.Image.Image") -> dict:
if hasattr(image, "filename") and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
dtype = array.dtype
dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
dtype_kind = dtype.kind
dtype_itemsize = dtype.itemsize
dest_dtype = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
dest_dtype = np.dtype("|u1")
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
)
if dtype is not dest_dtype:
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
dest_dtype = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
dest_dtype = np.dtype(dest_dtype_str)
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
)
image = PIL.Image.fromarray(array.astype(dest_dtype))
return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
"""Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
if objs:
_, obj = first_non_null_value(objs)
if isinstance(obj, str):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(obj, np.ndarray):
obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
return [obj_to_image_dict_func(obj) for obj in objs]
elif isinstance(obj, PIL.Image.Image):
obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
return [obj_to_image_dict_func(obj) for obj in objs]
else:
return objs
else:
return objs
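# Illustrative sketch (not part of the original module): `Image.cast_storage` accepts a
# plain string (path) array and normalizes it to the struct<bytes: binary, path: string>
# storage type listed in its docstring. The helper name `_example_cast_string_storage`
# is hypothetical.
def _example_cast_string_storage():
    import pyarrow as pa

    paths = pa.array(["img/0001.png", None], type=pa.string())
    storage = Image().cast_storage(paths)
    assert storage.type == pa.struct({"bytes": pa.binary(), "path": pa.string()})
    return storage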
| datasets-main | src/datasets/features/image.py |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return sha256(full_bytes).hexdigest()
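# Illustrative sketch (not part of this module's public API): comments and blank lines are
# stripped before hashing, so two module sources that differ only in comments produce the
# same cache hash. The helper name `_example_hash_ignores_comments` is hypothetical.
def _example_hash_ignores_comments():
    with_comments = ["x = 1", "# a comment", "", "y = 2"]
    without_comments = ["x = 1", "y = 2"]
    return _hash_python_lines(with_comments) == _hash_python_lines(without_comments)  # True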
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
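# Illustrative sketch (not part of this module's public API): the two mappings above let a
# caller go from a data file extension to a packaged module (plus default builder kwargs)
# and back to the extensions that module accepts; ".zip" is appended for every module so
# archives are matched too. The helper name `_example_extension_lookup` is hypothetical.
def _example_extension_lookup():
    module_name, default_kwargs = _EXTENSION_TO_MODULE[".tsv"]  # ("csv", {"sep": "\t"})
    assert ".zip" in _MODULE_TO_EXTENSIONS[module_name]
    return module_name, default_kwargs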
| datasets-main | src/datasets/packaged_modules/__init__.py |
datasets-main | src/datasets/packaged_modules/folder_based_builder/__init__.py |
|
import collections
import itertools
import os
from dataclasses import dataclass
from typing import List, Optional, Tuple, Type
import pandas as pd
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.features.features import FeatureType
from datasets.tasks.base import TaskTemplate
logger = datasets.utils.logging.get_logger(__name__)
def count_path_segments(path):
return path.replace("\\", "/").count("/")
@dataclass
class FolderBasedBuilderConfig(datasets.BuilderConfig):
"""BuilderConfig for AutoFolder."""
features: Optional[datasets.Features] = None
drop_labels: bool = None
drop_metadata: bool = None
class FolderBasedBuilder(datasets.GeneratorBasedBuilder):
"""
Base class for generic data loaders for image and audio data.
Abstract class attributes to be overridden by a child class:
BASE_FEATURE: feature object to decode data (i.e. datasets.Image, datasets.Audio, ...)
BASE_COLUMN_NAME: string key name of a base feature (i.e. "image", "audio", ...)
BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig`
EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files
will be included in a dataset)
CLASSIFICATION_TASK: classification task to use if labels are obtained from the folder structure
"""
BASE_FEATURE: Type[FeatureType]
BASE_COLUMN_NAME: str
BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig
EXTENSIONS: List[str]
CLASSIFICATION_TASK: TaskTemplate
METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"]
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
# Do an early pass if:
# * `drop_labels` is None (default) or False, to infer the class labels
# * `drop_metadata` is None (default) or False, to find the metadata files
do_analyze = not self.config.drop_labels or not self.config.drop_metadata
labels, path_depths = set(), set()
metadata_files = collections.defaultdict(set)
def analyze(files_or_archives, downloaded_files_or_dirs, split):
if len(downloaded_files_or_dirs) == 0:
return
# The files are separated from the archives at this point, so check the first sample
# to see if it's a file or a directory and iterate accordingly
if os.path.isfile(downloaded_files_or_dirs[0]):
original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs
for original_file, downloaded_file in zip(original_files, downloaded_files):
original_file, downloaded_file = str(original_file), str(downloaded_file)
_, original_file_ext = os.path.splitext(original_file)
if original_file_ext.lower() in self.EXTENSIONS:
if not self.config.drop_labels:
labels.add(os.path.basename(os.path.dirname(original_file)))
path_depths.add(count_path_segments(original_file))
elif os.path.basename(original_file) in self.METADATA_FILENAMES:
metadata_files[split].add((original_file, downloaded_file))
else:
original_file_name = os.path.basename(original_file)
logger.debug(
f"The file '{original_file_name}' was ignored: it is not an image, and is not {self.METADATA_FILENAMES} either."
)
else:
archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs
for archive, downloaded_dir in zip(archives, downloaded_dirs):
archive, downloaded_dir = str(archive), str(downloaded_dir)
for downloaded_dir_file in dl_manager.iter_files(downloaded_dir):
_, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
if downloaded_dir_file_ext in self.EXTENSIONS:
if not self.config.drop_labels:
labels.add(os.path.basename(os.path.dirname(downloaded_dir_file)))
path_depths.add(count_path_segments(downloaded_dir_file))
elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES:
metadata_files[split].add((None, downloaded_dir_file))
else:
archive_file_name = os.path.basename(archive)
original_file_name = os.path.basename(downloaded_dir_file)
logger.debug(
f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either."
)
data_files = self.config.data_files
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files, archives = self._split_files_and_archives(files)
downloaded_files = dl_manager.download(files)
downloaded_dirs = dl_manager.download_and_extract(archives)
if do_analyze: # drop_metadata is None or False, drop_labels is None or False
logger.info(f"Searching for labels and/or metadata files in {split_name} data files...")
analyze(files, downloaded_files, split_name)
analyze(archives, downloaded_dirs, split_name)
if metadata_files:
# add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False
add_metadata = not self.config.drop_metadata
# if `metadata_files` are found, add labels only if
# `drop_labels` is set up to False explicitly (not-default behavior)
add_labels = self.config.drop_labels is False
else:
# if `metadata_files` are not found, don't add metadata
add_metadata = False
# if `metadata_files` are not found and `drop_labels` is None (default) -
# add labels if files are on the same level in directory hierarchy and there is more than one label
add_labels = (
(len(labels) > 1 and len(path_depths) == 1)
if self.config.drop_labels is None
else not self.config.drop_labels
)
if add_labels:
logger.info("Adding the labels inferred from data directories to the dataset's features...")
if add_metadata:
logger.info("Adding metadata to the dataset...")
else:
add_labels, add_metadata, metadata_files = False, False, {}
splits.append(
datasets.SplitGenerator(
name=split_name,
gen_kwargs={
"files": [(file, downloaded_file) for file, downloaded_file in zip(files, downloaded_files)]
+ [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs],
"metadata_files": metadata_files,
"split_name": split_name,
"add_labels": add_labels,
"add_metadata": add_metadata,
},
)
)
if add_metadata:
# Verify that:
# * all metadata files have the same set of features
# * the `file_name` key is one of the metadata keys and is of type string
features_per_metadata_file: List[Tuple[str, datasets.Features]] = []
# Check that all metadata files share the same format
metadata_ext = {
os.path.splitext(downloaded_metadata_file)[1][1:]
for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values())
}
if len(metadata_ext) > 1:
raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}")
metadata_ext = metadata_ext.pop()
for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()):
pa_metadata_table = self._read_metadata(downloaded_metadata_file)
features_per_metadata_file.append(
(downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema))
)
for downloaded_metadata_file, metadata_features in features_per_metadata_file:
if metadata_features != features_per_metadata_file[0][1]:
raise ValueError(
f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}"
)
metadata_features = features_per_metadata_file[0][1]
if "file_name" not in metadata_features:
raise ValueError("`file_name` must be present as dictionary key in metadata files")
if metadata_features["file_name"] != datasets.Value("string"):
raise ValueError("`file_name` key must be a string")
del metadata_features["file_name"]
else:
metadata_features = None
# Normally, we would do this in _info, but we need to know the labels and/or metadata
# before building the features
if self.config.features is None:
if add_labels:
self.info.features = datasets.Features(
{
self.BASE_COLUMN_NAME: self.BASE_FEATURE(),
"label": datasets.ClassLabel(names=sorted(labels)),
}
)
self.info.task_templates = [self.CLASSIFICATION_TASK.align_with_features(self.info.features)]
else:
self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()})
if add_metadata:
# Warn if there are duplicated keys in metadata compared to the existing features
# (`BASE_COLUMN_NAME`, optionally "label")
duplicated_keys = set(self.info.features) & set(metadata_features)
if duplicated_keys:
logger.warning(
f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in "
f"the features dictionary."
)
# skip metadata duplicated keys
self.info.features.update(
{
feature: metadata_features[feature]
for feature in metadata_features
if feature not in duplicated_keys
}
)
return splits
def _split_files_and_archives(self, data_files):
files, archives = [], []
for data_file in data_files:
_, data_file_ext = os.path.splitext(data_file)
if data_file_ext.lower() in self.EXTENSIONS:
files.append(data_file)
elif os.path.basename(data_file) in self.METADATA_FILENAMES:
files.append(data_file)
else:
archives.append(data_file)
return files, archives
def _read_metadata(self, metadata_file):
metadata_file_ext = os.path.splitext(metadata_file)[1][1:]
if metadata_file_ext == "csv":
# Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` for reading CSV files for consistency with the CSV packaged module
return pa.Table.from_pandas(pd.read_csv(metadata_file))
else:
with open(metadata_file, "rb") as f:
return paj.read_json(f)
def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels):
split_metadata_files = metadata_files.get(split_name, [])
sample_empty_metadata = (
{k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {}
)
last_checked_dir = None
metadata_dir = None
metadata_dict = None
downloaded_metadata_file = None
if split_metadata_files:
metadata_ext = {
os.path.splitext(downloaded_metadata_file)[1][1:]
for _, downloaded_metadata_file in split_metadata_files
}
metadata_ext = metadata_ext.pop()
file_idx = 0
for original_file, downloaded_file_or_dir in files:
if original_file is not None:
_, original_file_ext = os.path.splitext(original_file)
if original_file_ext.lower() in self.EXTENSIONS:
if add_metadata:
# If the file is a file of a needed type, and we've just entered a new directory,
# find the nearest metadata file (by counting path segments) for the directory
current_dir = os.path.dirname(original_file)
if last_checked_dir is None or last_checked_dir != current_dir:
last_checked_dir = current_dir
metadata_file_candidates = [
(
os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)),
metadata_file_candidate,
downloaded_metadata_file,
)
for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
if metadata_file_candidate
is not None # ignore metadata_files that are inside archives
and not os.path.relpath(
original_file, os.path.dirname(metadata_file_candidate)
).startswith("..")
]
if metadata_file_candidates:
_, metadata_file, downloaded_metadata_file = min(
metadata_file_candidates, key=lambda x: count_path_segments(x[0])
)
pa_metadata_table = self._read_metadata(downloaded_metadata_file)
pa_file_name_array = pa_metadata_table["file_name"]
pa_metadata_table = pa_metadata_table.drop(["file_name"])
metadata_dir = os.path.dirname(metadata_file)
metadata_dict = {
os.path.normpath(file_name).replace("\\", "/"): sample_metadata
for file_name, sample_metadata in zip(
pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
)
}
else:
raise ValueError(
f"One or several metadata.{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
)
if metadata_dir is not None and downloaded_metadata_file is not None:
file_relpath = os.path.relpath(original_file, metadata_dir)
file_relpath = file_relpath.replace("\\", "/")
if file_relpath not in metadata_dict:
raise ValueError(
f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}."
)
sample_metadata = metadata_dict[file_relpath]
else:
raise ValueError(
f"One or several metadata.{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
)
else:
sample_metadata = {}
if add_labels:
sample_label = {"label": os.path.basename(os.path.dirname(original_file))}
else:
sample_label = {}
yield file_idx, {
**sample_empty_metadata,
self.BASE_COLUMN_NAME: downloaded_file_or_dir,
**sample_metadata,
**sample_label,
}
file_idx += 1
else:
for downloaded_dir_file in downloaded_file_or_dir:
_, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
if downloaded_dir_file_ext.lower() in self.EXTENSIONS:
if add_metadata:
current_dir = os.path.dirname(downloaded_dir_file)
if last_checked_dir is None or last_checked_dir != current_dir:
last_checked_dir = current_dir
metadata_file_candidates = [
(
os.path.relpath(
downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
),
metadata_file_candidate,
downloaded_metadata_file,
)
for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
if metadata_file_candidate
is None # ignore metadata_files that are not inside archives
and not os.path.relpath(
downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
).startswith("..")
]
if metadata_file_candidates:
_, metadata_file, downloaded_metadata_file = min(
metadata_file_candidates, key=lambda x: count_path_segments(x[0])
)
pa_metadata_table = self._read_metadata(downloaded_metadata_file)
pa_file_name_array = pa_metadata_table["file_name"]
pa_metadata_table = pa_metadata_table.drop(["file_name"])
metadata_dir = os.path.dirname(downloaded_metadata_file)
metadata_dict = {
os.path.normpath(file_name).replace("\\", "/"): sample_metadata
for file_name, sample_metadata in zip(
pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
)
}
else:
raise ValueError(
f"One or several metadata.{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
)
if metadata_dir is not None and downloaded_metadata_file is not None:
downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir)
downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/")
if downloaded_dir_file_relpath not in metadata_dict:
raise ValueError(
f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}."
)
sample_metadata = metadata_dict[downloaded_dir_file_relpath]
else:
raise ValueError(
f"One or several metadata.{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
)
else:
sample_metadata = {}
if add_labels:
sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))}
else:
sample_label = {}
yield file_idx, {
**sample_empty_metadata,
self.BASE_COLUMN_NAME: downloaded_dir_file,
**sample_metadata,
**sample_label,
}
file_idx += 1
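# Illustrative sketch (not part of the original module): a minimal metadata.jsonl in the
# layout this builder expects -- every record carries a string "file_name" key, relative to
# the metadata file, plus any extra columns to attach to the samples. The read call mirrors
# what `_read_metadata` does for JSON Lines files; the helper name and paths are hypothetical.
def _example_write_and_read_metadata(tmp_dir):
    import json
    import os

    import pyarrow.json as paj

    metadata_path = os.path.join(tmp_dir, "metadata.jsonl")
    with open(metadata_path, "w", encoding="utf-8") as f:
        f.write(json.dumps({"file_name": "cat/0001.png", "caption": "a cat"}) + "\n")
        f.write(json.dumps({"file_name": "dog/0001.png", "caption": "a dog"}) + "\n")
    with open(metadata_path, "rb") as f:
        table = paj.read_json(f)
    assert "file_name" in table.column_names
    return table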
| datasets-main | src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py |
import itertools
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ArrowConfig(datasets.BuilderConfig):
"""BuilderConfig for Arrow."""
features: Optional[datasets.Features] = None
class Arrow(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ArrowConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
try:
for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
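# Illustrative sketch (not part of the original module): `_generate_tables` expects files in
# the Arrow IPC *stream* format; this writes and re-reads such a file with the same
# `pa.ipc.open_stream` call the builder uses. The helper name is hypothetical.
def _example_arrow_stream_round_trip(path):
    import pyarrow as pa

    table = pa.table({"text": ["a", "b"], "label": [0, 1]})
    with pa.OSFile(path, "wb") as sink:
        with pa.ipc.new_stream(sink, table.schema) as writer:
            writer.write_table(table)
    with open(path, "rb") as f:
        batches = list(pa.ipc.open_stream(f))
    return pa.Table.from_batches(batches)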
| datasets-main | src/datasets/packaged_modules/arrow/arrow.py |
datasets-main | src/datasets/packaged_modules/arrow/__init__.py |
|
datasets-main | src/datasets/packaged_modules/imagefolder/__init__.py |
|
from typing import List
import datasets
from datasets.tasks import ImageClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""BuilderConfig for ImageFolder."""
drop_labels: bool = None
drop_metadata: bool = None
class ImageFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Image
BASE_COLUMN_NAME = "image"
BUILDER_CONFIG_CLASS = ImageFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
CLASSIFICATION_TASK = ImageClassification(image_column="image", label_column="label")
# Obtained with:
# ```
# import PIL.Image
# IMAGE_EXTENSIONS = []
# PIL.Image.init()
# for ext, format in PIL.Image.EXTENSION.items():
# if format in PIL.Image.OPEN:
# IMAGE_EXTENSIONS.append(ext[1:])
# ```
# We intentionally do not run this code on launch because:
# (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
IMAGE_EXTENSIONS = [
".blp",
".bmp",
".dib",
".bufr",
".cur",
".pcx",
".dcx",
".dds",
".ps",
".eps",
".fit",
".fits",
".fli",
".flc",
".ftc",
".ftu",
".gbr",
".gif",
".grib",
".h5",
".hdf",
".png",
".apng",
".jp2",
".j2k",
".jpc",
".jpf",
".jpx",
".j2c",
".icns",
".ico",
".im",
".iim",
".tif",
".tiff",
".jfif",
".jpe",
".jpg",
".jpeg",
".mpg",
".mpeg",
".msp",
".pcd",
".pxr",
".pbm",
".pgm",
".ppm",
".pnm",
".psd",
".bw",
".rgb",
".rgba",
".sgi",
".ras",
".tga",
".icb",
".vda",
".vst",
".webp",
".wmf",
".emf",
".xbm",
".xpm",
]
ImageFolder.EXTENSIONS = IMAGE_EXTENSIONS
| datasets-main | src/datasets/packaged_modules/imagefolder/imagefolder.py |
datasets-main | src/datasets/packaged_modules/generator/__init__.py |
|
from dataclasses import dataclass
from typing import Callable, Optional
import datasets
@dataclass
class GeneratorConfig(datasets.BuilderConfig):
generator: Optional[Callable] = None
gen_kwargs: Optional[dict] = None
features: Optional[datasets.Features] = None
def __post_init__(self):
assert self.generator is not None, "generator must be specified"
if self.gen_kwargs is None:
self.gen_kwargs = {}
class Generator(datasets.GeneratorBasedBuilder):
BUILDER_CONFIG_CLASS = GeneratorConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=self.config.gen_kwargs)]
def _generate_examples(self, **gen_kwargs):
for idx, ex in enumerate(self.config.generator(**gen_kwargs)):
yield idx, ex
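# Illustrative sketch (not part of the original module): this builder is what
# `datasets.Dataset.from_generator` typically relies on; the callable and its `gen_kwargs`
# end up in `GeneratorConfig`. The helper and generator names are hypothetical.
def _example_from_generator():
    import datasets

    def squares(n):
        for i in range(n):
            yield {"i": i, "square": i * i}

    return datasets.Dataset.from_generator(squares, gen_kwargs={"n": 5})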
| datasets-main | src/datasets/packaged_modules/generator/generator.py |
datasets-main | src/datasets/packaged_modules/json/__init__.py |
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
"""BuilderConfig for JSON."""
features: Optional[datasets.Features] = None
encoding: str = "utf-8"
encoding_errors: Optional[str] = None
field: Optional[str] = None
use_threads: bool = True # deprecated
block_size: Optional[int] = None # deprecated
chunksize: int = 10 << 20 # 10MB
newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = JsonConfig
def _info(self):
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
)
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
type = self.config.features.arrow_schema.field(column_name).type
pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
dataset = json.load(f)
# We keep only the field we are interested in
dataset = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(dataset, (list, tuple)):
keys = set().union(*[row.keys() for row in dataset])
mapping = {col: [row.get(col) for row in dataset] for col in keys}
else:
mapping = dataset
pa_table = pa.Table.from_pydict(mapping)
yield file_idx, self._cast_table(pa_table)
# If the file has one json object per line
else:
with open(file, "rb") as f:
batch_idx = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
block_size = max(self.config.chunksize // 32, 16 << 10)
encoding_errors = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
batch = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(f)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
try:
while True:
try:
pa_table = paj.read_json(
io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
)
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(e, pa.ArrowInvalid)
and "straddling" not in str(e)
or block_size > len(batch)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
)
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
file, encoding=self.config.encoding, errors=self.config.encoding_errors
) as f:
dataset = json.load(f)
except json.JSONDecodeError:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(dataset, list): # list is the only sequence type supported in JSON
try:
keys = set().union(*[row.keys() for row in dataset])
mapping = {col: [row.get(col) for row in dataset] for col in keys}
pa_table = pa.Table.from_pydict(mapping)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
yield file_idx, self._cast_table(pa_table)
break
else:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise ValueError(
f"Not able to read records in the JSON file at {file}. "
f"You should probably indicate the field of the JSON file containing your records. "
f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1
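# Illustrative sketch (not part of the original module): when `field` points at a list of
# dicts, the loader normalizes it to a dict of columns before building the Arrow table,
# while a dict of lists is passed through unchanged. The helper name is hypothetical.
def _example_normalize_field_payload(dataset):
    if isinstance(dataset, (list, tuple)):
        keys = set().union(*[row.keys() for row in dataset])
        return {col: [row.get(col) for row in dataset] for col in keys}
    return dataset
# e.g. _example_normalize_field_payload([{"a": 1}, {"a": 2, "b": 3}])
# -> {"a": [1, 2], "b": [None, 3]} (column order is not guaranteed)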
| datasets-main | src/datasets/packaged_modules/json/json.py |
datasets-main | src/datasets/packaged_modules/csv/__init__.py |
|
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
"""BuilderConfig for CSV."""
sep: str = ","
delimiter: Optional[str] = None
header: Optional[Union[int, List[int], str]] = "infer"
names: Optional[List[str]] = None
column_names: Optional[List[str]] = None
index_col: Optional[Union[int, str, List[int], List[str]]] = None
usecols: Optional[Union[List[int], List[str]]] = None
prefix: Optional[str] = None
mangle_dupe_cols: bool = True
engine: Optional[Literal["c", "python", "pyarrow"]] = None
converters: Dict[Union[int, str], Callable[[Any], Any]] = None
true_values: Optional[list] = None
false_values: Optional[list] = None
skipinitialspace: bool = False
skiprows: Optional[Union[int, List[int]]] = None
nrows: Optional[int] = None
na_values: Optional[Union[str, List[str]]] = None
keep_default_na: bool = True
na_filter: bool = True
verbose: bool = False
skip_blank_lines: bool = True
thousands: Optional[str] = None
decimal: str = "."
lineterminator: Optional[str] = None
quotechar: str = '"'
quoting: int = 0
escapechar: Optional[str] = None
comment: Optional[str] = None
encoding: Optional[str] = None
dialect: Optional[str] = None
error_bad_lines: bool = True
warn_bad_lines: bool = True
skipfooter: int = 0
doublequote: bool = True
memory_map: bool = False
float_precision: Optional[str] = None
chunksize: int = 10_000
features: Optional[datasets.Features] = None
encoding_errors: Optional[str] = "strict"
on_bad_lines: Literal["error", "warn", "skip"] = "error"
date_format: Optional[str] = None
def __post_init__(self):
if self.delimiter is not None:
self.sep = self.delimiter
if self.column_names is not None:
self.names = self.column_names
@property
def pd_read_csv_kwargs(self):
pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = CsvConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
def _generate_tables(self, files):
schema = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
dtype = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
try:
for batch_idx, df in enumerate(csv_file_reader):
pa_table = pa.Table.from_pandas(df)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
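# Illustrative sketch (not part of the original module): `delimiter` and `column_names` are
# aliases that `__post_init__` copies onto the pandas-facing `sep` and `names` fields, and
# `pd_read_csv_kwargs` then drops deprecated or defaulted parameters before calling
# `pd.read_csv`. The helper name is hypothetical.
def _example_csv_config_aliases():
    config = CsvConfig(delimiter=";", column_names=["a", "b"])
    assert config.sep == ";" and config.names == ["a", "b"]
    return config.pd_read_csv_kwargs  # ready to splat into pd.read_csv(file, **kwargs)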
| datasets-main | src/datasets/packaged_modules/csv/csv.py |
datasets-main | src/datasets/packaged_modules/text/__init__.py |
|
import itertools
import warnings
from dataclasses import InitVar, dataclass
from io import StringIO
from typing import Optional
import pyarrow as pa
import datasets
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class TextConfig(datasets.BuilderConfig):
"""BuilderConfig for text files."""
features: Optional[datasets.Features] = None
encoding: str = "utf-8"
errors: InitVar[Optional[str]] = "deprecated"
encoding_errors: Optional[str] = None
chunksize: int = 10 << 20 # 10MB
keep_linebreaks: bool = False
sample_by: str = "line"
def __post_init__(self, errors):
if errors != "deprecated":
warnings.warn(
"'errors' was deprecated in favor of 'encoding_errors' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'encoding_errors={errors}' instead.",
FutureWarning,
)
self.encoding_errors = errors
class Text(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = TextConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]].
If str or List[str], then the dataset returns only the 'train' split.
If dict, then keys should be from the `datasets.Split` enum.
"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa_table.cast(schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
else:
return pa_table.cast(pa.schema({"text": pa.string()}))
def _generate_tables(self, files):
pa_table_names = list(self.config.features) if self.config.features is not None else ["text"]
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
# open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n"
with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
if self.config.sample_by == "line":
batch_idx = 0
while True:
batch = f.read(self.config.chunksize)
if not batch:
break
batch += f.readline() # finish current line
# StringIO.readlines, by default splits only on "\n" (and keeps line breaks)
batch = StringIO(batch).readlines()
if not self.config.keep_linebreaks:
batch = [line.rstrip("\n") for line in batch]
pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1
elif self.config.sample_by == "paragraph":
batch_idx = 0
batch = ""
while True:
new_batch = f.read(self.config.chunksize)
if not new_batch:
break
batch += new_batch
batch += f.readline() # finish current line
batch = batch.split("\n\n")
pa_table = pa.Table.from_arrays(
[pa.array([example for example in batch[:-1] if example])], names=pa_table_names
)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1
batch = batch[-1]
if batch:
pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names)
yield (file_idx, batch_idx), self._cast_table(pa_table)
elif self.config.sample_by == "document":
text = f.read()
pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names)
yield file_idx, self._cast_table(pa_table)
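# Illustrative sketch (not part of the original module): roughly what each `sample_by` mode
# yields for the same content -- one row per line, per blank-line-separated paragraph, or
# per file. The helper name is hypothetical and ignores chunked reading.
def _example_sample_by_modes():
    content = "first line\nsecond line\n\nnew paragraph\n"
    return {
        "line": content.splitlines(),
        "paragraph": [p for p in content.split("\n\n") if p.strip()],
        "document": [content],
    }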
| datasets-main | src/datasets/packaged_modules/text/text.py |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
"""BuilderConfig for Parquet."""
batch_size: int = 10_000
columns: Optional[List[str]] = None
features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ParquetConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
features = datasets.Features.from_arrow_schema(pq.read_schema(f))
if self.config.columns is not None:
features = datasets.Features(
{col: feat for col, feat in features.items() if col in self.config.columns}
)
self.info.features = features
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
if self.config.features is not None and self.config.columns is not None:
if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):
raise ValueError(
f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
)
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
parquet_file = pq.ParquetFile(f)
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
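# Illustrative sketch (not part of the original module): the same read pattern the builder
# uses -- `ParquetFile.iter_batches` streams record batches of `batch_size` rows, and
# `columns=` prunes unused columns at read time. The helper name is hypothetical.
def _example_iter_parquet_batches(path, columns=None, batch_size=10_000):
    import pyarrow as pa
    import pyarrow.parquet as pq

    with open(path, "rb") as f:
        parquet_file = pq.ParquetFile(f)
        for record_batch in parquet_file.iter_batches(batch_size=batch_size, columns=columns):
            yield pa.Table.from_batches([record_batch])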
| datasets-main | src/datasets/packaged_modules/parquet/parquet.py |
datasets-main | src/datasets/packaged_modules/parquet/__init__.py |
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""Builder Config for AudioFolder."""
drop_labels: bool = None
drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Audio
BASE_COLUMN_NAME = "audio"
BUILDER_CONFIG_CLASS = AudioFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
# Obtained with:
# ```
# import soundfile as sf
#
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
#
# # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30:
# AUDIO_EXTENSIONS.extend([".mp3", ".opus"])
# ```
# We intentionally do not run this code on launch because:
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| datasets-main | src/datasets/packaged_modules/audiofolder/audiofolder.py |
datasets-main | src/datasets/packaged_modules/audiofolder/__init__.py |
|
datasets-main | src/datasets/packaged_modules/spark/__init__.py |
|
import os
import posixpath
import shutil  # used below to move shard files out of the working directory
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
"""BuilderConfig for Spark."""
features: Optional[datasets.Features] = None
def _reorder_dataframe_by_partition(df: "pyspark.sql.DataFrame", new_partition_order: List[int]):
df_combined = df.select("*").where(f"part_id = {new_partition_order[0]}")
for partition_id in new_partition_order[1:]:
partition_df = df.select("*").where(f"part_id = {partition_id}")
df_combined = df_combined.union(partition_df)
return df_combined
def _generate_iterable_examples(
df: "pyspark.sql.DataFrame",
partition_order: List[int],
):
import pyspark
def generate_fn():
df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
partition_df = _reorder_dataframe_by_partition(df_with_partition_id, partition_order)
row_id = 0
# pipeline next partition in parallel to hide latency
rows = partition_df.toLocalIterator(prefetchPartitions=True)
curr_partition = -1
for row in rows:
row_as_dict = row.asDict()
part_id = row_as_dict["part_id"]
row_as_dict.pop("part_id")
if curr_partition != part_id:
curr_partition = part_id
row_id = 0
yield f"{part_id}_{row_id}", row_as_dict
row_id += 1
return generate_fn
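# Illustrative sketch (not part of the original module, requires a running pyspark session):
# shuffling a Spark-backed iterable dataset only permutes the partition order; rows within a
# partition keep their original order. The helper name is hypothetical.
def _example_shuffled_partition_order(df, seed=42):
    import numpy as np

    partition_order = list(range(df.rdd.getNumPartitions()))
    np.random.default_rng(seed).shuffle(partition_order)
    return partition_order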
class SparkExamplesIterable(_BaseExamplesIterable):
def __init__(
self,
df: "pyspark.sql.DataFrame",
partition_order=None,
):
self.df = df
self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
def __iter__(self):
yield from self.generate_examples_fn()
def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
partition_order = list(range(self.df.rdd.getNumPartitions()))
generator.shuffle(partition_order)
return SparkExamplesIterable(self.df, partition_order=partition_order)
def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
return SparkExamplesIterable(self.df, partition_order=partition_order)
@property
def n_shards(self) -> int:
return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
BUILDER_CONFIG_CLASS = SparkConfig
def __init__(
self,
df: "pyspark.sql.DataFrame",
cache_dir: str = None,
working_dir: str = None,
**config_kwargs,
):
import pyspark
self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
self.df = df
self._working_dir = working_dir
super().__init__(
cache_dir=cache_dir,
config_name=str(self.df.semanticHash()),
**config_kwargs,
)
def _validate_cache_dir(self):
# Define this so that we don't reference self in create_cache_and_write_probe, which will result in a pickling
# error due to pickling the SparkContext.
cache_dir = self._cache_dir
# Returns the path of the created file.
def create_cache_and_write_probe(context):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(cache_dir, exist_ok=True)
probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(probe_file, "a")
return [probe_file]
if self._spark.conf.get("spark.master", "").startswith("local"):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
probe = (
self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
)
if os.path.isfile(probe[0]):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
)
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
def _repartition_df_if_needed(self, max_shard_size):
import pyspark
def get_arrow_batch_size(it):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})
df_num_rows = self.df.count()
sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
approx_bytes_per_row = (
self.df.limit(sample_num_rows)
.repartition(1)
.mapInArrow(get_arrow_batch_size, "batch_bytes: long")
.agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
.collect()[0]
.sample_bytes
/ sample_num_rows
)
approx_total_size = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
self.df = self.df.repartition(new_num_partitions)
def _prepare_split_single(
self,
fpath: str,
file_format: str,
max_shard_size: int,
) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
import pyspark
writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
embed_local_files = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
features = self.config.features
writer_batch_size = self._writer_batch_size
storage_options = self._fs.storage_options
def write_arrow(it):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
task_id = pyspark.TaskContext().taskAttemptId()
first_batch = next(it, None)
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]],
names=["task_id", "num_examples", "num_bytes"],
)
shard_id = 0
writer = writer_class(
features=features,
path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
writer_batch_size=writer_batch_size,
storage_options=storage_options,
embed_local_files=embed_local_files,
)
table = pa.Table.from_batches([first_batch])
writer.write_table(table)
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
num_examples, num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]],
names=["task_id", "num_examples", "num_bytes"],
)
shard_id += 1
writer = writer_class(
features=writer._features,
path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
writer_batch_size=writer_batch_size,
storage_options=storage_options,
embed_local_files=embed_local_files,
)
table = pa.Table.from_batches([batch])
writer.write_table(table)
if writer._num_bytes > 0:
num_examples, num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]],
names=["task_id", "num_examples", "num_bytes"],
)
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(working_fpath)):
dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(os.path.join(os.path.dirname(working_fpath), file), dest)
stats = (
self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
.groupBy("task_id")
.agg(
pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
pyspark.sql.functions.count("num_bytes").alias("num_shards"),
pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _prepare_split(
self,
split_generator: "datasets.SplitGenerator",
file_format: str = "arrow",
max_shard_size: Optional[Union[str, int]] = None,
num_proc: Optional[int] = None,
**kwargs,
):
self._validate_cache_dir()
max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
self._repartition_df_if_needed(max_shard_size)
is_local = not is_remote_filesystem(self._fs)
path_join = os.path.join if is_local else posixpath.join
SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
fpath = path_join(self._output_dir, fname)
total_num_examples = 0
total_num_bytes = 0
total_shards = 0
task_id_and_num_shards = []
all_shard_lengths = []
for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
(
num_examples,
num_bytes,
num_shards,
shard_lengths,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards))
all_shard_lengths.extend(shard_lengths)
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f"Renaming {total_shards} shards.")
if total_shards > 1:
split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
fs = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
task_id: int,
shard_id: int,
global_shard_id: int,
):
rename(
fs,
fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
)
args = []
global_shard_id = 0
for i in range(len(task_id_and_num_shards)):
task_id, num_shards = task_id_and_num_shards[i]
for shard_id in range(num_shards):
args.append([task_id, shard_id, global_shard_id])
global_shard_id += 1
self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
else:
# don't use any pattern
shard_id = 0
task_id = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
fpath.replace(SUFFIX, ""),
)
def _get_examples_iterable_for_split(
self,
split_generator: "datasets.SplitGenerator",
) -> SparkExamplesIterable:
return SparkExamplesIterable(self.df)
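# Illustrative sketch (not part of the original module): the shard-size arithmetic used in
# `Spark._repartition_df_if_needed` above, reproduced with made-up numbers so the
# repartitioning decision is easier to follow. All values below are hypothetical.
def _example_repartition_arithmetic():
    approx_bytes_per_row = 1_000  # hypothetical estimate from a <=100-row sample
    df_num_rows = 1_000_000  # hypothetical row count
    max_shard_size = 100 * 2**20  # 100 MiB
    approx_total_size = approx_bytes_per_row * df_num_rows
    if approx_total_size <= max_shard_size:
        return 1  # small enough to keep a single partition
    # keep at least one row per partition, as the builder above does
    return min(df_num_rows, int(approx_total_size / max_shard_size))  # -> 9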
| datasets-main | src/datasets/packaged_modules/spark/spark.py |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
"""BuilderConfig for Pandas."""
features: Optional[datasets.Features] = None
class Pandas(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = PandasConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for i, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
pa_table = pa.Table.from_pandas(pd.read_pickle(f))
yield i, self._cast_table(pa_table)
| datasets-main | src/datasets/packaged_modules/pandas/pandas.py |
| datasets-main | src/datasets/packaged_modules/pandas/__init__.py |
| datasets-main | src/datasets/packaged_modules/sql/__init__.py |
import sys
from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class SqlConfig(datasets.BuilderConfig):
"""BuilderConfig for SQL."""
sql: Union[str, "sqlalchemy.sql.Selectable"] = None
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
index_col: Optional[Union[str, List[str]]] = None
coerce_float: bool = True
params: Optional[Union[List, Tuple, Dict]] = None
parse_dates: Optional[Union[List, Dict]] = None
columns: Optional[List[str]] = None
chunksize: Optional[int] = 10_000
features: Optional[datasets.Features] = None
def __post_init__(self):
if self.sql is None:
raise ValueError("sql must be specified")
if self.con is None:
raise ValueError("con must be specified")
def create_config_id(
self,
config_kwargs: dict,
custom_features: Optional[datasets.Features] = None,
) -> str:
config_kwargs = config_kwargs.copy()
# We need to stringify the Selectable object to make its hash deterministic
# The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
sql = config_kwargs["sql"]
if not isinstance(sql, str):
if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
import sqlalchemy
if isinstance(sql, sqlalchemy.sql.Selectable):
engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
sql_str = str(sql.compile(dialect=engine.dialect))
config_kwargs["sql"] = sql_str
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
con = config_kwargs["con"]
if not isinstance(con, str):
config_kwargs["con"] = id(con)
logger.info(
f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
)
return super().create_config_id(config_kwargs, custom_features=custom_features)
@property
def pd_read_sql_kwargs(self):
pd_read_sql_kwargs = {
"index_col": self.index_col,
"columns": self.columns,
"params": self.params,
"coerce_float": self.coerce_float,
"parse_dates": self.parse_dates,
}
return pd_read_sql_kwargs
class Sql(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = SqlConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
def _generate_tables(self):
chunksize = self.config.chunksize
sql_reader = pd.read_sql(
self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs
)
sql_reader = [sql_reader] if chunksize is None else sql_reader
for chunk_idx, df in enumerate(sql_reader):
pa_table = pa.Table.from_pandas(df)
yield chunk_idx, self._cast_table(pa_table)
| datasets-main | src/datasets/packaged_modules/sql/sql.py |
# Copyright 2020 Optuna, Hugging Face
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Logging utilities. """
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
"""
If DATASETS_VERBOSITY env var is set to one of the valid choices return that as the new default level.
If it is not - fall back to ``_default_log_level``
"""
env_level_str = os.getenv("DATASETS_VERBOSITY", None)
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys()) }"
)
return _default_log_level
def _get_library_name() -> str:
return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
# Apply our default configuration to the library root logger.
library_root_logger = _get_library_root_logger()
library_root_logger.addHandler(logging.StreamHandler())
library_root_logger.setLevel(_get_default_logging_level())
def _reset_library_root_logger() -> None:
library_root_logger = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET)
def get_logger(name: Optional[str] = None) -> logging.Logger:
"""Return a logger with the specified name.
This function can be used in dataset scripts.
"""
if name is None:
name = _get_library_name()
return logging.getLogger(name)
def get_verbosity() -> int:
"""Return the current level for the HuggingFace datasets library's root logger.
Returns:
Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`.
<Tip>
HuggingFace datasets library has following logging levels:
- `datasets.logging.CRITICAL`, `datasets.logging.FATAL`
- `datasets.logging.ERROR`
- `datasets.logging.WARNING`, `datasets.logging.WARN`
- `datasets.logging.INFO`
- `datasets.logging.DEBUG`
</Tip>
"""
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
"""Set the level for the Hugging Face Datasets library's root logger.
Args:
verbosity:
Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`.
"""
_get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
"""Set the level for the Hugging Face datasets library's root logger to `INFO`.
This will display most of the logging information and tqdm bars.
Shortcut to `datasets.logging.set_verbosity(datasets.logging.INFO)`.
"""
return set_verbosity(INFO)
def set_verbosity_warning():
"""Set the level for the Hugging Face datasets library's root logger to `WARNING`.
This will display only the warning and errors logging information and tqdm bars.
Shortcut to `datasets.logging.set_verbosity(datasets.logging.WARNING)`.
"""
return set_verbosity(WARNING)
def set_verbosity_debug():
"""Set the level for the Hugging Face datasets library's root logger to `DEBUG`.
This will display all the logging information and tqdm bars.
Shortcut to `datasets.logging.set_verbosity(datasets.logging.DEBUG)`.
"""
return set_verbosity(DEBUG)
def set_verbosity_error():
"""Set the level for the Hugging Face datasets library's root logger to `ERROR`.
This will display only the errors logging information and tqdm bars.
Shortcut to `datasets.logging.set_verbosity(datasets.logging.ERROR)`.
"""
return set_verbosity(ERROR)
def disable_propagation() -> None:
"""Disable propagation of the library log outputs.
Note that log propagation is disabled by default.
"""
_get_library_root_logger().propagate = False
def enable_propagation() -> None:
"""Enable propagation of the library log outputs.
Please disable the Hugging Face datasets library's default handler to prevent double logging if the root logger has
been configured.
"""
_get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
"""Dummy tqdm which doesn't do anything."""
def __init__(self, *args, **kwargs): # pylint: disable=unused-argument
self._iterator = args[0] if args else None
def __iter__(self):
return iter(self._iterator)
def __getattr__(self, _):
"""Return empty function."""
def empty_fn(*args, **kwargs): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
return
_tqdm_active = True
class _tqdm_cls:
def __call__(self, *args, disable=False, **kwargs):
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*args, **kwargs)
else:
return EmptyTqdm(*args, **kwargs)
def set_lock(self, *args, **kwargs):
self._lock = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*args, **kwargs)
def get_lock(self):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
def __delattr__(self, attr):
"""fix for https://github.com/huggingface/datasets/issues/6066"""
try:
del self.__dict__[attr]
except KeyError:
if attr != "_lock":
raise AttributeError(attr)
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
"""Return a boolean indicating whether tqdm progress bars are enabled."""
global _tqdm_active
return bool(_tqdm_active)
def enable_progress_bar():
"""Enable tqdm progress bar."""
global _tqdm_active
_tqdm_active = True
def disable_progress_bar():
"""Disable tqdm progress bar."""
global _tqdm_active
_tqdm_active = False
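# Illustrative sketch (not part of the original module): typical use of the helpers above to
# silence library logs and progress bars, then restore the previous verbosity. Nothing here
# runs at import time; the "noisy operation" placeholder is hypothetical.
def _example_quiet_mode():
    previous_verbosity = get_verbosity()
    set_verbosity_error()  # only show errors
    disable_progress_bar()  # hide bars created through the `tqdm` wrapper above
    try:
        pass  # ... run some noisy operation here ...
    finally:
        set_verbosity(previous_verbosity)
        enable_progress_bar()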
| datasets-main | src/datasets/utils/logging.py |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
"""Return the number of possible shards according to the input gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
# until we decide how to define sharding without ambiguity for users
lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
if len(set(lists_lengths.values())) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
)
)
max_length = max(lists_lengths.values(), default=0)
return max(1, max_length)
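# Illustrative sketch (not part of the original module): how the helper above counts shards.
# Only list values are treated as shardable data sources; the file names are hypothetical.
def _example_number_of_shards():
    gen_kwargs = {"files": ["part-0.txt", "part-1.txt", "part-2.txt"], "encoding": "utf-8"}
    return _number_of_shards_in_gen_kwargs(gen_kwargs)  # -> 3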
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
"""
Get the range of shard indices per job.
    If num_shards < max_num_jobs, then num_shards jobs are given a range of one shard.
    The order of the shard indices is preserved: e.g. all the first shards are given to the first job.
Moreover all the jobs are given approximately the same number of shards.
Example:
```python
>>> _distribute_shards(2, max_num_jobs=4)
[range(0, 1), range(1, 2)]
>>> _distribute_shards(10, max_num_jobs=3)
[range(0, 4), range(4, 7), range(7, 10)]
```
"""
shards_indices_per_group = []
for group_idx in range(max_num_jobs):
num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
shard_indices = range(start, start + num_shards_to_add)
shards_indices_per_group.append(shard_indices)
return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
"""Split the gen_kwargs into `max_num_job` gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
if num_shards == 1:
return [dict(gen_kwargs)]
else:
shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(value, list)
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(shard_indices_per_group))
]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key], list)
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
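# Illustrative sketch (not part of the original module): splitting gen_kwargs across two jobs
# and merging the per-job results back together. The file names are hypothetical.
def _example_split_and_merge_gen_kwargs():
    gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt"], "encoding": "utf-8"}
    per_job = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
    # per_job == [{"files": ["a.txt", "b.txt"], "encoding": "utf-8"},
    #             {"files": ["c.txt"], "encoding": "utf-8"}]
    return _merge_gen_kwargs(per_job)  # -> the original gen_kwargs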
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
"""Return a shuffled copy of the input gen_kwargs"""
# We must shuffle all the lists, and lists of the same size must have the same shuffling.
# This way entangled lists of (shard, shard_metadata) are still in the right order.
# First, let's generate the shuffled indices per list size
list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
indices_per_size = {}
for size in list_sizes:
indices_per_size[size] = list(range(size))
rng.shuffle(indices_per_size[size])
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
shuffled_kwargs = dict(gen_kwargs)
for key, value in shuffled_kwargs.items():
if isinstance(value, list):
shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
return shuffled_kwargs
| datasets-main | src/datasets/utils/sharding.py |
"""Contains utilities to flag a feature as "experimental" in datasets."""
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
"""Decorator to flag a feature as experimental.
    An experimental feature triggers a warning when used, as it might be subject to breaking changes in the future.
Args:
fn (`Callable`):
The function to flag as experimental.
Returns:
`Callable`: The decorated function.
Example:
```python
>>> from datasets.utils import experimental
>>> @experimental
... def my_function():
... print("Hello world!")
>>> my_function()
UserWarning: 'my_function' is experimental and might be subject to breaking changes in the future.
Hello world!
```
"""
@wraps(fn)
def _inner_fn(*args, **kwargs):
warnings.warn(
(f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
UserWarning,
)
return fn(*args, **kwargs)
return _inner_fn
| datasets-main | src/datasets/utils/experimental.py |
import textwrap
from collections import Counter
from pathlib import Path
from typing import Any, ClassVar, Dict, Optional, Tuple, Union
import yaml
from huggingface_hub import DatasetCardData
from ..config import METADATA_CONFIGS_FIELD
from ..utils.logging import get_logger
from .deprecation_utils import deprecated
logger = get_logger(__name__)
class _NoDuplicateSafeLoader(yaml.SafeLoader):
def _check_no_duplicates_on_constructed_node(self, node):
keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
keys = [tuple(key) if isinstance(key, list) else key for key in keys]
counter = Counter(keys)
duplicate_keys = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")
def construct_mapping(self, node, deep=False):
mapping = super().construct_mapping(node, deep=deep)
self._check_no_duplicates_on_constructed_node(node)
return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
full_content = list(readme_content.splitlines())
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
sep_idx = full_content[1:].index("---") + 1
yamlblock = "\n".join(full_content[1:sep_idx])
return yamlblock, "\n".join(full_content[sep_idx + 1 :])
return None, "\n".join(full_content)
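# Illustrative sketch (not part of the original module): the helper above splits a dataset
# card into its YAML front matter and its markdown body. The card content is hypothetical.
def _example_split_yaml_from_readme():
    readme_content = "---\nlicense: mit\n---\n# Dataset Card for Demo\nSome description."
    yaml_block, body = _split_yaml_from_readme(readme_content)
    return yaml_block, body  # -> ("license: mit", "# Dataset Card for Demo\nSome description.")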
@deprecated("Use `huggingface_hub.DatasetCardData` instead.")
class DatasetMetadata(dict):
# class attributes
_FIELDS_WITH_DASHES = {"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
def from_readme(cls, path: Union[Path, str]) -> "DatasetMetadata":
"""Loads and validates the dataset metadata from its dataset card (README.md)
Args:
path (:obj:`Path`): Path to the dataset card (its README.md file)
Returns:
:class:`DatasetMetadata`: The dataset's metadata
Raises:
:obj:`TypeError`: If the dataset's metadata is invalid
"""
with open(path, encoding="utf-8") as readme_file:
yaml_string, _ = _split_yaml_from_readme(readme_file.read())
if yaml_string is not None:
return cls.from_yaml_string(yaml_string)
else:
return cls()
def to_readme(self, path: Path):
if path.exists():
with open(path, encoding="utf-8") as readme_file:
readme_content = readme_file.read()
else:
readme_content = None
updated_readme_content = self._to_readme(readme_content)
with open(path, "w", encoding="utf-8") as readme_file:
readme_file.write(updated_readme_content)
def _to_readme(self, readme_content: Optional[str] = None) -> str:
if readme_content is not None:
_, content = _split_yaml_from_readme(readme_content)
full_content = "---\n" + self.to_yaml_string() + "---\n" + content
else:
full_content = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def from_yaml_string(cls, string: str) -> "DatasetMetadata":
"""Loads and validates the dataset metadata from a YAML string
Args:
string (:obj:`str`): The YAML string
Returns:
:class:`DatasetMetadata`: The dataset's metadata
Raises:
:obj:`TypeError`: If the dataset's metadata is invalid
"""
metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
# Convert the YAML keys to DatasetMetadata fields
metadata_dict = {
(key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**metadata_dict)
def to_yaml_string(self) -> str:
return yaml.safe_dump(
{
(key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
},
sort_keys=False,
allow_unicode=True,
encoding="utf-8",
).decode("utf-8")
class MetadataConfigs(Dict[str, Dict[str, Any]]):
"""Should be in format {config_name: {**config_params}}."""
FIELD_NAME: ClassVar[str] = METADATA_CONFIGS_FIELD
@staticmethod
def _raise_if_data_files_field_not_valid(metadata_config: dict):
yaml_data_files = metadata_config.get("data_files")
if yaml_data_files is not None:
yaml_error_message = textwrap.dedent(
f"""
Expected data_files in YAML to be either a string or a list of strings
or a list of dicts with two keys: 'split' and 'path', but got {yaml_data_files}
Examples of data_files in YAML:
data_files: data.csv
data_files: data/*.png
data_files:
- part0/*
- part1/*
data_files:
- split: train
path: train/*
- split: test
path: test/*
data_files:
- split: train
path:
- train/part1/*
- train/part2/*
- split: test
path: test/*
"""
)
if not isinstance(yaml_data_files, (list, str)):
raise ValueError(yaml_error_message)
if isinstance(yaml_data_files, list):
for yaml_data_files_item in yaml_data_files:
if (
not isinstance(yaml_data_files_item, (str, dict))
or isinstance(yaml_data_files_item, dict)
and not (
len(yaml_data_files_item) == 2
and "split" in yaml_data_files_item
and isinstance(yaml_data_files_item.get("path"), (str, list))
)
):
raise ValueError(yaml_error_message)
@classmethod
def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "MetadataConfigs":
if dataset_card_data.get(cls.FIELD_NAME):
metadata_configs = dataset_card_data[cls.FIELD_NAME]
if not isinstance(metadata_configs, list):
raise ValueError(f"Expected {cls.FIELD_NAME} to be a list, but got '{metadata_configs}'")
for metadata_config in metadata_configs:
if "config_name" not in metadata_config:
raise ValueError(
f"Each config must include `config_name` field with a string name of a config, "
f"but got {metadata_config}. "
)
cls._raise_if_data_files_field_not_valid(metadata_config)
return cls(
{
config["config_name"]: {param: value for param, value in config.items() if param != "config_name"}
for config in metadata_configs
}
)
return cls()
def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
if self:
for metadata_config in self.values():
self._raise_if_data_files_field_not_valid(metadata_config)
current_metadata_configs = self.from_dataset_card_data(dataset_card_data)
total_metadata_configs = dict(sorted({**current_metadata_configs, **self}.items()))
for config_name, config_metadata in total_metadata_configs.items():
config_metadata.pop("config_name", None)
dataset_card_data[self.FIELD_NAME] = [
{"config_name": config_name, **config_metadata}
for config_name, config_metadata in total_metadata_configs.items()
]
def get_default_config_name(self) -> Optional[str]:
default_config_name = None
for config_name, metadata_config in self.items():
if config_name == "default" or metadata_config.get("default"):
if default_config_name is None:
default_config_name = config_name
else:
raise ValueError(
f"Dataset has several default configs: '{default_config_name}' and '{config_name}'."
)
return default_config_name
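# Illustrative sketch (not part of the original module): building a `MetadataConfigs` mapping
# by hand, validating its `data_files`, and reading the default config. The config names and
# data files are hypothetical.
def _example_metadata_configs():
    metadata_configs = MetadataConfigs(
        {
            "small": {"data_files": "data/small/*.csv", "default": True},
            "full": {"data_files": ["data/full/*.csv"]},
        }
    )
    for metadata_config in metadata_configs.values():
        MetadataConfigs._raise_if_data_files_field_not_valid(metadata_config)
    return metadata_configs.get_default_config_name()  # -> "small"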
# DEPRECATED - just here to support old versions of evaluate like 0.2.2
# To support new tasks on the Hugging Face Hub, please open a PR for this file:
# https://github.com/huggingface/hub-docs/blob/main/js/src/lib/interfaces/Types.ts
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
args = ap.parse_args()
readme_filepath = Path(args.readme_filepath)
dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| datasets-main | src/datasets/utils/metadata.py |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Version utils."""
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
"""Dataset version `MAJOR.MINOR.PATCH`.
Args:
version_str (`str`):
The dataset version.
description (`str`):
A description of what is new in this version.
major (`str`):
minor (`str`):
patch (`str`):
Example:
```py
>>> VERSION = datasets.Version("1.0.0")
```
"""
version_str: str
description: Optional[str] = None
major: Optional[Union[str, int]] = None
minor: Optional[Union[str, int]] = None
patch: Optional[Union[str, int]] = None
def __post_init__(self):
self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
def __repr__(self):
return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def tuple(self):
return self.major, self.minor, self.patch
def _validate_operand(self, other):
if isinstance(other, str):
return Version(other)
elif isinstance(other, Version):
return other
raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")
def __eq__(self, other):
try:
other = self._validate_operand(other)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__(self, other):
other = self._validate_operand(other)
return self.tuple < other.tuple
def __hash__(self):
return hash(_version_tuple_to_str(self.tuple))
@classmethod
def from_dict(cls, dic):
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def _to_yaml_string(self) -> str:
return self.version_str
def _str_to_version_tuple(version_str):
"""Return the tuple (major, minor, patch) version extracted from the str."""
res = _VERSION_REG.match(version_str)
if not res:
raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])
def _version_tuple_to_str(version_tuple):
"""Return the str version from the version tuple (major, minor, patch)."""
return ".".join(str(v) for v in version_tuple)
| datasets-main | src/datasets/utils/version.py |
# loading package files: https://stackoverflow.com/a/20885799
import importlib.resources as pkg_resources
import logging
from pathlib import Path
from typing import Any, List, Tuple
import yaml
from . import resources
from .deprecation_utils import deprecated
BASE_REF_URL = "https://github.com/huggingface/datasets/tree/main/src/datasets/utils"
this_url = f"{BASE_REF_URL}/{__file__}"
logger = logging.getLogger(__name__)
def load_yaml_resource(resource: str) -> Tuple[Any, str]:
content = pkg_resources.read_text(resources, resource)
return yaml.safe_load(content), f"{BASE_REF_URL}/resources/{resource}"
readme_structure, known_readme_structure_url = load_yaml_resource("readme_structure.yaml")
FILLER_TEXT = [
"[Needs More Information]",
"[More Information Needed]",
"(https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)",
]
# Dictionary representation of section/readme, error_list, warning_list
ReadmeValidatorOutput = Tuple[dict, List[str], List[str]]
class Section:
def __init__(self, name: str, level: str, lines: List[str] = None, suppress_parsing_errors: bool = False):
self.name = name
self.level = level
self.lines = lines
self.text = ""
self.is_empty_text = True
self.content = {}
self.parsing_error_list = []
self.parsing_warning_list = []
if self.lines is not None:
self.parse(suppress_parsing_errors=suppress_parsing_errors)
def parse(self, suppress_parsing_errors: bool = False):
current_sub_level = ""
current_lines = []
code_start = False
for line in self.lines:
if line.strip(" \n") == "":
continue
elif line.strip(" \n")[:3] == "```":
code_start = not code_start
elif line.split()[0] == self.level + "#" and not code_start:
if current_sub_level != "":
self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines)
current_lines = []
else:
if current_lines != []:
self.text += "".join(current_lines).strip()
if self.text != "" and self.text not in FILLER_TEXT:
self.is_empty_text = False
current_lines = []
current_sub_level = " ".join(line.split()[1:]).strip(" \n")
else:
current_lines.append(line)
else:
if current_sub_level != "":
if current_sub_level in self.content:
self.parsing_error_list.append(
f"Multiple sections with the same heading `{current_sub_level}` have been found. Please keep only one of these sections."
)
self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines)
else:
if current_lines != []:
self.text += "".join(current_lines).strip()
if self.text != "" and self.text not in FILLER_TEXT:
self.is_empty_text = False
if self.level == "" and not suppress_parsing_errors:
if self.parsing_error_list != [] or self.parsing_warning_list != []:
                errors = "\n".join("-\t" + x for x in self.parsing_error_list + self.parsing_warning_list)
error_string = f"The following issues were found while parsing the README at `{self.name}`:\n" + errors
raise ValueError(error_string)
def validate(self, structure: dict) -> ReadmeValidatorOutput:
"""Validates a Section class object recursively using the structure provided as a dictionary.
Args:
            structure (:obj:`dict`): The dictionary representing the expected structure.
Returns:
:obj: `ReadmeValidatorOutput`: The dictionary representation of the section, and the errors.
"""
# Header text validation
error_list = []
warning_list = []
if structure["allow_empty"] is False:
# If content is expected
if self.is_empty_text and self.content == {}:
# If no content is found, mention it in the error_list
error_list.append(f"Expected some content in section `{self.name}` but it is empty.")
if structure["allow_empty_text"] is False:
# If some text is expected
if self.is_empty_text:
# If no text is found, mention it in the error_list
error_list.append(
f"Expected some text in section `{self.name}` but it is empty (text in subsections are ignored)."
)
# Subsections Validation
if structure["subsections"] is not None:
# If subsections are expected
if self.content == {}:
# If no subsections are present
values = [subsection["name"] for subsection in structure["subsections"]]
# Mention the expected values in the error_list
error_list.append(
f"Section `{self.name}` expected the following subsections: {', '.join(['`'+x+'`' for x in values])}. Found 'None'."
)
else:
# If some subsections are present
structure_names = [subsection["name"] for subsection in structure["subsections"]]
has_missing_subsections = False
for idx, name in enumerate(structure_names):
if name not in self.content:
# If the expected subsection is not present
error_list.append(f"Section `{self.name}` is missing subsection: `{name}`.")
has_missing_subsections = True
else:
# If the subsection is present, validate subsection, return the result
# and concat the errors from subsection to section error_list
# Skip sublevel validation if current level is `###`
if self.level == "###":
continue
else:
_, subsec_error_list, subsec_warning_list = self.content[name].validate(
structure["subsections"][idx]
)
error_list += subsec_error_list
warning_list += subsec_warning_list
if has_missing_subsections: # we only allow to have extra subsections if all the other ones are here
for name in self.content:
if name not in structure_names:
# If an extra subsection is present
warning_list.append(
f"`{self.name}` has an extra subsection: `{name}`. Skipping further validation checks for this subsection as expected structure is unknown."
)
if error_list:
# If there are errors, do not return the dictionary as it is invalid
return {}, error_list, warning_list
else:
return self.to_dict(), error_list, warning_list
def to_dict(self) -> dict:
"""Returns the dictionary representation of a section."""
return {
"name": self.name,
"text": self.text,
"is_empty_text": self.is_empty_text,
"subsections": [value.to_dict() for value in self.content.values()],
}
@deprecated("Use `huggingface_hub.DatasetCard` instead.")
class ReadMe(Section): # Level 0
def __init__(self, name: str, lines: List[str], structure: dict = None, suppress_parsing_errors: bool = False):
super().__init__(name=name, level="") # Not using lines here as we need to use a child class parse
self.structure = structure
self.yaml_tags_line_count = -2
self.tag_count = 0
self.lines = lines
if self.lines is not None:
self.parse(suppress_parsing_errors=suppress_parsing_errors)
def validate(self):
if self.structure is None:
content, error_list, warning_list = self._validate(readme_structure)
else:
content, error_list, warning_list = self._validate(self.structure)
if error_list != [] or warning_list != []:
errors = "\n".join(["-\t" + x for x in error_list + warning_list])
error_string = f"The following issues were found for the README at `{self.name}`:\n" + errors
raise ValueError(error_string)
@classmethod
def from_readme(cls, path: Path, structure: dict = None, suppress_parsing_errors: bool = False):
with open(path, encoding="utf-8") as f:
lines = f.readlines()
return cls(path, lines, structure, suppress_parsing_errors=suppress_parsing_errors)
@classmethod
def from_string(
cls, string: str, structure: dict = None, root_name: str = "root", suppress_parsing_errors: bool = False
):
lines = string.split("\n")
return cls(root_name, lines, structure, suppress_parsing_errors=suppress_parsing_errors)
def parse(self, suppress_parsing_errors: bool = False):
# Skip Tags
line_count = 0
for line in self.lines:
self.yaml_tags_line_count += 1
if line.strip(" \n") == "---":
self.tag_count += 1
if self.tag_count == 2:
break
line_count += 1
if self.tag_count == 2:
self.lines = self.lines[line_count + 1 :] # Get the last + 1 th item.
else:
self.lines = self.lines[self.tag_count :]
super().parse(suppress_parsing_errors=suppress_parsing_errors)
def __str__(self):
"""Returns the string of dictionary representation of the ReadMe."""
return str(self.to_dict())
def _validate(self, readme_structure):
error_list = []
warning_list = []
if self.yaml_tags_line_count == 0:
warning_list.append("Empty YAML markers are present in the README.")
elif self.tag_count == 0:
warning_list.append("No YAML markers are present in the README.")
elif self.tag_count == 1:
warning_list.append("Only the start of YAML tags present in the README.")
# Check how many first level sections are present.
num_first_level_keys = len(self.content.keys())
if num_first_level_keys > 1:
# If more than one, add to the error list, continue
error_list.append(
f"The README has several first-level headings: {', '.join(['`'+x+'`' for x in list(self.content.keys())])}. Only one heading is expected. Skipping further validation for this README."
)
elif num_first_level_keys < 1:
# If less than one, append error.
error_list.append(
"The README has no first-level headings. One heading is expected. Skipping further validation for this README."
)
else:
# If one exactly
start_key = list(self.content.keys())[0] # Get the key
if start_key.startswith("Dataset Card for"): # Check correct start
# If the starting is correct, validate all the sections
_, sec_error_list, sec_warning_list = self.content[start_key].validate(
readme_structure["subsections"][0]
)
error_list += sec_error_list
warning_list += sec_warning_list
else:
# If not found, append error
error_list.append(
"No first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
)
if error_list:
# If there are errors, do not return the dictionary as it is invalid
return {}, error_list, warning_list
else:
return self.to_dict(), error_list, warning_list
if __name__ == "__main__":
from argparse import ArgumentParser
ap = ArgumentParser(usage="Validate the content (excluding YAML tags) of a README.md file.")
ap.add_argument("readme_filepath")
args = ap.parse_args()
readme_filepath = Path(args.readme_filepath)
readme = ReadMe.from_readme(readme_filepath)
| datasets-main | src/datasets/utils/readme.py |
import os
from apache_beam.io.filesystems import FileSystems
from apache_beam.pipeline import Pipeline
from .logging import get_logger
CHUNK_SIZE = 2 << 20 # 2mb
logger = get_logger(__name__)
class BeamPipeline(Pipeline):
"""Wrapper over `apache_beam.pipeline.Pipeline` for convenience"""
def is_local(self):
runner = self._options.get_all_options().get("runner")
return runner in [None, "DirectRunner", "PortableRunner"]
def upload_local_to_remote(local_file_path, remote_file_path, force_upload=False):
"""Use the Beam Filesystems to upload to a remote directory on gcs/s3/hdfs..."""
fs = FileSystems
if fs.exists(remote_file_path):
if force_upload:
logger.info(f"Remote path already exist: {remote_file_path}. Overwriting it as force_upload=True.")
else:
logger.info(f"Remote path already exist: {remote_file_path}. Skipping it as force_upload=False.")
return
with fs.create(remote_file_path) as remote_file:
with open(local_file_path, "rb") as local_file:
chunk = local_file.read(CHUNK_SIZE)
while chunk:
remote_file.write(chunk)
chunk = local_file.read(CHUNK_SIZE)
def download_remote_to_local(remote_file_path, local_file_path, force_download=False):
"""Use the Beam Filesystems to download from a remote directory on gcs/s3/hdfs..."""
fs = FileSystems
if os.path.exists(local_file_path):
if force_download:
logger.info(f"Local path already exist: {remote_file_path}. Overwriting it as force_upload=True.")
else:
logger.info(f"Local path already exist: {remote_file_path}. Skipping it as force_upload=False.")
return
with fs.open(remote_file_path) as remote_file:
with open(local_file_path, "wb") as local_file:
chunk = remote_file.read(CHUNK_SIZE)
while chunk:
local_file.write(chunk)
chunk = remote_file.read(CHUNK_SIZE)
| datasets-main | src/datasets/utils/beam_utils.py |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# Lint as: python3
"""Util import."""
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| datasets-main | src/datasets/utils/__init__.py |
# deprecated, please use datasets.download.download_manager
| datasets-main | src/datasets/utils/download_manager.py |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Some python utils function and classes.
"""
import copy
import functools
import itertools
import multiprocessing.pool
import os
import queue
import re
import types
import warnings
from contextlib import contextmanager
from dataclasses import fields, is_dataclass
from io import BytesIO as StringIO
from multiprocessing import Manager
from queue import Empty
from shutil import disk_usage
from types import CodeType, FunctionType
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar, Union
from urllib.parse import urlparse
import dill
import multiprocess
import multiprocess.pool
import numpy as np
from packaging import version
from tqdm.auto import tqdm
from .. import config
from ..parallel import parallel_map
from . import logging
try: # pragma: no branch
import typing_extensions as _typing_extensions
from typing_extensions import Final, Literal
except ImportError:
_typing_extensions = Literal = Final = None
logger = logging.get_logger(__name__)
# NOTE: When used on an instance method, the cache is shared across all
# instances and IS NOT per-instance.
# See
# https://stackoverflow.com/questions/14946264/python-lru-cache-decorator-per-instance
# For @property methods, use @memoized_property below.
memoize = functools.lru_cache
def size_str(size_in_bytes):
"""Returns a human readable size string.
If size_in_bytes is None, then returns "Unknown size".
For example `size_str(1.5 * datasets.units.GiB) == "1.50 GiB"`.
Args:
size_in_bytes: `int` or `None`, the size, in bytes, that we want to
format as a human-readable size string.
"""
if not size_in_bytes:
return "Unknown size"
_NAME_LIST = [("PiB", 2**50), ("TiB", 2**40), ("GiB", 2**30), ("MiB", 2**20), ("KiB", 2**10)]
size_in_bytes = float(size_in_bytes)
for name, size_bytes in _NAME_LIST:
value = size_in_bytes / size_bytes
if value >= 1.0:
return f"{value:.2f} {name}"
return f"{int(size_in_bytes)} bytes"
def convert_file_size_to_int(size: Union[int, str]) -> int:
"""
    Converts a size expressed as a string with digits and a unit (like `"50MB"`) to an integer (in bytes).
Args:
size (`int` or `str`): The size to convert. Will be directly returned if an `int`.
Example:
```py
>>> convert_file_size_to_int("1MiB")
1048576
```
"""
if isinstance(size, int):
return size
if size.upper().endswith("PIB"):
return int(size[:-3]) * (2**50)
if size.upper().endswith("TIB"):
return int(size[:-3]) * (2**40)
if size.upper().endswith("GIB"):
return int(size[:-3]) * (2**30)
if size.upper().endswith("MIB"):
return int(size[:-3]) * (2**20)
if size.upper().endswith("KIB"):
return int(size[:-3]) * (2**10)
if size.upper().endswith("PB"):
int_size = int(size[:-2]) * (10**15)
return int_size // 8 if size.endswith("b") else int_size
if size.upper().endswith("TB"):
int_size = int(size[:-2]) * (10**12)
return int_size // 8 if size.endswith("b") else int_size
if size.upper().endswith("GB"):
int_size = int(size[:-2]) * (10**9)
return int_size // 8 if size.endswith("b") else int_size
if size.upper().endswith("MB"):
int_size = int(size[:-2]) * (10**6)
return int_size // 8 if size.endswith("b") else int_size
if size.upper().endswith("KB"):
int_size = int(size[:-2]) * (10**3)
return int_size // 8 if size.endswith("b") else int_size
raise ValueError(f"`size={size}` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.")
def glob_pattern_to_regex(pattern):
# partially taken from fsspec:
# https://github.com/fsspec/filesystem_spec/blob/697d0f8133d8a5fbc3926e4761d7ecd51337ce50/fsspec/asyn.py#L735
return (
pattern.replace("\\", r"\\")
.replace(".", r"\.")
.replace("*", ".*")
.replace("+", r"\+")
.replace("//", "/")
.replace("(", r"\(")
.replace(")", r"\)")
.replace("|", r"\|")
.replace("^", r"\^")
.replace("$", r"\$")
.rstrip("/")
.replace("?", ".")
)
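# Illustrative sketch (not part of the original module): using the glob-to-regex conversion
# above to match a path. The pattern and path are hypothetical.
def _example_glob_pattern_to_regex():
    regex = glob_pattern_to_regex("data/*.csv")  # -> r"data/.*\.csv"
    return bool(re.fullmatch(regex, "data/train.csv"))  # -> True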
def string_to_dict(string: str, pattern: str) -> Dict[str, str]:
"""Un-format a string using a python f-string pattern.
From https://stackoverflow.com/a/36838374
Example::
>>> p = 'hello, my name is {name} and I am a {age} year old {what}'
>>> s = p.format(name='cody', age=18, what='quarterback')
>>> s
'hello, my name is cody and I am a 18 year old quarterback'
>>> string_to_dict(s, p)
{'age': '18', 'name': 'cody', 'what': 'quarterback'}
Args:
string (str): input string
pattern (str): pattern formatted like a python f-string
Returns:
Dict[str, str]: dictionary of variable -> value, retrieved from the input using the pattern
Raises:
ValueError: if the string doesn't match the pattern
"""
regex = re.sub(r"{(.+?)}", r"(?P<_\1>.+)", pattern)
result = re.search(regex, string)
if result is None:
raise ValueError(f"String {string} doesn't match the pattern {pattern}")
values = list(result.groups())
keys = re.findall(r"{(.+?)}", pattern)
_dict = dict(zip(keys, values))
return _dict
def asdict(obj):
"""Convert an object to its dictionary representation recursively.
<Added version="2.4.0"/>
"""
# Implementation based on https://docs.python.org/3/library/dataclasses.html#dataclasses.asdict
def _is_dataclass_instance(obj):
# https://docs.python.org/3/library/dataclasses.html#dataclasses.is_dataclass
return is_dataclass(obj) and not isinstance(obj, type)
def _asdict_inner(obj):
if _is_dataclass_instance(obj):
result = {}
for f in fields(obj):
value = _asdict_inner(getattr(obj, f.name))
if not f.init or value != f.default or f.metadata.get("include_in_asdict_even_if_is_default", False):
result[f.name] = value
return result
elif isinstance(obj, tuple) and hasattr(obj, "_fields"):
# obj is a namedtuple
return type(obj)(*[_asdict_inner(v) for v in obj])
elif isinstance(obj, (list, tuple)):
# Assume we can create an object of this type by passing in a
# generator (which is not true for namedtuples, handled
# above).
return type(obj)(_asdict_inner(v) for v in obj)
elif isinstance(obj, dict):
return {_asdict_inner(k): _asdict_inner(v) for k, v in obj.items()}
else:
return copy.deepcopy(obj)
if not isinstance(obj, dict) and not _is_dataclass_instance(obj):
raise TypeError(f"{obj} is not a dict or a dataclass")
return _asdict_inner(obj)
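# Illustrative sketch (not part of the original module): `asdict` above also converts
# dataclass instances nested inside dicts and lists. The dataclass is hypothetical.
def _example_asdict():
    from dataclasses import dataclass
    @dataclass
    class _Point:
        x: int
        y: int
    return asdict({"points": [_Point(x=1, y=2)]})  # -> {"points": [{"x": 1, "y": 2}]}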
@contextmanager
def temporary_assignment(obj, attr, value):
"""Temporarily assign obj.attr to value."""
original = getattr(obj, attr, None)
setattr(obj, attr, value)
try:
yield
finally:
setattr(obj, attr, original)
@contextmanager
def temp_seed(seed: int, set_pytorch=False, set_tensorflow=False):
"""Temporarily set the random seed. This works for python numpy, pytorch and tensorflow."""
np_state = np.random.get_state()
np.random.seed(seed)
if set_pytorch and config.TORCH_AVAILABLE:
import torch
torch_state = torch.random.get_rng_state()
torch.random.manual_seed(seed)
if torch.cuda.is_available():
torch_cuda_states = torch.cuda.get_rng_state_all()
torch.cuda.manual_seed_all(seed)
if set_tensorflow and config.TF_AVAILABLE:
import tensorflow as tf
from tensorflow.python import context as tfpycontext
tf_state = tf.random.get_global_generator()
temp_gen = tf.random.Generator.from_seed(seed)
tf.random.set_global_generator(temp_gen)
if not tf.executing_eagerly():
raise ValueError("Setting random seed for TensorFlow is only available in eager mode")
tf_context = tfpycontext.context() # eager mode context
tf_seed = tf_context._seed
tf_rng_initialized = hasattr(tf_context, "_rng")
if tf_rng_initialized:
tf_rng = tf_context._rng
tf_context._set_global_seed(seed)
try:
yield
finally:
np.random.set_state(np_state)
if set_pytorch and config.TORCH_AVAILABLE:
torch.random.set_rng_state(torch_state)
if torch.cuda.is_available():
torch.cuda.set_rng_state_all(torch_cuda_states)
if set_tensorflow and config.TF_AVAILABLE:
tf.random.set_global_generator(tf_state)
tf_context._seed = tf_seed
if tf_rng_initialized:
tf_context._rng = tf_rng
else:
delattr(tf_context, "_rng")
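# Illustrative sketch (not part of the original module): with the context manager above,
# numpy draws are reproducible inside the block and the previous RNG state is restored
# afterwards. The seed value is arbitrary.
def _example_temp_seed():
    with temp_seed(42):
        first = np.random.rand(3)
    with temp_seed(42):
        second = np.random.rand(3)
    return bool(np.allclose(first, second))  # -> True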
def unique_values(values):
"""Iterate over iterable and return only unique values in order."""
seen = set()
for value in values:
if value not in seen:
seen.add(value)
yield value
def no_op_if_value_is_null(func):
"""If the value is None, return None, else call `func`."""
def wrapper(value):
return func(value) if value is not None else None
return wrapper
def first_non_null_value(iterable):
"""Return the index and the value of the first non-null value in the iterable. If all values are None, return -1 as index."""
for i, value in enumerate(iterable):
if value is not None:
return i, value
return -1, None
def zip_dict(*dicts):
"""Iterate over items of dictionaries grouped by their keys."""
for key in unique_values(itertools.chain(*dicts)): # set merge all keys
        # Will raise a KeyError if the dicts don't have the same keys
yield key, tuple(d[key] for d in dicts)
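# Illustrative sketch (not part of the original module): `zip_dict` above pairs the values
# that share a key across several dicts. The split sizes below are hypothetical.
def _example_zip_dict():
    num_examples = {"train": 100, "test": 20}
    num_bytes = {"train": 2048, "test": 512}
    return dict(zip_dict(num_examples, num_bytes))  # -> {"train": (100, 2048), "test": (20, 512)}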
class NonMutableDict(dict):
"""Dict where keys can only be added but not modified.
    Will raise an error if the user tries to overwrite an existing key. The error message
can be customized during construction. It will be formatted using {key} for
the overwritten key.
"""
def __init__(self, *args, **kwargs):
self._error_msg = kwargs.pop(
"error_msg",
"Try to overwrite existing key: {key}",
)
if kwargs:
raise ValueError("NonMutableDict cannot be initialized with kwargs.")
super().__init__(*args, **kwargs)
def __setitem__(self, key, value):
if key in self:
raise ValueError(self._error_msg.format(key=key))
return super().__setitem__(key, value)
def update(self, other):
if any(k in self for k in other):
raise ValueError(self._error_msg.format(key=set(self) & set(other)))
return super().update(other)
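# Illustrative sketch (not part of the original module): `NonMutableDict` above accepts new
# keys but refuses to overwrite existing ones. The key and values are hypothetical.
def _example_non_mutable_dict():
    registry = NonMutableDict()
    registry["my_key"] = "first value"
    try:
        registry["my_key"] = "second value"
    except ValueError:
        return True  # overwriting an existing key raises
    return False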
class classproperty(property): # pylint: disable=invalid-name
"""Descriptor to be used as decorator for @classmethods."""
def __get__(self, obj, objtype=None):
return self.fget.__get__(None, objtype)()
def _single_map_nested(args):
"""Apply a function recursively to each element of a nested data struct."""
function, data_struct, types, rank, disable_tqdm, desc = args
# Singleton first to spare some computation
if not isinstance(data_struct, dict) and not isinstance(data_struct, types):
return function(data_struct)
# Reduce logging to keep things readable in multiprocessing with tqdm
if rank is not None and logging.get_verbosity() < logging.WARNING:
logging.set_verbosity_warning()
# Print at least one thing to fix tqdm in notebooks in multiprocessing
# see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308
if rank is not None and not disable_tqdm and any("notebook" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__):
print(" ", end="", flush=True)
# Loop over single examples or batches and write to buffer/file if examples are to be updated
pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct
pbar_desc = (desc + " " if desc is not None else "") + "#" + str(rank) if rank is not None else desc
with logging.tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit="obj", desc=pbar_desc) as pbar:
if isinstance(data_struct, dict):
return {k: _single_map_nested((function, v, types, None, True, None)) for k, v in pbar}
else:
mapped = [_single_map_nested((function, v, types, None, True, None)) for v in pbar]
if isinstance(data_struct, list):
return mapped
elif isinstance(data_struct, tuple):
return tuple(mapped)
else:
return np.array(mapped)
def map_nested(
function: Callable[[Any], Any],
data_struct: Any,
dict_only: bool = False,
map_list: bool = True,
map_tuple: bool = False,
map_numpy: bool = False,
num_proc: Optional[int] = None,
parallel_min_length: int = 2,
types: Optional[tuple] = None,
disable_tqdm: bool = True,
desc: Optional[str] = None,
) -> Any:
"""Apply a function recursively to each element of a nested data struct.
Use multiprocessing if num_proc > 1 and the length of data_struct is greater than or equal to
`parallel_min_length`.
<Changed version="2.5.0">
Before version 2.5.0, multiprocessing was not used if `num_proc` was greater than or equal to ``len(iterable)``.
Now, if `num_proc` is greater than or equal to ``len(iterable)``, `num_proc` is set to ``len(iterable)`` and
multiprocessing is used.
</Changed>
Args:
function (`Callable`): Function to be applied to `data_struct`.
data_struct (`Any`): Data structure to apply `function` to.
        dict_only (`bool`, default `False`): Whether to only apply `function` recursively to `dict` values in
            `data_struct`.
        map_list (`bool`, default `True`): Whether to also apply `function` recursively to `list` elements (besides
            `dict` values).
        map_tuple (`bool`, default `False`): Whether to also apply `function` recursively to `tuple` elements (besides
            `dict` values).
        map_numpy (`bool`, default `False`): Whether to also apply `function` recursively to `numpy.array` elements
            (besides `dict` values).
num_proc (`int`, *optional*): Number of processes.
parallel_min_length (`int`, default `2`): Minimum length of `data_struct` required for parallel
processing.
<Added version="2.5.0"/>
types (`tuple`, *optional*): Additional types (besides `dict` values) to apply `function` recursively to their
elements.
disable_tqdm (`bool`, default `True`): Whether to disable the tqdm progressbar.
desc (`str`, *optional*): Prefix for the tqdm progressbar.
Returns:
`Any`
"""
if types is None:
types = []
if not dict_only:
if map_list:
types.append(list)
if map_tuple:
types.append(tuple)
if map_numpy:
types.append(np.ndarray)
types = tuple(types)
# Singleton
if not isinstance(data_struct, dict) and not isinstance(data_struct, types):
return function(data_struct)
disable_tqdm = disable_tqdm or not logging.is_progress_bar_enabled()
iterable = list(data_struct.values()) if isinstance(data_struct, dict) else data_struct
if num_proc is None:
num_proc = 1
if num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length:
mapped = [
_single_map_nested((function, obj, types, None, True, None))
for obj in logging.tqdm(iterable, disable=disable_tqdm, desc=desc)
]
else:
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=".* is experimental and might be subject to breaking changes in the future\\.$",
category=UserWarning,
)
mapped = parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, _single_map_nested)
if isinstance(data_struct, dict):
return dict(zip(data_struct.keys(), mapped))
else:
if isinstance(data_struct, list):
return mapped
elif isinstance(data_struct, tuple):
return tuple(mapped)
else:
return np.array(mapped)
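# Illustrative call (a sketch with a made-up nested structure): by default only
# dicts and lists are traversed, so the function is applied to their leaves.
#
#   >>> map_nested(lambda x: x * 2, {"a": 1, "b": [2, 3]})
#   {'a': 2, 'b': [4, 6]}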
class NestedDataStructure:
def __init__(self, data=None):
self.data = data if data is not None else []
def flatten(self, data=None):
data = data if data is not None else self.data
if isinstance(data, dict):
return self.flatten(list(data.values()))
elif isinstance(data, (list, tuple)):
return [flattened for item in data for flattened in self.flatten(item)]
else:
return [data]
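# For example (hypothetical nested data), `flatten` returns all the leaves in order:
#
#   >>> NestedDataStructure({"train": ["a.txt", "b.txt"], "test": "c.txt"}).flatten()
#   ['a.txt', 'b.txt', 'c.txt']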
def has_sufficient_disk_space(needed_bytes, directory="."):
try:
free_bytes = disk_usage(os.path.abspath(directory)).free
except OSError:
return True
return needed_bytes < free_bytes
def _convert_github_url(url_path: str) -> Tuple[str, Optional[str]]:
"""Convert a link to a file on a github repo in a link to the raw github object."""
parsed = urlparse(url_path)
sub_directory = None
if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com":
if "blob" in url_path:
if not url_path.endswith(".py"):
raise ValueError(f"External import from github at {url_path} should point to a file ending with '.py'")
url_path = url_path.replace("blob", "raw") # Point to the raw file
else:
# Parse github url to point to zip
github_path = parsed.path[1:]
repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master")
repo_owner, repo_name = repo_info.split("/")
url_path = f"https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip"
sub_directory = f"{repo_name}-{branch}"
return url_path, sub_directory
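# Illustrative conversions (a sketch; "user/repo" is a hypothetical repository):
#
#   >>> _convert_github_url("https://github.com/user/repo/blob/main/utils.py")
#   ('https://github.com/user/repo/raw/main/utils.py', None)
#   >>> _convert_github_url("https://github.com/user/repo/tree/main")
#   ('https://github.com/user/repo/archive/main.zip', 'repo-main')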
def get_imports(file_path: str) -> List[Tuple[str, str, str, Optional[str]]]:
    """Find whether we should import or clone additional files for a given processing script.
    And list the imports.
    We allow:
    - library dependencies,
    - local dependencies and
    - external dependencies whose url is specified with a comment starting with "# From:" followed by the raw url to a file, an archive or a github repository.
    External dependencies will be downloaded (and extracted if needed in the dataset folder).
    We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script.
    Note that only direct imports in the dataset processing script will be handled.
    We don't recursively explore the additional imports to download further files.
Example::
import tensorflow
import .c4_utils
import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset
"""
lines = []
with open(file_path, encoding="utf-8") as f:
lines.extend(f.readlines())
logger.debug(f"Checking {file_path} for additional imports.")
imports: List[Tuple[str, str, str, Optional[str]]] = []
is_in_docstring = False
for line in lines:
docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line)
if len(docstr_start_match) == 1:
            # flip True <=> False only if docstring
# starts at line without finishing
is_in_docstring = not is_in_docstring
if is_in_docstring:
            # import statements in docstrings should
# not be added as required dependencies
continue
match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE)
if match is None:
match = re.match(
r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)",
line,
flags=re.MULTILINE,
)
if match is None:
continue
if match.group(1):
# The import starts with a '.', we will download the relevant file
if any(imp[1] == match.group(2) for imp in imports):
# We already have this import
continue
if match.group(3):
# The import has a comment with 'From:', we'll retrieve it from the given url
url_path = match.group(3)
url_path, sub_directory = _convert_github_url(url_path)
imports.append(("external", match.group(2), url_path, sub_directory))
elif match.group(2):
# The import should be at the same place as the file
imports.append(("internal", match.group(2), match.group(2), None))
else:
if match.group(3):
# The import has a comment with `From: git+https:...`, asks user to pip install from git.
url_path = match.group(3)
imports.append(("library", match.group(2), url_path, None))
else:
imports.append(("library", match.group(2), match.group(2), None))
return imports
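# Illustrative result (a sketch; the script contents below are hypothetical): a
# processing script containing the lines
#     import tensorflow
#     import .my_utils
# would yield
#     [("library", "tensorflow", "tensorflow", None),
#      ("internal", "my_utils", "my_utils", None)]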
class Pickler(dill.Pickler):
"""Same Pickler as the one from dill, but improved for notebooks and shells"""
dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy())
def save(self, obj, save_persistent_id=True):
# lazy registration of reduction functions
obj_type = type(obj)
if obj_type not in Pickler.dispatch:
if config.DILL_VERSION < version.parse("0.3.6"):
def dill_log(pickler, msg):
dill._dill.log.info(msg)
elif config.DILL_VERSION.release[:3] in [version.parse("0.3.6").release, version.parse("0.3.7").release]:
def dill_log(pickler, msg):
dill._dill.logger.trace(pickler, msg)
if (obj_type.__module__, obj_type.__name__) == ("_regex", "Pattern"):
try:
import regex
@pklregister(obj_type)
def _save_regex(pickler, obj):
dill_log(pickler, f"Re: {obj}")
args = (
obj.pattern,
obj.flags,
)
pickler.save_reduce(regex.compile, args, obj=obj)
dill_log(pickler, "# Re")
return
except ImportError:
pass
elif (obj_type.__module__, obj_type.__name__) == ("torch", "Tensor"):
try:
import torch
@pklregister(obj_type)
def _save_tensor(pickler, obj):
# `torch.from_numpy` is not picklable in `torch>=1.11.0`
def _create_tensor(np_array):
return torch.from_numpy(np_array)
dill_log(pickler, f"To: {obj}")
args = (obj.detach().cpu().numpy(),)
pickler.save_reduce(_create_tensor, args, obj=obj)
dill_log(pickler, "# To")
return
except ImportError:
pass
elif (obj_type.__module__, obj_type.__name__) == ("tiktoken.core", "Encoding"):
try:
import tiktoken
@pklregister(obj_type)
def _save_encoding(pickler, obj):
dill_log(pickler, f"Enc: {obj}")
args = (obj.name, obj._pat_str, obj._mergeable_ranks, obj._special_tokens)
pickler.save_reduce(tiktoken.Encoding, args, obj=obj)
dill_log(pickler, "# Enc")
return
except ImportError:
pass
elif obj_type.__module__.startswith("spacy.lang") and any(
(cls.__module__, cls.__name__) == ("spacy.language", "Language") for cls in obj_type.__mro__
):
try:
import spacy
@pklregister(obj_type)
def _save_lang(pickler, obj):
def _create_lang(config, bytes_data):
lang_cls = spacy.util.get_lang_class(config["nlp"]["lang"])
nlp = lang_cls.from_config(config)
return nlp.from_bytes(bytes_data)
dill_log(pickler, f"Sp: {obj}")
args = (obj.config, obj.to_bytes())
pickler.save_reduce(_create_lang, args, obj=obj)
dill_log(pickler, "# Sp")
return
except ImportError:
pass
dill.Pickler.save(self, obj, save_persistent_id=save_persistent_id)
def memoize(self, obj):
# don't memoize strings since two identical strings can have different python ids
if type(obj) != str: # noqa: E721
dill.Pickler.memoize(self, obj)
def dump(obj, file):
"""pickle an object to a file"""
Pickler(file, recurse=True).dump(obj)
return
@contextmanager
def _no_cache_fields(obj):
try:
if (
"PreTrainedTokenizerBase" in [base_class.__name__ for base_class in type(obj).__mro__]
and hasattr(obj, "cache")
and isinstance(obj.cache, dict)
):
with temporary_assignment(obj, "cache", {}):
yield
else:
yield
except ImportError:
yield
def dumps(obj):
"""pickle an object to a string"""
file = StringIO()
with _no_cache_fields(obj):
dump(obj, file)
return file.getvalue()
def pklregister(t):
def proxy(func):
Pickler.dispatch[t] = func
return func
return proxy
if config.DILL_VERSION < version.parse("0.3.6"):
@pklregister(CodeType)
def _save_code(pickler, obj):
"""
From dill._dill.save_code
This is a modified version that removes the origin (filename + line no.)
of functions created in notebooks or shells for example.
"""
dill._dill.log.info(f"Co: {obj}")
# The filename of a function is the .py file where it is defined.
# Filenames of functions created in notebooks or shells start with '<'
# ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell
        # Filenames of functions created in ipykernel look like
        # f"{tempdir}/ipykernel_{id1}/{id2}.py"
# Moreover lambda functions have a special name: '<lambda>'
# ex: (lambda x: x).__code__.co_name == "<lambda>" # True
#
# For the hashing mechanism we ignore where the function has been defined
# More specifically:
# - we ignore the filename of special functions (filename starts with '<')
# - we always ignore the line number
# - we only use the base name of the file instead of the whole path,
# to be robust in case a script is moved for example.
#
# Only those two lines are different from the original implementation:
co_filename = (
""
if obj.co_filename.startswith("<")
or (
len(obj.co_filename.split(os.path.sep)) > 1
and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")
)
or obj.co_name == "<lambda>"
else os.path.basename(obj.co_filename)
)
co_firstlineno = 1
# The rest is the same as in the original dill implementation
if dill._dill.PY3:
if hasattr(obj, "co_posonlyargcount"):
args = (
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename,
obj.co_name,
co_firstlineno,
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
else:
args = (
obj.co_argcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename,
obj.co_name,
co_firstlineno,
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
else:
args = (
obj.co_argcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename,
obj.co_name,
co_firstlineno,
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
pickler.save_reduce(CodeType, args, obj=obj)
dill._dill.log.info("# Co")
return
elif config.DILL_VERSION.release[:3] in [version.parse("0.3.6").release, version.parse("0.3.7").release]:
# From: https://github.com/uqfoundation/dill/blob/dill-0.3.6/dill/_dill.py#L1104
@pklregister(CodeType)
def save_code(pickler, obj):
dill._dill.logger.trace(pickler, "Co: %s", obj)
############################################################################################################
# Modification here for huggingface/datasets
# The filename of a function is the .py file where it is defined.
# Filenames of functions created in notebooks or shells start with '<'
# ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell
        # Filenames of functions created in ipykernel look like
        # f"{tempdir}/ipykernel_{id1}/{id2}.py"
# Moreover lambda functions have a special name: '<lambda>'
# ex: (lambda x: x).__code__.co_name == "<lambda>" # True
#
# For the hashing mechanism we ignore where the function has been defined
# More specifically:
# - we ignore the filename of special functions (filename starts with '<')
# - we always ignore the line number
# - we only use the base name of the file instead of the whole path,
# to be robust in case a script is moved for example.
#
# Only those two lines are different from the original implementation:
co_filename = (
""
if obj.co_filename.startswith("<")
or (
len(obj.co_filename.split(os.path.sep)) > 1
and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")
)
or obj.co_name == "<lambda>"
else os.path.basename(obj.co_filename)
)
co_firstlineno = 1
# The rest is the same as in the original dill implementation, except for the replacements:
# - obj.co_filename => co_filename
# - obj.co_firstlineno => co_firstlineno
############################################################################################################
if hasattr(obj, "co_endlinetable"): # python 3.11a (20 args)
args = (
obj.co_lnotab, # for < python 3.10 [not counted in args]
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename, # Modification for huggingface/datasets ############################################
obj.co_name,
obj.co_qualname,
co_firstlineno, # Modification for huggingface/datasets #########################################
obj.co_linetable,
obj.co_endlinetable,
obj.co_columntable,
obj.co_exceptiontable,
obj.co_freevars,
obj.co_cellvars,
)
elif hasattr(obj, "co_exceptiontable"): # python 3.11 (18 args)
args = (
obj.co_lnotab, # for < python 3.10 [not counted in args]
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename, # Modification for huggingface/datasets ############################################
obj.co_name,
obj.co_qualname,
co_firstlineno, # Modification for huggingface/datasets #########################################
obj.co_linetable,
obj.co_exceptiontable,
obj.co_freevars,
obj.co_cellvars,
)
elif hasattr(obj, "co_linetable"): # python 3.10 (16 args)
args = (
obj.co_lnotab, # for < python 3.10 [not counted in args]
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename, # Modification for huggingface/datasets ############################################
obj.co_name,
co_firstlineno, # Modification for huggingface/datasets #########################################
obj.co_linetable,
obj.co_freevars,
obj.co_cellvars,
)
elif hasattr(obj, "co_posonlyargcount"): # python 3.8 (16 args)
args = (
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename, # Modification for huggingface/datasets ############################################
obj.co_name,
co_firstlineno, # Modification for huggingface/datasets #########################################
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
else: # python 3.7 (15 args)
args = (
obj.co_argcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename, # Modification for huggingface/datasets ############################################
obj.co_name,
co_firstlineno, # Modification for huggingface/datasets #########################################
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
pickler.save_reduce(dill._dill._create_code, args, obj=obj)
dill._dill.logger.trace(pickler, "# Co")
return
if config.DILL_VERSION < version.parse("0.3.5"):
@pklregister(FunctionType)
def save_function(pickler, obj):
"""
From dill._dill.save_function
This is a modified version that make globs deterministic since the order of
the keys in the output dictionary of globalvars can change.
"""
if not dill._dill._locate_function(obj):
dill._dill.log.info(f"F1: {obj}")
if getattr(pickler, "_recurse", False):
# recurse to get all globals referred to by obj
globalvars = dill.detect.globalvars
globs = globalvars(obj, recurse=True, builtin=True)
if id(obj) in dill._dill.stack:
globs = obj.__globals__ if dill._dill.PY3 else obj.func_globals
else:
globs = obj.__globals__ if dill._dill.PY3 else obj.func_globals
# globs is a dictionary with keys = var names (str) and values = python objects
# however the dictionary is not always loaded in the same order
            # therefore we have to sort the keys to make it deterministic.
# This is important to make `dump` deterministic.
# Only this line is different from the original implementation:
globs = dict(sorted(globs.items()))
# The rest is the same as in the original dill implementation
_byref = getattr(pickler, "_byref", None)
_recurse = getattr(pickler, "_recurse", None)
_memo = (id(obj) in dill._dill.stack) and (_recurse is not None)
dill._dill.stack[id(obj)] = len(dill._dill.stack), obj
if dill._dill.PY3:
_super = ("super" in getattr(obj.__code__, "co_names", ())) and (_byref is not None)
if _super:
pickler._byref = True
if _memo:
pickler._recurse = False
fkwdefaults = getattr(obj, "__kwdefaults__", None)
pickler.save_reduce(
dill._dill._create_function,
(obj.__code__, globs, obj.__name__, obj.__defaults__, obj.__closure__, obj.__dict__, fkwdefaults),
obj=obj,
)
else:
_super = (
("super" in getattr(obj.func_code, "co_names", ()))
and (_byref is not None)
and getattr(pickler, "_recurse", False)
)
if _super:
pickler._byref = True
if _memo:
pickler._recurse = False
pickler.save_reduce(
dill._dill._create_function,
(obj.func_code, globs, obj.func_name, obj.func_defaults, obj.func_closure, obj.__dict__),
obj=obj,
)
if _super:
pickler._byref = _byref
if _memo:
pickler._recurse = _recurse
if (
dill._dill.OLDER
and not _byref
and (_super or (not _super and _memo) or (not _super and not _memo and _recurse))
):
pickler.clear_memo()
dill._dill.log.info("# F1")
else:
dill._dill.log.info(f"F2: {obj}")
name = getattr(obj, "__qualname__", getattr(obj, "__name__", None))
dill._dill.StockPickler.save_global(pickler, obj, name=name)
dill._dill.log.info("# F2")
return
elif config.DILL_VERSION.release[:3] == version.parse("0.3.5").release: # 0.3.5, 0.3.5.1
# https://github.com/uqfoundation/dill/blob/dill-0.3.5.1/dill/_dill.py
@pklregister(FunctionType)
def save_function(pickler, obj):
if not dill._dill._locate_function(obj, pickler):
dill._dill.log.info("F1: %s" % obj)
_recurse = getattr(pickler, "_recurse", None)
_postproc = getattr(pickler, "_postproc", None)
_main_modified = getattr(pickler, "_main_modified", None)
_original_main = getattr(pickler, "_original_main", dill._dill.__builtin__) # 'None'
postproc_list = []
if _recurse:
# recurse to get all globals referred to by obj
from dill.detect import globalvars
globs_copy = globalvars(obj, recurse=True, builtin=True)
# Add the name of the module to the globs dictionary to prevent
# the duplication of the dictionary. Pickle the unpopulated
# globals dictionary and set the remaining items after the function
# is created to correctly handle recursion.
globs = {"__name__": obj.__module__}
else:
globs_copy = obj.__globals__ if dill._dill.PY3 else obj.func_globals
# If the globals is the __dict__ from the module being saved as a
# session, substitute it by the dictionary being actually saved.
if _main_modified and globs_copy is _original_main.__dict__:
globs_copy = getattr(pickler, "_main", _original_main).__dict__
globs = globs_copy
# If the globals is a module __dict__, do not save it in the pickle.
elif (
globs_copy is not None
and obj.__module__ is not None
and getattr(dill._dill._import_module(obj.__module__, True), "__dict__", None) is globs_copy
):
globs = globs_copy
else:
globs = {"__name__": obj.__module__}
# DONE: modified here for huggingface/datasets
# - globs is a dictionary with keys = var names (str) and values = python objects
# - globs_copy is a dictionary with keys = var names (str) and values = ids of the python objects
# however the dictionary is not always loaded in the same order
            # therefore we have to sort the keys to make it deterministic.
            # This is important to make `dump` deterministic.
            # Only these lines are different from the original implementation:
# START
globs_is_globs_copy = globs is globs_copy
globs = dict(sorted(globs.items()))
if globs_is_globs_copy:
globs_copy = globs
elif globs_copy is not None:
globs_copy = dict(sorted(globs_copy.items()))
# END
if globs_copy is not None and globs is not globs_copy:
# In the case that the globals are copied, we need to ensure that
# the globals dictionary is updated when all objects in the
# dictionary are already created.
if dill._dill.PY3:
glob_ids = {id(g) for g in globs_copy.values()}
else:
glob_ids = {id(g) for g in globs_copy.itervalues()}
for stack_element in _postproc:
if stack_element in glob_ids:
_postproc[stack_element].append((dill._dill._setitems, (globs, globs_copy)))
break
else:
postproc_list.append((dill._dill._setitems, (globs, globs_copy)))
if dill._dill.PY3:
closure = obj.__closure__
state_dict = {}
for fattrname in ("__doc__", "__kwdefaults__", "__annotations__"):
fattr = getattr(obj, fattrname, None)
if fattr is not None:
state_dict[fattrname] = fattr
if obj.__qualname__ != obj.__name__:
state_dict["__qualname__"] = obj.__qualname__
if "__name__" not in globs or obj.__module__ != globs["__name__"]:
state_dict["__module__"] = obj.__module__
state = obj.__dict__
if type(state) is not dict: # noqa: E721
state_dict["__dict__"] = state
state = None
if state_dict:
state = state, state_dict
dill._dill._save_with_postproc(
pickler,
(
dill._dill._create_function,
(obj.__code__, globs, obj.__name__, obj.__defaults__, closure),
state,
),
obj=obj,
postproc_list=postproc_list,
)
else:
closure = obj.func_closure
if obj.__doc__ is not None:
postproc_list.append((setattr, (obj, "__doc__", obj.__doc__)))
if "__name__" not in globs or obj.__module__ != globs["__name__"]:
postproc_list.append((setattr, (obj, "__module__", obj.__module__)))
if obj.__dict__:
postproc_list.append((setattr, (obj, "__dict__", obj.__dict__)))
dill._dill._save_with_postproc(
pickler,
(dill._dill._create_function, (obj.func_code, globs, obj.func_name, obj.func_defaults, closure)),
obj=obj,
postproc_list=postproc_list,
)
# Lift closure cell update to earliest function (#458)
if _postproc:
topmost_postproc = next(iter(_postproc.values()), None)
if closure and topmost_postproc:
for cell in closure:
possible_postproc = (setattr, (cell, "cell_contents", obj))
try:
topmost_postproc.remove(possible_postproc)
except ValueError:
continue
# Change the value of the cell
pickler.save_reduce(*possible_postproc)
# pop None created by calling preprocessing step off stack
if dill._dill.PY3:
pickler.write(bytes("0", "UTF-8"))
else:
pickler.write("0")
dill._dill.log.info("# F1")
else:
dill._dill.log.info("F2: %s" % obj)
name = getattr(obj, "__qualname__", getattr(obj, "__name__", None))
dill._dill.StockPickler.save_global(pickler, obj, name=name)
dill._dill.log.info("# F2")
return
elif config.DILL_VERSION.release[:3] in [version.parse("0.3.6").release, version.parse("0.3.7").release]:
# From: https://github.com/uqfoundation/dill/blob/dill-0.3.6/dill/_dill.py#L1739
@pklregister(FunctionType)
def save_function(pickler, obj):
if not dill._dill._locate_function(obj, pickler):
if type(obj.__code__) is not CodeType:
# Some PyPy builtin functions have no module name, and thus are not
# able to be located
module_name = getattr(obj, "__module__", None)
if module_name is None:
module_name = dill._dill.__builtin__.__name__
module = dill._dill._import_module(module_name, safe=True)
_pypy_builtin = False
try:
found, _ = dill._dill._getattribute(module, obj.__qualname__)
if getattr(found, "__func__", None) is obj:
_pypy_builtin = True
except AttributeError:
pass
if _pypy_builtin:
dill._dill.logger.trace(pickler, "F3: %s", obj)
pickler.save_reduce(getattr, (found, "__func__"), obj=obj)
dill._dill.logger.trace(pickler, "# F3")
return
dill._dill.logger.trace(pickler, "F1: %s", obj)
_recurse = getattr(pickler, "_recurse", None)
_postproc = getattr(pickler, "_postproc", None)
_main_modified = getattr(pickler, "_main_modified", None)
_original_main = getattr(pickler, "_original_main", dill._dill.__builtin__) # 'None'
postproc_list = []
if _recurse:
# recurse to get all globals referred to by obj
from dill.detect import globalvars
globs_copy = globalvars(obj, recurse=True, builtin=True)
# Add the name of the module to the globs dictionary to prevent
# the duplication of the dictionary. Pickle the unpopulated
# globals dictionary and set the remaining items after the function
# is created to correctly handle recursion.
globs = {"__name__": obj.__module__}
else:
globs_copy = obj.__globals__
# If the globals is the __dict__ from the module being saved as a
# session, substitute it by the dictionary being actually saved.
if _main_modified and globs_copy is _original_main.__dict__:
globs_copy = getattr(pickler, "_main", _original_main).__dict__
globs = globs_copy
# If the globals is a module __dict__, do not save it in the pickle.
elif (
globs_copy is not None
and obj.__module__ is not None
and getattr(dill._dill._import_module(obj.__module__, True), "__dict__", None) is globs_copy
):
globs = globs_copy
else:
globs = {"__name__": obj.__module__}
########################################################################################################
# Modification here for huggingface/datasets
# - globs is a dictionary with keys = var names (str) and values = python objects
# - globs_copy is a dictionary with keys = var names (str) and values = ids of the python objects
# However the dictionary is not always loaded in the same order,
            # therefore we have to sort the keys to make it deterministic.
            # This is important to make `dump` deterministic.
            # Only these lines are different from the original implementation:
# START
globs_is_globs_copy = globs is globs_copy
globs = dict(sorted(globs.items()))
if globs_is_globs_copy:
globs_copy = globs
elif globs_copy is not None:
globs_copy = dict(sorted(globs_copy.items()))
# END
########################################################################################################
if globs_copy is not None and globs is not globs_copy:
# In the case that the globals are copied, we need to ensure that
# the globals dictionary is updated when all objects in the
# dictionary are already created.
glob_ids = {id(g) for g in globs_copy.values()}
for stack_element in _postproc:
if stack_element in glob_ids:
_postproc[stack_element].append((dill._dill._setitems, (globs, globs_copy)))
break
else:
postproc_list.append((dill._dill._setitems, (globs, globs_copy)))
closure = obj.__closure__
state_dict = {}
for fattrname in ("__doc__", "__kwdefaults__", "__annotations__"):
fattr = getattr(obj, fattrname, None)
if fattr is not None:
state_dict[fattrname] = fattr
if obj.__qualname__ != obj.__name__:
state_dict["__qualname__"] = obj.__qualname__
if "__name__" not in globs or obj.__module__ != globs["__name__"]:
state_dict["__module__"] = obj.__module__
state = obj.__dict__
if type(state) is not dict: # noqa: E721
state_dict["__dict__"] = state
state = None
if state_dict:
state = state, state_dict
dill._dill._save_with_postproc(
pickler,
(dill._dill._create_function, (obj.__code__, globs, obj.__name__, obj.__defaults__, closure), state),
obj=obj,
postproc_list=postproc_list,
)
# Lift closure cell update to earliest function (#458)
if _postproc:
topmost_postproc = next(iter(_postproc.values()), None)
if closure and topmost_postproc:
for cell in closure:
possible_postproc = (setattr, (cell, "cell_contents", obj))
try:
topmost_postproc.remove(possible_postproc)
except ValueError:
continue
# Change the value of the cell
pickler.save_reduce(*possible_postproc)
# pop None created by calling preprocessing step off stack
pickler.write(bytes("0", "UTF-8"))
dill._dill.logger.trace(pickler, "# F1")
else:
dill._dill.logger.trace(pickler, "F2: %s", obj)
name = getattr(obj, "__qualname__", getattr(obj, "__name__", None))
dill._dill.StockPickler.save_global(pickler, obj, name=name)
dill._dill.logger.trace(pickler, "# F2")
return
def copyfunc(func):
result = types.FunctionType(func.__code__, func.__globals__, func.__name__, func.__defaults__, func.__closure__)
result.__kwdefaults__ = func.__kwdefaults__
return result
Y = TypeVar("Y")
def _write_generator_to_queue(queue: queue.Queue, func: Callable[..., Iterable[Y]], kwargs: dict) -> int:
    # Run `func` and push every item it yields to the shared queue.
    # `i` is initialized so that an empty generator doesn't raise UnboundLocalError on return.
    i = -1
    for i, result in enumerate(func(**kwargs)):
        queue.put(result)
    return i
def _get_pool_pid(pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool]) -> Set[int]:
return {f.pid for f in pool._pool}
def iflatmap_unordered(
pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool],
func: Callable[..., Iterable[Y]],
*,
kwargs_iterable: Iterable[dict],
) -> Iterable[Y]:
initial_pool_pid = _get_pool_pid(pool)
pool_changed = False
manager_cls = Manager if isinstance(pool, multiprocessing.pool.Pool) else multiprocess.Manager
with manager_cls() as manager:
queue = manager.Queue()
async_results = [
pool.apply_async(_write_generator_to_queue, (queue, func, kwargs)) for kwargs in kwargs_iterable
]
try:
while True:
try:
yield queue.get(timeout=0.05)
except Empty:
if all(async_result.ready() for async_result in async_results) and queue.empty():
break
if _get_pool_pid(pool) != initial_pool_pid:
pool_changed = True
# One of the subprocesses has died. We should not wait forever.
raise RuntimeError(
"One of the subprocesses has abruptly died during map operation."
"To debug the error, disable multiprocessing."
)
finally:
if not pool_changed:
# we get the result in case there's an error to raise
[async_result.get(timeout=0.05) for async_result in async_results]
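# Illustrative usage (a sketch; `gen` and its kwargs are hypothetical): each
# worker runs the generator and pushes its items to a shared queue, so results
# arrive in arbitrary order.
#
#   >>> from multiprocess import Pool
#   >>> def gen(start):
#   ...     yield from range(start, start + 3)
#   >>> with Pool(2) as pool:
#   ...     out = sorted(iflatmap_unordered(pool, gen, kwargs_iterable=[{"start": 0}, {"start": 10}]))
#   >>> out
#   [0, 1, 2, 10, 11, 12]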
| datasets-main | src/datasets/utils/py_utils.py |
import numpy as np
def approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
    It is the most likely outcome of drawing n_draws many
samples from the population given by class_counts.
Args
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
(inds,) = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = rng.choice(inds, size=add_now, replace=False)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int64)
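# Worked example (a sketch with made-up counts): with class_counts=[4, 4, 2] and
# n_draws=5, the continuous allocation is [2.0, 2.0, 1.0], which already sums to
# 5, so nothing needs to be added; ties in the leftover probabilities would be
# broken at random with `rng`.
#
#   >>> approximate_mode(np.array([4, 4, 2]), 5, np.random.RandomState(0)).tolist()
#   [2, 2, 1]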
def stratified_shuffle_split_generate_indices(y, n_train, n_test, rng, n_splits=10):
"""
Provides train/test indices to split data in train/test sets.
    Its implementation is based on the StratifiedShuffleSplit implementation
    of the scikit-learn library.
    Args
    ----------
    y : array-like
        Class labels used to stratify the train/test split.
    n_train : int,
        represents the absolute number of train samples.
    n_test : int,
        represents the absolute number of test samples.
    rng : RandomState instance
        Controls the randomness of the training and testing indices produced.
    n_splits : int, default=10
        Number of re-shuffling & splitting iterations.
"""
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = np.bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("Minimum class count error")
if n_train < n_classes:
raise ValueError(
"The train_size = %d should be greater or " "equal to the number of classes = %d" % (n_train, n_classes)
)
if n_test < n_classes:
raise ValueError(
"The test_size = %d should be greater or " "equal to the number of classes = %d" % (n_test, n_classes)
)
class_indices = np.split(np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1])
for _ in range(n_splits):
n_i = approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i in range(n_classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = class_indices[i].take(permutation, mode="clip")
train.extend(perm_indices_class_i[: n_i[i]])
test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
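# Illustrative usage (a sketch with made-up labels): each yielded split keeps the
# class proportions of `y` in both the train and test indices.
#
#   >>> y = np.array([0, 0, 0, 1, 1, 1])
#   >>> rng = np.random.RandomState(42)
#   >>> train_idx, test_idx = next(stratified_shuffle_split_generate_indices(y, 4, 2, rng))
#   >>> len(train_idx), len(test_idx)
#   (4, 2)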
| datasets-main | src/datasets/utils/stratify.py |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
# old versions of hfh don't url-encode the file path
path = quote(path)
return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
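# Illustrative result (a sketch; the repo id and file path are hypothetical).
# With a recent huggingface_hub, the returned url is roughly of the form:
#
#   >>> hf_hub_url("username/my_dataset", "data/train.csv", revision="main")
#   'https://huggingface.co/datasets/username/my_dataset/resolve/main/data/train.csv'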
| datasets-main | src/datasets/utils/hub.py |
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
"""Set all the modules components as attributes of the _PatchedModuleObj object."""
def __init__(self, module, attrs=None):
attrs = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__"):
setattr(self, key, getattr(module, key))
self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
"""
Patch a submodule attribute of an object, by keeping all other submodules intact at all levels.
Example::
>>> import importlib
>>> from datasets.load import dataset_module_factory
>>> from datasets.streaming import patch_submodule, xjoin
>>>
>>> dataset_module = dataset_module_factory("snli")
>>> snli_module = importlib.import_module(dataset_module.module_path)
>>> patcher = patch_submodule(snli_module, "os.path.join", xjoin)
>>> patcher.start()
>>> assert snli_module.os.path.join is xjoin
"""
_active_patches = []
def __init__(self, obj, target: str, new, attrs=None):
self.obj = obj
self.target = target
self.new = new
self.key = target.split(".")[0]
self.original = {}
self.attrs = attrs or []
def __enter__(self):
*submodules, target_attr = self.target.split(".")
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(submodules)):
try:
submodule = import_module(".".join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
obj_attr = getattr(self.obj, attr)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule)
):
self.original[attr] = obj_attr
# patch at top level
setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
patched = getattr(self.obj, attr)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
patched = getattr(patched, key)
# finally set the target attribute
setattr(patched, target_attr, self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
attr_value = getattr(import_module(".".join(submodules)), target_attr)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj, attr) is attr_value:
self.original[attr] = getattr(self.obj, attr)
setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
self.original[target_attr] = globals()["__builtins__"][target_attr]
setattr(self.obj, target_attr, self.new)
else:
raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
def __exit__(self, *exc_info):
for attr in list(self.original):
setattr(self.obj, attr, self.original.pop(attr))
def start(self):
"""Activate a patch."""
self.__enter__()
self._active_patches.append(self)
def stop(self):
"""Stop an active patch."""
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| datasets-main | src/datasets/utils/patching.py |
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org>
"""
A platform independent file lock that supports the with-statement.
"""
# Modules
# ------------------------------------------------
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
warnings = None
try:
import msvcrt
except ImportError:
msvcrt = None
try:
import fcntl
except ImportError:
fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
__version__ = "3.0.12"
_logger = None
def logger():
"""Returns the logger instance used in this module."""
global _logger
_logger = _logger or logging.getLogger(__name__)
return _logger
# Exceptions
# ------------------------------------------------
class Timeout(TimeoutError):
"""
Raised when the lock could not be acquired in *timeout*
seconds.
"""
def __init__(self, lock_file):
""" """
#: The path of the file lock.
self.lock_file = lock_file
return None
def __str__(self):
temp = f"The file lock '{self.lock_file}' could not be acquired."
return temp
# Classes
# ------------------------------------------------
# This is a helper class which is returned by :meth:`BaseFileLock.acquire`
# and wraps the lock to make sure __enter__ is not called twice when entering
# the with statement.
# If we would simply return *self*, the lock would be acquired again
# in the *__enter__* method of the BaseFileLock, but not released again
# automatically.
#
# :seealso: issue #37 (memory leak)
class _Acquire_ReturnProxy:
def __init__(self, lock):
self.lock = lock
return None
def __enter__(self):
return self.lock
def __exit__(self, exc_type, exc_value, traceback):
self.lock.release()
return None
class BaseFileLock:
"""
Implements the base class of a file lock.
"""
def __init__(self, lock_file, timeout=-1, max_filename_length=None):
""" """
max_filename_length = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
# The path to the lock file.
self._lock_file = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
self._lock_file_fd = None
# The default timeout value.
self.timeout = timeout
# We use this lock primarily for the lock counter.
self._thread_lock = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
self._lock_counter = 0
return None
@property
def lock_file(self):
"""
The path to the lock file.
"""
return self._lock_file
@property
def timeout(self):
"""
You can set a default timeout for the filelock. It will be used as
fallback value in the acquire method, if no timeout value (*None*) is
given.
If you want to disable the timeout, set it to a negative value.
        A timeout of 0 means that there is exactly one attempt to acquire the
file lock.
*New in version 2.0.0*
"""
return self._timeout
@timeout.setter
def timeout(self, value):
""" """
self._timeout = float(value)
return None
# Platform dependent locking
# --------------------------------------------
def _acquire(self):
"""
Platform dependent. If the file lock could be
acquired, self._lock_file_fd holds the file descriptor
of the lock file.
"""
raise NotImplementedError()
def _release(self):
"""
Releases the lock and sets self._lock_file_fd to None.
"""
raise NotImplementedError()
# Platform independent methods
# --------------------------------------------
@property
def is_locked(self):
"""
True, if the object holds the file lock.
This was previously a method and is now a property.
"""
return self._lock_file_fd is not None
def acquire(self, timeout=None, poll_intervall=0.05):
"""
Acquires the file lock or fails with a :exc:`Timeout` error.
```py
# You can use this method in the context manager (recommended)
with lock.acquire():
pass
# Or use an equivalent try-finally construct:
lock.acquire()
try:
pass
finally:
lock.release()
```
:arg float timeout:
The maximum time waited for the file lock.
If ``timeout < 0``, there is no timeout and this method will
block until the lock could be acquired.
If ``timeout`` is None, the default :attr:`~timeout` is used.
:arg float poll_intervall:
We check once in *poll_intervall* seconds if we can acquire the
file lock.
:raises Timeout:
if the lock could not be acquired in *timeout* seconds.
        This method now returns a *proxy* object instead of *self*,
so that it can be used in a with statement without side effects.
"""
# Use the default timeout, if no timeout is provided.
if timeout is None:
timeout = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lock_id = id(self)
lock_filename = self._lock_file
start_time = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
self._acquire()
if self.is_locked:
logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
raise Timeout(self._lock_file)
else:
logger().debug(
f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
)
time.sleep(poll_intervall)
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
self._lock_counter = max(0, self._lock_counter - 1)
raise
return _Acquire_ReturnProxy(lock=self)
def release(self, force=False):
"""
Releases the file lock.
        Please note that the lock is only completely released if the lock
        counter is 0.
Also note, that the lock file itself is not automatically deleted.
:arg bool force:
If true, the lock counter is ignored and the lock is released in
every case.
"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lock_id = id(self)
lock_filename = self._lock_file
logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
self._release()
self._lock_counter = 0
logger().debug(f"Lock {lock_id} released on {lock_filename}")
return None
def __enter__(self):
self.acquire()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.release()
return None
def __del__(self):
self.release(force=True)
return None
def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
filename = os.path.basename(path)
if len(filename) > max_length and max_length > 0:
dirname = os.path.dirname(path)
hashed_filename = str(hash(filename))
new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(dirname, new_filename)
else:
return path
# Windows locking mechanism
# ~~~~~~~~~~~~~~~~~~~~~~~~~
class WindowsFileLock(BaseFileLock):
"""
Uses the :func:`msvcrt.locking` function to hard lock the lock file on
windows systems.
"""
def __init__(self, lock_file, timeout=-1, max_filename_length=None):
from .file_utils import relative_to_absolute_path
super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)
def _acquire(self):
open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
fd = os.open(self._lock_file, open_mode)
except OSError:
pass
else:
try:
msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
except OSError:
os.close(fd)
else:
self._lock_file_fd = fd
return None
def _release(self):
fd = self._lock_file_fd
self._lock_file_fd = None
msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
os.close(fd)
try:
os.remove(self._lock_file)
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
# Unix locking mechanism
# ~~~~~~~~~~~~~~~~~~~~~~
class UnixFileLock(BaseFileLock):
"""
Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.
"""
def __init__(self, lock_file, timeout=-1, max_filename_length=None):
max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
def _acquire(self):
open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
fd = os.open(self._lock_file, open_mode)
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
os.close(fd)
else:
self._lock_file_fd = fd
return None
def _release(self):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
fd = self._lock_file_fd
self._lock_file_fd = None
fcntl.flock(fd, fcntl.LOCK_UN)
os.close(fd)
return None
# Soft lock
# ~~~~~~~~~
class SoftFileLock(BaseFileLock):
"""
Simply watches the existence of the lock file.
"""
def _acquire(self):
open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
fd = os.open(self._lock_file, open_mode)
except OSError:
pass
else:
self._lock_file_fd = fd
return None
def _release(self):
os.close(self._lock_file_fd)
self._lock_file_fd = None
try:
os.remove(self._lock_file)
# The file is already deleted and that's what we want.
except OSError:
pass
return None
# Platform filelock
# ~~~~~~~~~~~~~~~~~
#: Alias for the lock, which should be used for the current platform. On
#: Windows, this is an alias for :class:`WindowsFileLock`, on Unix for
#: :class:`UnixFileLock` and otherwise for :class:`SoftFileLock`.
FileLock = None
if msvcrt:
FileLock = WindowsFileLock
elif fcntl:
FileLock = UnixFileLock
else:
FileLock = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
| datasets-main | src/datasets/utils/filelock.py |
from typing import Callable
def is_documented_by(function_with_docstring: Callable):
"""Decorator to share docstrings across common functions.
Args:
function_with_docstring (`Callable`): Name of the function with the docstring.
"""
def wrapper(target_function):
target_function.__doc__ = function_with_docstring.__doc__
return target_function
return wrapper
| datasets-main | src/datasets/utils/doc_utils.py |
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import copy
import io
import json
import os
import posixpath
import re
import shutil
import sys
import time
import urllib
import warnings
from contextlib import closing, contextmanager
from functools import partial
from hashlib import sha256
from pathlib import Path
from typing import List, Optional, Type, TypeVar, Union
from urllib.parse import urljoin, urlparse
import fsspec
import huggingface_hub
import requests
from huggingface_hub import HfFolder
from packaging import version
from .. import __version__, config
from ..download.download_config import DownloadConfig
from . import logging
from .extract import ExtractManager
from .filelock import FileLock
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
INCOMPLETE_SUFFIX = ".incomplete"
T = TypeVar("T", str, Path)
def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str:
"""
Add hf_modules_cache to the python path.
By default hf_modules_cache='~/.cache/huggingface/modules'.
It can also be set with the environment variable HF_MODULES_CACHE.
This is used to add modules such as `datasets_modules`
"""
hf_modules_cache = hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE
hf_modules_cache = str(hf_modules_cache)
if hf_modules_cache not in sys.path:
sys.path.append(hf_modules_cache)
os.makedirs(hf_modules_cache, exist_ok=True)
if not os.path.exists(os.path.join(hf_modules_cache, "__init__.py")):
with open(os.path.join(hf_modules_cache, "__init__.py"), "w"):
pass
return hf_modules_cache
def is_remote_url(url_or_filename: str) -> bool:
return urlparse(url_or_filename).scheme != "" and not os.path.ismount(urlparse(url_or_filename).scheme + ":/")
def is_local_path(url_or_filename: str) -> bool:
# On unix the scheme of a local path is empty (for both absolute and relative),
# while on windows the scheme is the drive name (ex: "c") for absolute paths.
# for details on the windows behavior, see https://bugs.python.org/issue42215
return urlparse(url_or_filename).scheme == "" or os.path.ismount(urlparse(url_or_filename).scheme + ":/")
def is_relative_path(url_or_filename: str) -> bool:
return urlparse(url_or_filename).scheme == "" and not os.path.isabs(url_or_filename)
def relative_to_absolute_path(path: T) -> T:
"""Convert relative path to absolute path."""
abs_path_str = os.path.abspath(os.path.expanduser(os.path.expandvars(str(path))))
return Path(abs_path_str) if isinstance(path, Path) else abs_path_str
def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str:
if dataset:
endpoint = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else config.S3_DATASETS_BUCKET_PREFIX
else:
endpoint = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else config.S3_METRICS_BUCKET_PREFIX
return "/".join((endpoint, identifier, filename))
def head_hf_s3(
identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0
) -> Union[requests.Response, Exception]:
return http_head(
hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset),
max_retries=max_retries,
)
def hf_github_url(path: str, name: str, dataset=True, revision: Optional[str] = None) -> str:
default_revision = "main" if version.parse(__version__).is_devrelease else __version__
revision = revision or default_revision
if dataset:
return config.REPO_DATASETS_URL.format(revision=revision, path=path, name=name)
else:
return config.REPO_METRICS_URL.format(revision=revision, path=path, name=name)
def url_or_path_join(base_name: str, *pathnames: str) -> str:
if is_remote_url(base_name):
return posixpath.join(base_name, *(str(pathname).replace(os.sep, "/").lstrip("/") for pathname in pathnames))
else:
return Path(base_name, *pathnames).as_posix()
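# For example (hypothetical base url and path segments):
#
#   >>> url_or_path_join("https://example.com/data", "train", "part-0.txt")
#   'https://example.com/data/train/part-0.txt'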
def url_or_path_parent(url_or_path: str) -> str:
if is_remote_url(url_or_path):
return url_or_path[: url_or_path.rindex("/")]
else:
return os.path.dirname(url_or_path)
def hash_url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
    If the url ends with .py, '.py' is appended to the filename so that the
    cached file can be identified as a python script.
"""
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
if url.endswith(".py"):
filename += ".py"
return filename
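# Illustrative sketch, not part of the original module: the cached filename is a
# sha256 of the url, optionally followed by a sha256 of the etag, and the '.py'
# suffix is preserved for Python scripts (hypothetical url below).
def _example_hash_url_to_filename():
    url = "https://huggingface.co/datasets/squad/resolve/main/squad.py"
    no_etag = hash_url_to_filename(url)                  # "<sha256(url)>.py"
    with_etag = hash_url_to_filename(url, etag='"abc"')  # "<sha256(url)>.<sha256(etag)>.py"
    assert no_etag != with_etag and no_etag.endswith(".py") and with_etag.endswith(".py")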
def cached_path(
url_or_filename,
download_config=None,
**download_kwargs,
) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
Return:
Local path (string)
Raises:
FileNotFoundError: in case of non-recoverable file
(non-existent or no cache on disk)
ConnectionError: in case of unreachable url
and no cache on disk
ValueError: if it couldn't parse the url or filename correctly
requests.exceptions.ConnectionError: in case of internet connection issue
"""
if download_config is None:
download_config = DownloadConfig(**download_kwargs)
cache_dir = download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if is_remote_url(url_or_filename):
# URL, so get it from the cache (downloading if necessary)
output_path = get_from_cache(
url_or_filename,
cache_dir=cache_dir,
force_download=download_config.force_download,
proxies=download_config.proxies,
resume_download=download_config.resume_download,
user_agent=download_config.user_agent,
local_files_only=download_config.local_files_only,
use_etag=download_config.use_etag,
max_retries=download_config.max_retries,
token=download_config.token,
ignore_url_params=download_config.ignore_url_params,
storage_options=download_config.storage_options,
download_desc=download_config.download_desc,
)
elif os.path.exists(url_or_filename):
# File, and it exists.
output_path = url_or_filename
elif is_local_path(url_or_filename):
# File, but it doesn't exist.
raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist")
else:
# Something unknown
raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")
if output_path is None:
return output_path
if download_config.extract_compressed_file:
output_path = ExtractManager(cache_dir=download_config.cache_dir).extract(
output_path, force_extract=download_config.force_extract
)
return output_path
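# Illustrative sketch, not part of the original module: cached_path returns an
# existing local path unchanged and only downloads when given a remote URL.
# The commented-out call uses a placeholder URL and would need network access.
def _example_cached_path():
    local_file = cached_path(__file__)  # already a local, existing file: returned as-is
    # remote_file = cached_path("https://example.com/data.csv", download_config=DownloadConfig(max_retries=2))
    return local_file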
def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str:
ua = f"datasets/{__version__}"
ua += f"; python/{config.PY_VERSION}"
ua += f"; huggingface_hub/{huggingface_hub.__version__}"
ua += f"; pyarrow/{config.PYARROW_VERSION}"
if config.TORCH_AVAILABLE:
ua += f"; torch/{config.TORCH_VERSION}"
if config.TF_AVAILABLE:
ua += f"; tensorflow/{config.TF_VERSION}"
if config.JAX_AVAILABLE:
ua += f"; jax/{config.JAX_VERSION}"
if config.BEAM_AVAILABLE:
ua += f"; apache_beam/{config.BEAM_VERSION}"
if isinstance(user_agent, dict):
ua += f"; {'; '.join(f'{k}/{v}' for k, v in user_agent.items())}"
elif isinstance(user_agent, str):
ua += "; " + user_agent
return ua
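# Illustrative sketch, not part of the original module: extra components passed as
# a dict are appended to the base user-agent string as "key/value" pairs.
def _example_user_agent():
    return get_datasets_user_agent({"my_lib": "0.1.0"})  # ends with "; my_lib/0.1.0"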
def get_authentication_headers_for_url(
url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated"
) -> dict:
"""Handle the HF authentication"""
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
token = use_auth_token
headers = {}
if url.startswith(config.HF_ENDPOINT):
if token is False:
token = None
elif isinstance(token, str):
token = token
else:
token = HfFolder.get_token()
if token:
headers["authorization"] = f"Bearer {token}"
return headers
class OfflineModeIsEnabled(ConnectionError):
pass
def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None):
"""Raise an OfflineModeIsEnabled error (subclass of ConnectionError) if HF_DATASETS_OFFLINE is True."""
if config.HF_DATASETS_OFFLINE:
raise OfflineModeIsEnabled(
"Offline mode is enabled." if msg is None else "Offline mode is enabled. " + str(msg)
)
def _retry(
func,
func_args: Optional[tuple] = None,
func_kwargs: Optional[dict] = None,
exceptions: Type[requests.exceptions.RequestException] = requests.exceptions.RequestException,
status_codes: Optional[List[int]] = None,
max_retries: int = 0,
base_wait_time: float = 0.5,
max_wait_time: float = 2,
):
func_args = func_args or ()
func_kwargs = func_kwargs or {}
retry = 0
while True:
try:
return func(*func_args, **func_kwargs)
except exceptions as err:
if retry >= max_retries or (status_codes and err.response.status_code not in status_codes):
raise err
else:
sleep_time = min(max_wait_time, base_wait_time * 2**retry) # Exponential backoff
                logger.info(f"{func} timed out, retrying in {sleep_time}s... [{retry + 1}/{max_retries}]")
time.sleep(sleep_time)
retry += 1
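# Illustrative sketch, not part of the original module: the hypothetical callable
# below fails twice with a transient error, so _retry sleeps with exponential
# backoff (0.01s then 0.02s) before the third attempt succeeds.
def _example_retry():
    attempts = {"count": 0}
    def flaky():
        attempts["count"] += 1
        if attempts["count"] < 3:
            raise requests.exceptions.ConnectionError("transient failure")
        return "ok"
    return _retry(flaky, max_retries=5, base_wait_time=0.01, max_wait_time=0.05)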
def _request_with_retry(
method: str,
url: str,
max_retries: int = 0,
base_wait_time: float = 0.5,
max_wait_time: float = 2,
timeout: float = 10.0,
**params,
) -> requests.Response:
"""Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff.
    Note that if the environment variable HF_DATASETS_OFFLINE is set to 1, then an OfflineModeIsEnabled error is raised.
Args:
method (str): HTTP method, such as 'GET' or 'HEAD'.
url (str): The URL of the resource to fetch.
max_retries (int): Maximum number of retries, defaults to 0 (no retries).
base_wait_time (float): Duration (in seconds) to wait before retrying the first time. Wait time between
retries then grows exponentially, capped by max_wait_time.
max_wait_time (float): Maximum amount of time between two retries, in seconds.
**params (additional keyword arguments): Params to pass to :obj:`requests.request`.
"""
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
tries, success = 0, False
while not success:
tries += 1
try:
response = requests.request(method=method.upper(), url=url, timeout=timeout, **params)
success = True
except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as err:
if tries > max_retries:
raise err
else:
                logger.info(f"{method} request to {url} timed out, retrying... [{tries}/{max_retries}]")
sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1)) # Exponential backoff
time.sleep(sleep_time)
return response
def fsspec_head(url, storage_options=None):
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
fs, _, paths = fsspec.get_fs_token_paths(url, storage_options=storage_options)
if len(paths) > 1:
raise ValueError(f"HEAD can be called with at most one path but was called with {paths}")
return fs.info(paths[0])
class TqdmCallback(fsspec.callbacks.TqdmCallback):
def __init__(self, tqdm_kwargs=None, *args, **kwargs):
super().__init__(tqdm_kwargs, *args, **kwargs)
self._tqdm = logging # replace tqdm.tqdm by datasets.logging.tqdm
def fsspec_get(url, temp_file, storage_options=None, desc=None):
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
fs, _, paths = fsspec.get_fs_token_paths(url, storage_options=storage_options)
if len(paths) > 1:
raise ValueError(f"GET can be called with at most one path but was called with {paths}")
callback = TqdmCallback(
tqdm_kwargs={
"desc": desc or "Downloading",
"disable": not logging.is_progress_bar_enabled(),
"unit": "B",
"unit_scale": True,
}
)
fs.get_file(paths[0], temp_file.name, callback=callback)
def ftp_head(url, timeout=10.0):
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
try:
with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
r.read(1)
except Exception:
return False
return True
def ftp_get(url, temp_file, timeout=10.0):
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
try:
logger.info(f"Getting through FTP {url} into {temp_file.name}")
with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
shutil.copyfileobj(r, temp_file)
except urllib.error.URLError as e:
raise ConnectionError(e) from None
def http_get(
url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=100.0, max_retries=0, desc=None
):
headers = copy.deepcopy(headers) or {}
headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
if resume_size > 0:
headers["Range"] = f"bytes={resume_size:d}-"
response = _request_with_retry(
method="GET",
url=url,
stream=True,
proxies=proxies,
headers=headers,
cookies=cookies,
max_retries=max_retries,
timeout=timeout,
)
if response.status_code == 416: # Range not satisfiable
return
content_length = response.headers.get("Content-Length")
total = resume_size + int(content_length) if content_length is not None else None
with logging.tqdm(
unit="B",
unit_scale=True,
total=total,
initial=resume_size,
desc=desc or "Downloading",
disable=not logging.is_progress_bar_enabled(),
) as progress:
for chunk in response.iter_content(chunk_size=1024):
progress.update(len(chunk))
temp_file.write(chunk)
def http_head(
url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0
) -> requests.Response:
headers = copy.deepcopy(headers) or {}
headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
response = _request_with_retry(
method="HEAD",
url=url,
proxies=proxies,
headers=headers,
cookies=cookies,
allow_redirects=allow_redirects,
timeout=timeout,
max_retries=max_retries,
)
return response
def request_etag(
url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated"
) -> Optional[str]:
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
token = use_auth_token
if urlparse(url).scheme not in ("http", "https"):
return None
headers = get_authentication_headers_for_url(url, token=token)
response = http_head(url, headers=headers, max_retries=3)
response.raise_for_status()
etag = response.headers.get("ETag") if response.ok else None
return etag
def get_from_cache(
url,
cache_dir=None,
force_download=False,
proxies=None,
etag_timeout=100,
resume_download=False,
user_agent=None,
local_files_only=False,
use_etag=True,
max_retries=0,
token=None,
use_auth_token="deprecated",
ignore_url_params=False,
storage_options=None,
download_desc=None,
) -> str:
"""
Given a URL, look for the corresponding file in the local cache.
If it's not there, download it. Then return the path to the cached file.
Return:
Local path (string)
Raises:
FileNotFoundError: in case of non-recoverable file
(non-existent or no cache on disk)
ConnectionError: in case of unreachable url
and no cache on disk
"""
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
token = use_auth_token
if cache_dir is None:
cache_dir = config.HF_DATASETS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
if ignore_url_params:
# strip all query parameters and #fragments from the URL
cached_url = urljoin(url, urlparse(url).path)
else:
cached_url = url # additional parameters may be added to the given URL
connected = False
response = None
cookies = None
etag = None
head_error = None
scheme = None
    # Try a first time to find the file on the local file system without eTag (None)
# if we don't ask for 'force_download' then we spare a request
filename = hash_url_to_filename(cached_url, etag=None)
cache_path = os.path.join(cache_dir, filename)
if os.path.exists(cache_path) and not force_download and not use_etag:
return cache_path
# Prepare headers for authentication
headers = get_authentication_headers_for_url(url, token=token)
if user_agent is not None:
headers["user-agent"] = user_agent
# We don't have the file locally or we need an eTag
if not local_files_only:
scheme = urlparse(url).scheme
if scheme == "ftp":
connected = ftp_head(url)
elif scheme not in ("http", "https"):
response = fsspec_head(url, storage_options=storage_options)
# s3fs uses "ETag", gcsfs uses "etag"
etag = (response.get("ETag", None) or response.get("etag", None)) if use_etag else None
connected = True
try:
response = http_head(
url,
allow_redirects=True,
proxies=proxies,
timeout=etag_timeout,
max_retries=max_retries,
headers=headers,
)
if response.status_code == 200: # ok
etag = response.headers.get("ETag") if use_etag else None
for k, v in response.cookies.items():
# In some edge cases, we need to get a confirmation token
if k.startswith("download_warning") and "drive.google.com" in url:
url += "&confirm=" + v
cookies = response.cookies
connected = True
# Fix Google Drive URL to avoid Virus scan warning
if "drive.google.com" in url and "confirm=" not in url:
url += "&confirm=t"
# In some edge cases, head request returns 400 but the connection is actually ok
elif (
(response.status_code == 400 and "firebasestorage.googleapis.com" in url)
or (response.status_code == 405 and "drive.google.com" in url)
or (
response.status_code == 403
and (
re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url)
or re.match(r"^https://.*?s3.*?amazonaws.com/.*?$", response.url)
)
)
or (response.status_code == 403 and "ndownloader.figstatic.com" in url)
):
connected = True
logger.info(f"Couldn't get ETag version for url {url}")
elif response.status_code == 401 and config.HF_ENDPOINT in url and token is None:
raise ConnectionError(
f"Unauthorized for URL {url}. Please use the parameter `token=True` after logging in with `huggingface-cli login`"
)
except (OSError, requests.exceptions.Timeout) as e:
# not connected
head_error = e
pass
    # connected is False: we don't have a connection, or the url doesn't exist, or it is otherwise inaccessible.
# try to get the last downloaded one
if not connected:
if os.path.exists(cache_path) and not force_download:
return cache_path
if local_files_only:
raise FileNotFoundError(
f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been"
" disabled. To enable file online look-ups, set 'local_files_only' to False."
)
elif response is not None and response.status_code == 404:
raise FileNotFoundError(f"Couldn't find file at {url}")
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
if head_error is not None:
raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})")
elif response is not None:
raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})")
else:
raise ConnectionError(f"Couldn't reach {url}")
# Try a second time
filename = hash_url_to_filename(cached_url, etag)
cache_path = os.path.join(cache_dir, filename)
if os.path.exists(cache_path) and not force_download:
return cache_path
# From now on, connected is True.
# Prevent parallel downloads of the same file with a lock.
lock_path = cache_path + ".lock"
with FileLock(lock_path):
        # Check the cache again: another process that held the lock may have finished the download just before we acquired it
if os.path.exists(cache_path) and not force_download:
return cache_path
incomplete_path = cache_path + ".incomplete"
@contextmanager
def temp_file_manager(mode="w+b"):
with open(incomplete_path, mode) as f:
yield f
resume_size = 0
if resume_download:
temp_file_manager = partial(temp_file_manager, mode="a+b")
if os.path.exists(incomplete_path):
resume_size = os.stat(incomplete_path).st_size
# Download to temporary file, then copy to cache path once finished.
# Otherwise, you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}")
# GET file object
if scheme == "ftp":
ftp_get(url, temp_file)
elif scheme not in ("http", "https"):
fsspec_get(url, temp_file, storage_options=storage_options, desc=download_desc)
else:
http_get(
url,
temp_file,
proxies=proxies,
resume_size=resume_size,
headers=headers,
cookies=cookies,
max_retries=max_retries,
desc=download_desc,
)
logger.info(f"storing {url} in cache at {cache_path}")
shutil.move(temp_file.name, cache_path)
umask = os.umask(0o666)
os.umask(umask)
os.chmod(cache_path, 0o666 & ~umask)
logger.info(f"creating metadata file for {cache_path}")
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w", encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
return cache_path
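# Illustrative sketch, not part of the original module: get_from_cache writes a
# "<cache_path>.json" sidecar with the original url and etag; the hypothetical
# helper below simply reads that metadata back.
def _example_read_cache_metadata(cache_path: str) -> dict:
    with open(cache_path + ".json", encoding="utf-8") as meta_file:
        return json.load(meta_file)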
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = "".join(docstr) + "\n\n" + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator
def add_end_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "\n\n" + "".join(docstr)
return fn
return docstring_decorator
def estimate_dataset_size(paths):
return sum(path.stat().st_size for path in paths)
def readline(f: io.RawIOBase):
# From: https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525
res = bytearray()
while True:
b = f.read(1)
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
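# Illustrative sketch, not part of the original module: readline stops at the
# first newline (inclusive) and returns the remaining bytes at end of stream;
# io.BytesIO stands in for a raw stream here.
def _example_readline():
    stream = io.BytesIO(b"first line\nsecond line")
    assert readline(stream) == b"first line\n"
    assert readline(stream) == b"second line"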
| datasets-main | src/datasets/utils/file_utils.py |
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| datasets-main | src/datasets/utils/typing.py |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
def __init__(self, cache_dir: Optional[str] = None):
self.extract_dir = (
os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
self.extractor = Extractor
def _get_output_path(self, path: str) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir and get the extracted path name by hashing the original path
abs_path = os.path.abspath(path)
return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
def _do_extract(self, output_path: str, force_extract: bool) -> bool:
return force_extract or (
not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
)
def extract(self, input_path: str, force_extract: bool = False) -> str:
extractor_format = self.extractor.infer_extractor_format(input_path)
if not extractor_format:
return input_path
output_path = self._get_output_path(input_path)
if self._do_extract(output_path, force_extract):
self.extractor.extract(input_path, output_path, extractor_format)
return output_path
class BaseExtractor(ABC):
@classmethod
@abstractmethod
def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
...
@staticmethod
@abstractmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
magic_numbers: List[bytes] = []
@staticmethod
def read_magic_number(path: Union[Path, str], magic_number_length: int):
with open(path, "rb") as f:
return f.read(magic_number_length)
@classmethod
def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
if not magic_number:
magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
try:
magic_number = cls.read_magic_number(path, magic_number_length)
except OSError:
return False
return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
@classmethod
def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
return tarfile.is_tarfile(path)
@staticmethod
def safemembers(members, output_path):
"""
Fix for CVE-2007-4559
Desc:
Directory traversal vulnerability in the (1) extract and (2) extractall functions in the tarfile
module in Python allows user-assisted remote attackers to overwrite arbitrary files via a .. (dot dot)
sequence in filenames in a TAR archive, a related issue to CVE-2001-1267.
See: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2007-4559
From: https://stackoverflow.com/a/10077309
"""
def resolved(path: str) -> str:
return os.path.realpath(os.path.abspath(path))
def badpath(path: str, base: str) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(base, path)).startswith(base)
def badlink(info, base: str) -> bool:
# Links are interpreted relative to the directory containing the link
tip = resolved(os.path.join(base, os.path.dirname(info.name)))
return badpath(info.linkname, base=tip)
base = resolved(output_path)
for finfo in members:
if badpath(finfo.name, base):
logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
elif finfo.issym() and badlink(finfo, base):
logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
elif finfo.islnk() and badlink(finfo, base):
logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
else:
yield finfo
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
os.makedirs(output_path, exist_ok=True)
tar_file = tarfile.open(input_path)
tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
tar_file.close()
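# Illustrative sketch, not part of the original module: the hypothetical check
# below shows safemembers dropping a member whose "../" path would escape the
# output directory, while a regular member is kept.
def _example_safemembers_filtering(output_dir="/tmp/safemembers_demo"):
    good = tarfile.TarInfo(name="data/train.txt")
    bad = tarfile.TarInfo(name="../escape.txt")
    kept = [member.name for member in TarExtractor.safemembers([good, bad], output_dir)]
    return kept  # expected: ["data/train.txt"]; the "../" member is logged and skipped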
class GzipExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x1F\x8B"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
with gzip.open(input_path, "rb") as gzip_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
magic_numbers = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
if super().is_extractable(path, magic_number=magic_number):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(path, "rb") as fp:
endrec = _EndRecData(fp)
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
data = fp.read(sizeCentralDir) # CD is where we expect it to be
if len(data) == sizeCentralDir:
centdir = struct.unpack(structCentralDir, data) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
os.makedirs(output_path, exist_ok=True)
with zipfile.ZipFile(input_path, "r") as zip_file:
zip_file.extractall(output_path)
zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
with lzma.open(input_path) as compressed_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile")
import rarfile
os.makedirs(output_path, exist_ok=True)
rf = rarfile.RarFile(input_path)
rf.extractall(output_path)
rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard")
import zstandard as zstd
dctx = zstd.ZstdDecompressor()
with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x42\x5A\x68"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
with bz2.open(input_path, "rb") as compressed_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr")
import py7zr
os.makedirs(output_path, exist_ok=True)
with py7zr.SevenZipFile(input_path, "r") as archive:
archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x04\x22\x4D\x18"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4")
import lz4.frame
with lz4.frame.open(input_path, "rb") as compressed_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # zip is checked after tar and gzip, because some archives can be wrongly detected as zip (e.g. when they are actually tar or gzip files)
extractors: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": Bzip2Extractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": Lz4Extractor, # <Added version="2.4.0"/>
}
@classmethod
def _get_magic_number_max_length(cls):
return max(
len(extractor_magic_number)
for extractor in cls.extractors.values()
if issubclass(extractor, MagicNumberBaseExtractor)
for extractor_magic_number in extractor.magic_numbers
)
@staticmethod
def _read_magic_number(path: Union[Path, str], magic_number_length: int):
try:
return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
except OSError:
return b""
@classmethod
def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead.",
category=FutureWarning,
)
extractor_format = cls.infer_extractor_format(path)
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def infer_extractor_format(cls, path: Union[Path, str]) -> str: # <Added version="2.4.0"/>
magic_number_max_length = cls._get_magic_number_max_length()
magic_number = cls._read_magic_number(path, magic_number_max_length)
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(path, magic_number=magic_number):
return extractor_format
@classmethod
def extract(
cls,
input_path: Union[Path, str],
output_path: Union[Path, str],
extractor_format: Optional[str] = None, # <Added version="2.4.0"/>
extractor: Optional[BaseExtractor] = "deprecated",
) -> None:
os.makedirs(os.path.dirname(output_path), exist_ok=True)
# Prevent parallel extractions
lock_path = str(Path(output_path).with_suffix(".lock"))
with FileLock(lock_path):
shutil.rmtree(output_path, ignore_errors=True)
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(extractor_format, str): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead.",
category=FutureWarning,
)
extractor = extractor if extractor != "deprecated" else extractor_format
else:
extractor = cls.extractors[extractor_format]
return extractor.extract(input_path, output_path)
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0.",
category=FutureWarning,
)
for extractor in cls.extractors.values():
if extractor.is_extractable(input_path):
return extractor.extract(input_path, output_path)
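# Illustrative sketch, not part of the original module: the hypothetical helper
# below writes a tiny gzip file under a misleading extension and checks that the
# format is inferred from the magic number rather than from the file name.
def _example_infer_extractor_format():
    import tempfile
    fd, path = tempfile.mkstemp(suffix=".unknown")
    os.close(fd)
    with gzip.open(path, "wb") as f:
        f.write(b"hello")
    try:
        return Extractor.infer_extractor_format(path)  # expected: "gzip"
    finally:
        os.remove(path)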
| datasets-main | src/datasets/utils/extract.py |
# Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF-specific utils import."""
import os
import warnings
from functools import partial
from math import ceil
from uuid import uuid4
import numpy as np
import pyarrow as pa
from multiprocess import get_context
try:
from multiprocess.shared_memory import SharedMemory
except ImportError:
SharedMemory = None # Version checks should prevent this being called on older Python versions
from .. import config
def minimal_tf_collate_fn(features):
if isinstance(features, dict): # case batch_size=None: nothing to collate
return features
elif config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
first = features[0]
batch = {}
for k, v in first.items():
if isinstance(v, np.ndarray):
batch[k] = np.stack([f[k] for f in features])
elif isinstance(v, tf.Tensor):
batch[k] = tf.stack([f[k] for f in features])
else:
batch[k] = np.array([f[k] for f in features])
return batch
def minimal_tf_collate_fn_with_renaming(features):
batch = minimal_tf_collate_fn(features)
if "label" in batch:
batch["labels"] = batch["label"]
del batch["label"]
return batch
def is_numeric_pa_type(pa_type):
if pa.types.is_list(pa_type):
return is_numeric_pa_type(pa_type.value_type)
return pa.types.is_integer(pa_type) or pa.types.is_floating(pa_type) or pa.types.is_decimal(pa_type)
def is_numeric_feature(feature):
from .. import ClassLabel, Sequence, Value
from ..features.features import _ArrayXD
if isinstance(feature, Sequence):
return is_numeric_feature(feature.feature)
elif isinstance(feature, list):
return is_numeric_feature(feature[0])
elif isinstance(feature, _ArrayXD):
return is_numeric_pa_type(feature().storage_dtype)
elif isinstance(feature, Value):
return is_numeric_pa_type(feature())
elif isinstance(feature, ClassLabel):
return True
else:
return False
def np_get_batch(
indices, dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, return_dict=False
):
if not isinstance(indices, np.ndarray):
indices = indices.numpy()
is_batched = True
# Optimization - if we're loading a sequential batch, do it with slicing instead of a list of indices
if isinstance(indices, np.integer):
batch = dataset[indices.item()]
is_batched = False
elif np.all(np.diff(indices) == 1):
batch = dataset[indices[0] : indices[-1] + 1]
elif isinstance(indices, np.ndarray):
batch = dataset[indices]
else:
raise RuntimeError("Unexpected type for indices: {}".format(type(indices)))
if cols_to_retain is not None:
batch = {
key: value
for key, value in batch.items()
if key in cols_to_retain or key in ("label", "label_ids", "labels")
}
if is_batched:
actual_size = len(list(batch.values())[0]) # Get the length of one of the arrays, assume all same
# Our collators expect a list of dicts, not a dict of lists/arrays, so we invert
batch = [{key: value[i] for key, value in batch.items()} for i in range(actual_size)]
batch = collate_fn(batch, **collate_fn_args)
if return_dict:
out_batch = {}
for col, cast_dtype in columns_to_np_types.items():
# In case the collate_fn returns something strange
array = np.array(batch[col])
array = array.astype(cast_dtype)
out_batch[col] = array
else:
out_batch = []
for col, cast_dtype in columns_to_np_types.items():
# In case the collate_fn returns something strange
array = np.array(batch[col])
array = array.astype(cast_dtype)
out_batch.append(array)
return out_batch
def dataset_to_tf(
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
output_signature,
shuffle,
batch_size,
drop_remainder,
):
"""Create a tf.data.Dataset from the underlying Dataset. This is a single-process method - the multiprocess
equivalent is multiprocess_dataset_to_tf.
Args:
dataset (`Dataset`): Dataset to wrap with tf.data.Dataset.
cols_to_retain (`List[str]`): Dataset column(s) to load in the
tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and
that do not exist in the original dataset.
collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate
lists of samples into a batch.
collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
`collate_fn`. Can be empty.
columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes.
output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to
`tf.TensorSpec` objects.
shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for
validation/evaluation.
batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that
the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`.
drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided,
defaults to the same setting as shuffle.
Returns:
`tf.data.Dataset`
"""
if config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
# TODO Matt: When our minimum Python version is 3.8 or higher, we can delete all of this and move everything
# to the NumPy multiprocessing path.
if hasattr(tf, "random_index_shuffle"):
random_index_shuffle = tf.random_index_shuffle
elif hasattr(tf.random.experimental, "index_shuffle"):
random_index_shuffle = tf.random.experimental.index_shuffle
else:
if len(dataset) > 10_000_000:
warnings.warn(
"to_tf_dataset() can be memory-inefficient on versions of TensorFlow older than 2.9. "
"If you are iterating over a dataset with a very large number of samples, consider "
"upgrading to TF >= 2.9."
)
random_index_shuffle = None
getter_fn = partial(
np_get_batch,
dataset=dataset,
cols_to_retain=cols_to_retain,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
columns_to_np_types=columns_to_np_types,
return_dict=False,
)
# This works because dictionaries always output in the same order
tout = [tf.dtypes.as_dtype(dtype) for dtype in columns_to_np_types.values()]
@tf.function(input_signature=[tf.TensorSpec(None, tf.int64)])
def fetch_function(indices):
output = tf.py_function(
getter_fn,
inp=[indices],
Tout=tout,
)
return {key: output[i] for i, key in enumerate(columns_to_np_types.keys())}
tf_dataset = tf.data.Dataset.range(len(dataset))
if shuffle and random_index_shuffle is not None:
base_seed = tf.fill((3,), value=tf.cast(-1, dtype=tf.int64))
def scan_random_index(state, index):
if tf.reduce_all(state == -1):
# This generates a new random seed once per epoch only,
# to ensure that we iterate over each sample exactly once per epoch
state = tf.random.uniform(shape=(3,), maxval=2**62, dtype=tf.int64)
shuffled_index = random_index_shuffle(index=index, seed=state, max_index=len(dataset) - 1)
return state, shuffled_index
tf_dataset = tf_dataset.scan(base_seed, scan_random_index)
elif shuffle:
tf_dataset = tf_dataset.shuffle(tf_dataset.cardinality())
if batch_size is not None:
tf_dataset = tf_dataset.batch(batch_size, drop_remainder=drop_remainder)
tf_dataset = tf_dataset.map(fetch_function)
if batch_size is not None:
def ensure_shapes(input_dict):
return {key: tf.ensure_shape(val, output_signature[key].shape) for key, val in input_dict.items()}
else:
# Ensure shape but remove batch dimension of output_signature[key].shape
def ensure_shapes(input_dict):
return {key: tf.ensure_shape(val, output_signature[key].shape[1:]) for key, val in input_dict.items()}
return tf_dataset.map(ensure_shapes)
class SharedMemoryContext:
# This is a context manager for creating shared memory that ensures cleanup happens even if a process is interrupted
# The process that creates shared memory is always the one responsible for unlinking it in the end
def __init__(self):
self.created_shms = []
self.opened_shms = []
def get_shm(self, name, size, create):
shm = SharedMemory(size=int(size), name=name, create=create)
if create:
# We only unlink the ones we created in this context
self.created_shms.append(shm)
else:
# If we didn't create it, we only close it when done, we don't unlink it
self.opened_shms.append(shm)
return shm
def get_array(self, name, shape, dtype, create):
shm = self.get_shm(name=name, size=np.prod(shape) * np.dtype(dtype).itemsize, create=create)
return np.ndarray(shape, dtype=dtype, buffer=shm.buf)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
for shm in self.created_shms:
shm.close()
shm.unlink()
for shm in self.opened_shms:
shm.close()
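# Illustrative sketch, not part of the original module: the hypothetical helper
# below round-trips a small numpy array through shared memory, assuming
# multiprocess.shared_memory is available (Python >= 3.8, see the import guard above).
def _example_shared_memory_roundtrip():
    name = f"demo_{uuid4().hex[:8]}"
    with SharedMemoryContext() as creator:
        src = creator.get_array(name, shape=(2, 3), dtype=np.float32, create=True)
        src[:] = np.arange(6, dtype=np.float32).reshape(2, 3)
        with SharedMemoryContext() as reader:
            dst = reader.get_array(name, shape=(2, 3), dtype=np.float32, create=False)
            roundtrip = np.copy(dst)  # copy out before the creator unlinks the segment
    return roundtrip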
class NumpyMultiprocessingGenerator:
def __init__(
self,
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
output_signature,
shuffle,
batch_size,
drop_remainder,
num_workers,
):
self.dataset = dataset
self.cols_to_retain = cols_to_retain
self.collate_fn = collate_fn
self.collate_fn_args = collate_fn_args
self.string_columns = [col for col, dtype in columns_to_np_types.items() if dtype in (np.unicode_, np.str_)]
# Strings will be converted to arrays of single unicode chars, so that we can have a constant itemsize
self.columns_to_np_types = {
col: dtype if col not in self.string_columns else np.dtype("U1")
for col, dtype in columns_to_np_types.items()
}
self.output_signature = output_signature
self.shuffle = shuffle
self.batch_size = batch_size
self.drop_remainder = drop_remainder
self.num_workers = num_workers
# Because strings are converted to characters, we need to add one extra dimension to the shape
self.columns_to_ranks = {
col: int(spec.shape.rank) if col not in self.string_columns else int(spec.shape.rank) + 1
for col, spec in output_signature.items()
}
def __iter__(self):
# Make sure we only spawn workers if they have work to do
num_workers = min(self.num_workers, int(ceil(len(self.dataset) / self.batch_size)))
# Do the shuffling in iter so that it's done at the start of each epoch
per_worker_batches, final_batch, final_batch_worker = self.distribute_batches(
self.dataset, self.batch_size, self.drop_remainder, num_workers, self.shuffle
)
ctx = get_context("spawn")
names = []
shape_arrays = []
workers = []
array_ready_events = [ctx.Event() for _ in range(num_workers)]
array_loaded_events = [ctx.Event() for _ in range(num_workers)]
base_args = {
"dataset": self.dataset,
"cols_to_retain": self.cols_to_retain,
"collate_fn": self.collate_fn,
"collate_fn_args": self.collate_fn_args,
"columns_to_np_types": self.columns_to_np_types,
"columns_to_ranks": self.columns_to_ranks,
"string_columns": self.string_columns,
}
with SharedMemoryContext() as shm_ctx:
for i in range(num_workers):
worker_random_id = str(uuid4())
worker_name = f"dw_{i}_{worker_random_id}"[:10]
names.append(worker_name)
worker_shape_arrays = {
col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=True)
for col, rank in self.columns_to_ranks.items()
}
shape_arrays.append(worker_shape_arrays)
worker_indices = per_worker_batches[i]
if i == final_batch_worker and final_batch is not None:
final_batch_arg = final_batch
else:
final_batch_arg = None
worker_kwargs = {
"worker_name": worker_name,
"indices": worker_indices,
"extra_batch": final_batch_arg,
"array_ready_event": array_ready_events[i],
"array_loaded_event": array_loaded_events[i],
**base_args,
}
worker = ctx.Process(target=self.worker_loop, kwargs=worker_kwargs, daemon=True)
worker.start()
workers.append(worker)
end_signal_received = False
while not end_signal_received:
for i in range(num_workers):
if not array_ready_events[i].wait(timeout=60):
raise TimeoutError("Data loading worker timed out!")
array_ready_events[i].clear()
array_shapes = shape_arrays[i]
if any(np.any(shape < 0) for shape in array_shapes.values()):
# Child processes send negative array shapes to indicate
# that no more data is going to be sent
end_signal_received = True
break
# Matt: Because array shapes are variable we recreate the shared memory each iteration.
# I suspect repeatedly opening lots of shared memory is the bottleneck for the parent process.
# A future optimization, at the cost of some code complexity, could be to reuse shared memory
# between iterations, but this would require knowing in advance the maximum size, or having
# a system to only create a new memory block when a new maximum size is seen.
# Another potential optimization would be to figure out which memory copies are necessary,
# or whether we can yield objects straight out of shared memory.
with SharedMemoryContext() as batch_shm_ctx:
# This memory context only lasts long enough to copy everything out of the batch
arrays = {
col: batch_shm_ctx.get_array(
f"{names[i]}_{col}",
shape=shape,
dtype=self.columns_to_np_types[col],
create=False,
)
for col, shape in array_shapes.items()
}
# Copy everything out of shm because the memory
# will be unlinked by the child process at some point
arrays = {col: np.copy(arr) for col, arr in arrays.items()}
# Now we convert any unicode char arrays to strings
for string_col in self.string_columns:
arrays[string_col] = (
arrays[string_col].view(f"U{arrays[string_col].shape[-1]}").squeeze(-1)
)
yield arrays
array_loaded_events[i].set()
# Now we just do some cleanup
# Shared memory is cleaned up by the context manager, so we just make sure workers finish
for worker in workers:
worker.join()
def __call__(self):
return self
@staticmethod
def worker_loop(
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
columns_to_ranks,
string_columns,
indices,
extra_batch,
worker_name,
array_ready_event,
array_loaded_event,
):
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
if config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
tf.config.set_visible_devices([], "GPU") # Make sure workers don't try to allocate GPU memory
def send_batch_to_parent(indices):
batch = np_get_batch(
indices=indices,
dataset=dataset,
cols_to_retain=cols_to_retain,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
columns_to_np_types=columns_to_np_types,
return_dict=True,
)
# Now begins the fun part where we start shovelling shared memory at the parent process
out_arrays = {}
with SharedMemoryContext() as batch_shm_ctx:
# The batch shared memory context exists only as long as it takes for the parent process
# to read everything, after which it cleans everything up again
for col, cast_dtype in columns_to_np_types.items():
# Everything has to be np.array for this to work, even if the collate_fn is giving us tf.Tensor
array = batch[col]
if col in string_columns:
# We can't send unicode arrays over shared memory, so we convert to single chars ("U1")
# which have a fixed width of 4 bytes. The parent process will convert these back to strings.
array = array.view("U1").reshape(array.shape + (-1,))
shape_arrays[col][:] = array.shape
out_arrays[col] = batch_shm_ctx.get_array(
f"{worker_name}_{col}", shape=array.shape, dtype=cast_dtype, create=True
)
out_arrays[col][:] = array
array_ready_event.set()
array_loaded_event.wait()
array_loaded_event.clear()
with SharedMemoryContext() as shm_ctx:
shape_arrays = {
col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=False)
for col, rank in columns_to_ranks.items()
}
for batch in indices:
send_batch_to_parent(batch)
if extra_batch is not None:
send_batch_to_parent(extra_batch)
# Now we send a batsignal to the parent process that we're done
for col, array in shape_arrays.items():
array[:] = -1
array_ready_event.set()
@staticmethod
def distribute_batches(dataset, batch_size, drop_remainder, num_workers, shuffle):
indices = np.arange(len(dataset))
if shuffle:
np.random.shuffle(indices)
num_samples = len(indices)
# We distribute the batches so that reading from the workers in round-robin order yields the exact
# order specified in indices. This is only important when shuffle is False, but we do it regardless.
incomplete_batch_cutoff = num_samples - (num_samples % batch_size)
indices, last_incomplete_batch = np.split(indices, [incomplete_batch_cutoff])
if drop_remainder or len(last_incomplete_batch) == 0:
last_incomplete_batch = None
indices = indices.reshape(-1, batch_size)
num_batches = len(indices)
final_batches_cutoff = num_batches - (num_batches % num_workers)
indices, final_batches = np.split(indices, [final_batches_cutoff])
indices = indices.reshape(-1, num_workers, batch_size)
per_worker_indices = np.split(indices, indices.shape[1], axis=1)
per_worker_indices = [np.squeeze(worker_indices, 1) for worker_indices in per_worker_indices]
# Distribute the final batches to the first workers
for i in range(len(final_batches)):
# len(final_batches) can be zero, and is always less than num_workers
per_worker_indices[i] = np.concatenate([per_worker_indices[i], final_batches[i].reshape(1, -1)], axis=0)
# Add the last incomplete batch to the next worker, which might be the first worker
if last_incomplete_batch is not None:
incomplete_batch_worker_idx = len(final_batches)
else:
incomplete_batch_worker_idx = None
return per_worker_indices, last_incomplete_batch, incomplete_batch_worker_idx
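# Illustrative sketch, not part of the original module: distribute_batches only
# uses len(dataset), so a plain list stands in for a Dataset. With 10 samples,
# batch_size=3 and 2 workers, reading the workers round-robin preserves order:
# worker 0 gets [0 1 2] and [6 7 8], worker 1 gets [3 4 5], and the incomplete
# batch [9] is assigned to worker index 1.
def _example_distribute_batches():
    return NumpyMultiprocessingGenerator.distribute_batches(
        dataset=list(range(10)), batch_size=3, drop_remainder=False, num_workers=2, shuffle=False
    )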
def multiprocess_dataset_to_tf(
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
output_signature,
shuffle,
batch_size,
drop_remainder,
num_workers,
):
"""Create a tf.data.Dataset from the underlying Dataset. This is a multi-process method - the single-process
equivalent is dataset_to_tf.
Args:
dataset (`Dataset`): Dataset to wrap with tf.data.Dataset.
cols_to_retain (`List[str]`): Dataset column(s) to load in the
tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and
that do not exist in the original dataset.
collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate
lists of samples into a batch.
collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
`collate_fn`. Can be empty.
columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes.
output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to
`tf.TensorSpec` objects.
shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for
validation/evaluation.
batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that
the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`.
drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided,
defaults to the same setting as shuffle.
num_workers (`int`): Number of workers to use for loading the dataset. Should be >= 1.
Returns:
`tf.data.Dataset`
"""
if config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
data_generator = NumpyMultiprocessingGenerator(
dataset=dataset,
cols_to_retain=cols_to_retain,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
columns_to_np_types=columns_to_np_types,
output_signature=output_signature,
shuffle=shuffle,
batch_size=batch_size,
drop_remainder=drop_remainder,
num_workers=num_workers,
)
tf_dataset = tf.data.Dataset.from_generator(data_generator, output_signature=output_signature)
if drop_remainder:
dataset_length = int(len(dataset) // batch_size)
else:
dataset_length = int(ceil(len(dataset) / batch_size))
return tf_dataset.apply(tf.data.experimental.assert_cardinality(dataset_length))
| datasets-main | src/datasets/utils/tf_utils.py |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
"""`Enum` that specifies which verification checks to run.
The default mode is `BASIC_CHECKS`, which will perform only rudimentary checks to avoid slowdowns
when generating/downloading a dataset for the first time.
The verification modes:
| | Verification checks |
|---------------------------|------------------------------------------------------------------------------ |
| `ALL_CHECKS` | Split checks, uniqueness of the keys yielded in case of the GeneratorBuilder |
| | and the validity (number of files, checksums, etc.) of downloaded files |
| `BASIC_CHECKS` (default) | Same as `ALL_CHECKS` but without checking downloaded files |
| `NO_CHECKS` | None |
"""
ALL_CHECKS = "all_checks"
BASIC_CHECKS = "basic_checks"
NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception):
"""Exceptions during checksums verifications of downloaded files."""
class UnexpectedDownloadedFile(ChecksumVerificationException):
"""Some downloaded files were not expected."""
class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
"""Some files were supposed to be downloaded but were not."""
class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file's checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
if expected_checksums is None:
logger.info("Unable to verify checksums.")
return
if len(set(expected_checksums) - set(recorded_checksums)) > 0:
raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
if len(set(recorded_checksums) - set(expected_checksums)) > 0:
raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
for_verification_name = " for " + verification_name if verification_name is not None else ""
if len(bad_urls) > 0:
raise NonMatchingChecksumError(
f"Checksums didn't match{for_verification_name}:\n"
f"{bad_urls}\n"
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
)
logger.info("All the checksums matched successfully" + for_verification_name)
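# Illustrative sketch, not part of the original module: matching checksum dicts
# pass silently, while a single mismatched entry raises NonMatchingChecksumError
# (the url and checksum values below are hypothetical).
def _example_verify_checksums():
    expected = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "abc"}}
    verify_checksums(expected, dict(expected))  # ok, everything matches
    try:
        verify_checksums(expected, {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "xyz"}})
    except NonMatchingChecksumError as err:
        return err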
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""
class UnexpectedSplits(SplitsVerificationException):
    """Some recorded splits were not expected."""
class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits are missing from the recorded splits."""
class NonMatchingSplitsSizesError(SplitsVerificationException):
"""The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
if expected_splits is None:
logger.info("Unable to verify splits sizes.")
return
if len(set(expected_splits) - set(recorded_splits)) > 0:
raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
if len(set(recorded_splits) - set(expected_splits)) > 0:
raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
bad_splits = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(bad_splits) > 0:
raise NonMatchingSplitsSizesError(str(bad_splits))
logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
"""Compute the file size and the sha256 checksum of a file"""
if record_checksum:
m = sha256()
with open(path, "rb") as f:
for chunk in iter(lambda: f.read(1 << 20), b""):
m.update(chunk)
checksum = m.hexdigest()
else:
checksum = None
return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
"""Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`.
Args:
dataset_size (int): Dataset size in bytes.
Returns:
bool: Whether `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`.
"""
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| datasets-main | src/datasets/utils/info_utils.py |
import enum
import inspect
import warnings
from functools import wraps
from typing import Callable, Optional
from .logging import get_logger
_emitted_deprecation_warnings = set()
logger = get_logger(__name__)
def deprecated(help_message: Optional[str] = None):
"""Decorator to mark a class or a function as deprecated.
Args:
help_message (:obj:`str`, optional): An optional message to guide the user on how to
switch to non-deprecated usage of the library.
"""
def decorator(deprecated_class_or_function: Callable):
global _emitted_deprecation_warnings
if inspect.isclass(deprecated_class_or_function):
deprecated_function = deprecated_class_or_function.__init__
name = deprecated_class_or_function.__name__
else:
deprecated_function = deprecated_class_or_function
name = deprecated_function.__name__
# Support deprecating __init__ class method: class name instead
name = name if name != "__init__" else deprecated_function.__qualname__.split(".")[-2]
        warning_msg = (
            f"{name} is deprecated and will be removed in the next major version of datasets."
            + (f" {help_message}" if help_message else "")
        )
@wraps(deprecated_function)
def wrapper(*args, **kwargs):
func_hash = hash(deprecated_function)
if func_hash not in _emitted_deprecation_warnings:
warnings.warn(warning_msg, category=FutureWarning, stacklevel=2)
_emitted_deprecation_warnings.add(func_hash)
return deprecated_function(*args, **kwargs)
wrapper._decorator_name_ = "deprecated"
if inspect.isclass(deprecated_class_or_function):
deprecated_class_or_function.__init__ = wrapper
return deprecated_class_or_function
else:
return wrapper
return decorator
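# Illustrative sketch, not part of the original module: the hypothetical helper
# below is marked deprecated; calling it still works but emits a single
# FutureWarning pointing at the suggested replacement.
@deprecated(help_message="Use '_example_new_helper' instead.")
def _example_old_helper(x):
    return x * 2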
class OnAccess(enum.EnumMeta):
"""
Enum metaclass that calls a user-specified function whenever a member is accessed.
"""
def __getattribute__(cls, name):
obj = super().__getattribute__(name)
if isinstance(obj, enum.Enum) and obj._on_access:
obj._on_access()
return obj
def __getitem__(cls, name):
member = super().__getitem__(name)
if member._on_access:
member._on_access()
return member
def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
obj = super().__call__(value, names, module=module, qualname=qualname, type=type, start=start)
if isinstance(obj, enum.Enum) and obj._on_access:
obj._on_access()
return obj
class DeprecatedEnum(enum.Enum, metaclass=OnAccess):
"""
Enum class that calls `deprecate` method whenever a member is accessed.
"""
def __new__(cls, value):
member = object.__new__(cls)
member._value_ = value
member._on_access = member.deprecate
return member
@property
def help_message(self):
return ""
def deprecate(self):
help_message = f" {self.help_message}" if self.help_message else ""
warnings.warn(
f"'{self.__objclass__.__name__}' is deprecated and will be removed in the next major version of datasets."
+ help_message,
FutureWarning,
stacklevel=3,
)
| datasets-main | src/datasets/utils/deprecation_utils.py |
| datasets-main | src/datasets/utils/resources/__init__.py |
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Mock download manager interface."""
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
dummy_file_name = "dummy_data"
datasets_scripts_dir = "datasets"
is_streaming = False
def __init__(
self,
dataset_name: str,
config: str,
version: Union[Version, str],
cache_dir: Optional[str] = None,
use_local_dummy_data: bool = False,
load_existing_dummy_data: bool = True,
download_callbacks: Optional[List[Callable]] = None,
):
self.downloaded_size = 0
self.dataset_name = dataset_name
self.cache_dir = cache_dir
self.use_local_dummy_data = use_local_dummy_data
self.config = config
# download_callbacks take a single url as input
self.download_callbacks: List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
self.load_existing_dummy_data = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
self.version_name = str(version)
# to be downloaded
self._dummy_file = None
self._bucket_url = None
@property
def dummy_file(self):
if self._dummy_file is None:
self._dummy_file = self.download_dummy_data()
return self._dummy_file
@property
def dummy_data_folder(self):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy", self.config.name, self.version_name)
# structure is dummy / version_name
return os.path.join("dummy", self.version_name)
@property
def dummy_zip_file(self):
return os.path.join(self.dummy_data_folder, "dummy_data.zip")
def download_dummy_data(self):
path_to_dummy_data_dir = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
local_path = cached_path(
path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
)
return os.path.join(local_path, self.dummy_file_name)
@property
def local_path_to_dummy_data(self):
return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)
@property
def github_path_to_dummy_data(self):
if self._bucket_url is None:
self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
return self._bucket_url
@property
def manual_dir(self):
        # return full path if it's a dir
if os.path.isdir(self.dummy_file):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
# this function has to be in the manager under this name so that testing works
def download_and_extract(self, data_url, *args):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
dummy_file = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
dummy_file = self.dummy_file_name
# special case when data_url is a dict
if isinstance(data_url, dict):
return self.create_dummy_data_dict(dummy_file, data_url)
elif isinstance(data_url, (list, tuple)):
return self.create_dummy_data_list(dummy_file, data_url)
else:
return self.create_dummy_data_single(dummy_file, data_url)
# this function has to be in the manager under this name so that testing works
def download(self, data_url, *args):
return self.download_and_extract(data_url)
# this function has to be in the manager under this name so that testing works
def download_custom(self, data_url, custom_download):
return self.download_and_extract(data_url)
# this function has to be in the manager under this name so that testing works
def extract(self, path, *args, **kwargs):
return path
# this function has to be in the manager under this name so that testing works
def get_recorded_sizes_checksums(self):
return {}
def create_dummy_data_dict(self, path_to_dummy_data, data_url):
dummy_data_dict = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(single_urls, list):
for single_url in single_urls:
download_callback(single_url)
else:
single_url = single_urls
download_callback(single_url)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(single_urls, list):
value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
else:
single_url = single_urls
value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
dummy_data_dict[key] = value
# make sure that values are unique
if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
dummy_data_dict.values()
):
# append key to value to make its name unique
dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def create_dummy_data_list(self, path_to_dummy_data, data_url):
dummy_data_list = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
is_pubmed_records = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
)
if data_url and (is_tf_records or is_pubmed_records):
data_url = [data_url[0]] * len(data_url)
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(single_url)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
dummy_data_list.append(value)
return dummy_data_list
def create_dummy_data_single(self, path_to_dummy_data, data_url):
for download_callback in self.download_callbacks:
download_callback(data_url)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
if os.path.exists(value) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
            # while now we expect the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def delete_extracted_files(self):
pass
def manage_extracted_files(self):
pass
def iter_archive(self, path):
def _iter_archive_members(path):
# this preserves the order of the members inside the ZIP archive
dummy_parent_path = Path(self.dummy_file).parent
relative_path = path.relative_to(dummy_parent_path)
with ZipFile(self.local_path_to_dummy_data) as zip_file:
members = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix()):
yield dummy_parent_path.joinpath(member)
path = Path(path)
file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__")):
yield file_path.relative_to(path).as_posix(), file_path.open("rb")
def iter_files(self, paths):
if not isinstance(paths, list):
paths = [paths]
for path in paths:
if os.path.isfile(path):
yield path
else:
for dirpath, dirnames, filenames in os.walk(path):
if os.path.basename(dirpath).startswith((".", "__")):
continue
dirnames.sort()
for filename in sorted(filenames):
if filename.startswith((".", "__")):
continue
yield os.path.join(dirpath, filename)
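# Illustrative sketch (editor's note): how `MockDownloadManager` rewrites URLs into paths
# inside the dummy data folder. The dataset name and URLs are hypothetical; with
# `load_existing_dummy_data=False` only path manipulation happens, nothing is downloaded.
def _example_mock_download_manager():
    dl_manager = MockDownloadManager(
        dataset_name="my_dataset", config=None, version="1.0.0", load_existing_dummy_data=False
    )
    return dl_manager.download_and_extract(
        {"train": "https://host.com/train.json?dl=1", "test": "https://host.com/test.json?dl=1"}
    )
    # On a POSIX system this returns:
    # {"train": "dummy_data/train.json%3Fdl%3D1", "test": "dummy_data/test.json%3Fdl%3D1"}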
| datasets-main | src/datasets/download/mock_download_manager.py |
import copy
import warnings
from dataclasses import InitVar, dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
            If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
        proxies (`dict`, *optional*):
            Proxy configuration to pass to the HTTP requests, mapping protocol names to proxy URLs.
        user_agent (`str`, *optional*):
            Optional string that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
            If `True` and the path points to a zip or tar file,
extract the compressed file in a folder along the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
        max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
<Deprecated version="2.14.0">
`use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
</Deprecated>
ignore_url_params (`bool`, defaults to `False`):
Whether to strip all query parameters and fragments from
the download URL before using it for caching the file.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
A description to be displayed alongside with the progress bar while downloading the files.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
token: Optional[Union[str, bool]] = None
use_auth_token: InitVar[Optional[Union[str, bool]]] = "deprecated"
ignore_url_params: bool = False
storage_options: Dict[str, Any] = field(default_factory=dict)
download_desc: Optional[str] = None
def __post_init__(self, use_auth_token):
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
self.token = use_auth_token
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT}
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def __setattr__(self, name, value):
if name == "token" and getattr(self, "storage_options", None) is not None:
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT}
            elif self.storage_options["hf"].get("token") is None:
self.storage_options["hf"]["token"] = value
super().__setattr__(name, value)
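# Illustrative sketch (editor's note): constructing a `DownloadConfig` and checking how the
# token is mirrored into `storage_options["hf"]` by `__post_init__`/`__setattr__`. The
# token value and cache directory are hypothetical placeholders.
def _example_download_config():
    download_config = DownloadConfig(cache_dir="/tmp/datasets_cache", max_retries=3, token="hf_xxx")
    # The "hf" entry is filled in automatically so fsspec-based Hub access is authenticated.
    assert download_config.storage_options["hf"]["token"] == "hf_xxx"
    return download_config.copy()  # deep-copies every field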
| datasets-main | src/datasets/download/download_config.py |
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| datasets-main | src/datasets/download/__init__.py |
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Download manager interface."""
import enum
import io
import os
import posixpath
import tarfile
import warnings
import zipfile
from datetime import datetime
from functools import partial
from itertools import chain
from typing import Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union
from .. import config
from ..utils.deprecation_utils import DeprecatedEnum, deprecated
from ..utils.file_utils import cached_path, get_from_cache, hash_url_to_filename, is_relative_path, url_or_path_join
from ..utils.info_utils import get_size_checksum_dict
from ..utils.logging import get_logger, is_progress_bar_enabled, tqdm
from ..utils.py_utils import NestedDataStructure, map_nested, size_str
from .download_config import DownloadConfig
logger = get_logger(__name__)
BASE_KNOWN_EXTENSIONS = [
"txt",
"csv",
"json",
"jsonl",
"tsv",
"conll",
"conllu",
"orig",
"parquet",
"pkl",
"pickle",
"rel",
"xml",
]
MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = {
bytes.fromhex("504B0304"): "zip",
bytes.fromhex("504B0506"): "zip", # empty archive
bytes.fromhex("504B0708"): "zip", # spanned archive
bytes.fromhex("425A68"): "bz2",
bytes.fromhex("1F8B"): "gzip",
bytes.fromhex("FD377A585A00"): "xz",
bytes.fromhex("04224D18"): "lz4",
bytes.fromhex("28B52FFD"): "zstd",
}
MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = {
b"Rar!": "rar",
}
MAGIC_NUMBER_MAX_LENGTH = max(
len(magic_number)
for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL)
)
class DownloadMode(enum.Enum):
"""`Enum` for how to treat pre-existing downloads and data.
The default mode is `REUSE_DATASET_IF_EXISTS`, which will reuse both
raw downloads and the prepared dataset if they exist.
    The generation modes:
| | Downloads | Dataset |
|-------------------------------------|-----------|---------|
| `REUSE_DATASET_IF_EXISTS` (default) | Reuse | Reuse |
| `REUSE_CACHE_IF_EXISTS` | Reuse | Fresh |
| `FORCE_REDOWNLOAD` | Fresh | Fresh |
"""
REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists"
REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists"
FORCE_REDOWNLOAD = "force_redownload"
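# Illustrative sketch (editor's note): `DownloadMode` is typically passed to
# `datasets.load_dataset` through its `download_mode` argument. The dataset name "squad"
# is only an example, and the call needs network access, so it lives in a helper that is
# never invoked at import time.
def _example_force_redownload():
    from datasets import load_dataset

    return load_dataset("squad", download_mode=DownloadMode.FORCE_REDOWNLOAD)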
class GenerateMode(DeprecatedEnum):
REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists"
REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists"
FORCE_REDOWNLOAD = "force_redownload"
@property
def help_message(self):
return "Use 'DownloadMode' instead."
def _get_path_extension(path: str) -> str:
# Get extension: train.json.gz -> gz
extension = path.split(".")[-1]
# Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz
# Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt
for symb in "?-_":
extension = extension.split(symb)[0]
return extension
def _get_extraction_protocol_with_magic_number(f) -> Optional[str]:
"""read the magic number from a file-like object and return the compression protocol"""
# Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440)
try:
f.seek(0)
except (AttributeError, io.UnsupportedOperation):
return None
magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH)
f.seek(0)
for i in range(MAGIC_NUMBER_MAX_LENGTH):
compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
if compression is not None:
return compression
compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
if compression is not None:
raise NotImplementedError(f"Compression protocol '{compression}' not implemented.")
def _get_extraction_protocol(path: str) -> Optional[str]:
path = str(path)
extension = _get_path_extension(path)
# TODO(mariosasko): The below check will be useful once we can preserve the original extension in the new cache layout (use the `filename` parameter of `hf_hub_download`)
if (
extension in BASE_KNOWN_EXTENSIONS
or extension in ["tgz", "tar"]
or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz"))
):
return None
with open(path, "rb") as f:
return _get_extraction_protocol_with_magic_number(f)
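# Illustrative sketch (editor's note): the magic-number sniffing above in action. A gzip
# stream starts with the bytes 1F 8B, so the protocol is detected even when the file name
# carries no informative extension. The file is a throwaway temporary file.
def _example_extraction_protocol():
    import gzip
    import tempfile

    with tempfile.NamedTemporaryFile(suffix=".bin", delete=False) as tmp:
        tmp.write(gzip.compress(b"hello"))
        path = tmp.name
    return _get_extraction_protocol(path)  # -> "gzip"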
class _IterableFromGenerator(Iterable):
"""Utility class to create an iterable from a generator function, in order to reset the generator when needed."""
def __init__(self, generator: Callable, *args, **kwargs):
self.generator = generator
self.args = args
self.kwargs = kwargs
def __iter__(self):
yield from self.generator(*self.args, **self.kwargs)
class ArchiveIterable(_IterableFromGenerator):
"""An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`"""
@staticmethod
def _iter_tar(f):
stream = tarfile.open(fileobj=f, mode="r|*")
for tarinfo in stream:
file_path = tarinfo.name
if not tarinfo.isreg():
continue
if file_path is None:
continue
if os.path.basename(file_path).startswith((".", "__")):
# skipping hidden files
continue
file_obj = stream.extractfile(tarinfo)
yield file_path, file_obj
stream.members = []
del stream
@staticmethod
def _iter_zip(f):
zipf = zipfile.ZipFile(f)
for member in zipf.infolist():
file_path = member.filename
if member.is_dir():
continue
if file_path is None:
continue
if os.path.basename(file_path).startswith((".", "__")):
# skipping hidden files
continue
file_obj = zipf.open(member)
yield file_path, file_obj
@classmethod
def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]:
compression = _get_extraction_protocol_with_magic_number(f)
if compression == "zip":
yield from cls._iter_zip(f)
else:
yield from cls._iter_tar(f)
@classmethod
def _iter_from_path(cls, urlpath: str) -> Generator[Tuple, None, None]:
compression = _get_extraction_protocol(urlpath)
with open(urlpath, "rb") as f:
if compression == "zip":
yield from cls._iter_zip(f)
else:
yield from cls._iter_tar(f)
@classmethod
def from_buf(cls, fileobj) -> "ArchiveIterable":
return cls(cls._iter_from_fileobj, fileobj)
@classmethod
def from_path(cls, urlpath_or_buf) -> "ArchiveIterable":
return cls(cls._iter_from_path, urlpath_or_buf)
class FilesIterable(_IterableFromGenerator):
"""An iterable of paths from a list of directories or files"""
@classmethod
def _iter_from_paths(cls, urlpaths: Union[str, List[str]]) -> Generator[str, None, None]:
if not isinstance(urlpaths, list):
urlpaths = [urlpaths]
for urlpath in urlpaths:
if os.path.isfile(urlpath):
yield urlpath
else:
for dirpath, dirnames, filenames in os.walk(urlpath):
# in-place modification to prune the search
dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))])
if os.path.basename(dirpath).startswith((".", "__")):
# skipping hidden directories
continue
for filename in sorted(filenames):
if filename.startswith((".", "__")):
# skipping hidden files
continue
yield os.path.join(dirpath, filename)
@classmethod
def from_paths(cls, urlpaths) -> "FilesIterable":
return cls(cls._iter_from_paths, urlpaths)
class DownloadManager:
is_streaming = False
def __init__(
self,
dataset_name: Optional[str] = None,
data_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
base_path: Optional[str] = None,
record_checksums=True,
):
"""Download manager constructor.
Args:
data_dir:
can be used to specify a manual directory to get the files from.
dataset_name (`str`):
                name of the dataset this instance will be used for. If
                provided, downloaded files are recorded as being used for this dataset.
download_config (`DownloadConfig`):
to specify the cache directory and other
download options
base_path (`str`):
base path that is used when relative paths are used to
download files. This can be a remote url.
record_checksums (`bool`, defaults to `True`):
Whether to record the checksums of the downloaded files. If None, the value is inferred from the builder.
"""
self._dataset_name = dataset_name
self._data_dir = data_dir
self._base_path = base_path or os.path.abspath(".")
# To record what is being used: {url: {num_bytes: int, checksum: str}}
self._recorded_sizes_checksums: Dict[str, Dict[str, Optional[Union[int, str]]]] = {}
self.record_checksums = record_checksums
self.download_config = download_config or DownloadConfig()
self.downloaded_paths = {}
self.extracted_paths = {}
@property
def manual_dir(self):
return self._data_dir
@property
def downloaded_size(self):
"""Returns the total size of downloaded files."""
return sum(checksums_dict["num_bytes"] for checksums_dict in self._recorded_sizes_checksums.values())
@staticmethod
def ship_files_with_pipeline(downloaded_path_or_paths, pipeline):
"""Ship the files using Beam FileSystems to the pipeline temp dir.
Args:
downloaded_path_or_paths (`str` or `list[str]` or `dict[str, str]`):
Nested structure containing the
downloaded path(s).
pipeline ([`utils.beam_utils.BeamPipeline`]):
Apache Beam Pipeline.
Returns:
`str` or `list[str]` or `dict[str, str]`
"""
from ..utils.beam_utils import upload_local_to_remote
remote_dir = pipeline._options.get_all_options().get("temp_location")
if remote_dir is None:
raise ValueError("You need to specify 'temp_location' in PipelineOptions to upload files")
def upload(local_file_path):
remote_file_path = posixpath.join(
remote_dir, config.DOWNLOADED_DATASETS_DIR, os.path.basename(local_file_path)
)
logger.info(
f"Uploading {local_file_path} ({size_str(os.path.getsize(local_file_path))}) to {remote_file_path}."
)
upload_local_to_remote(local_file_path, remote_file_path)
return remote_file_path
uploaded_path_or_paths = map_nested(
lambda local_file_path: upload(local_file_path),
downloaded_path_or_paths,
disable_tqdm=not is_progress_bar_enabled(),
)
return uploaded_path_or_paths
def _record_sizes_checksums(self, url_or_urls: NestedDataStructure, downloaded_path_or_paths: NestedDataStructure):
"""Record size/checksum of downloaded files."""
delay = 5
for url, path in tqdm(
list(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())),
delay=delay,
desc="Computing checksums",
disable=not is_progress_bar_enabled(),
):
# call str to support PathLike objects
self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict(
path, record_checksum=self.record_checksums
)
@deprecated("Use `.download`/`.download_and_extract` with `fsspec` URLs instead.")
def download_custom(self, url_or_urls, custom_download):
"""
Download given urls(s) by calling `custom_download`.
Args:
url_or_urls (`str` or `list` or `dict`):
URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`.
custom_download (`Callable[src_url, dst_path]`):
                A callable that takes a source URL and a destination path, for example
                `tf.io.gfile.copy`, which lets you download from Google storage.
Returns:
downloaded_path(s): `str`, The downloaded paths matching the given input
`url_or_urls`.
Example:
```py
>>> downloaded_files = dl_manager.download_custom('s3://my-bucket/data.zip', custom_download_for_my_private_bucket)
```
"""
cache_dir = self.download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH
max_retries = self.download_config.max_retries
def url_to_downloaded_path(url):
return os.path.join(cache_dir, hash_url_to_filename(url))
downloaded_path_or_paths = map_nested(
url_to_downloaded_path, url_or_urls, disable_tqdm=not is_progress_bar_enabled()
)
url_or_urls = NestedDataStructure(url_or_urls)
downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
for url, path in zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()):
try:
get_from_cache(
url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries
)
cached = True
except FileNotFoundError:
cached = False
if not cached or self.download_config.force_download:
custom_download(url, path)
get_from_cache(
url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries
)
self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
return downloaded_path_or_paths.data
def download(self, url_or_urls):
"""Download given URL(s).
By default, only one process is used for download. Pass customized `download_config.num_proc` to change this behavior.
Args:
url_or_urls (`str` or `list` or `dict`):
URL or `list` or `dict` of URLs to download. Each URL is a `str`.
Returns:
`str` or `list` or `dict`:
The downloaded paths matching the given input `url_or_urls`.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
```
"""
download_config = self.download_config.copy()
download_config.extract_compressed_file = False
if download_config.download_desc is None:
download_config.download_desc = "Downloading data"
download_func = partial(self._download, download_config=download_config)
start_time = datetime.now()
downloaded_path_or_paths = map_nested(
download_func,
url_or_urls,
map_tuple=True,
num_proc=download_config.num_proc,
disable_tqdm=not is_progress_bar_enabled(),
desc="Downloading data files",
)
duration = datetime.now() - start_time
logger.info(f"Downloading took {duration.total_seconds() // 60} min")
url_or_urls = NestedDataStructure(url_or_urls)
downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
self.downloaded_paths.update(dict(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())))
start_time = datetime.now()
self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
duration = datetime.now() - start_time
logger.info(f"Checksum Computation took {duration.total_seconds() // 60} min")
return downloaded_path_or_paths.data
def _download(self, url_or_filename: str, download_config: DownloadConfig) -> str:
url_or_filename = str(url_or_filename)
if is_relative_path(url_or_filename):
# append the relative path to the base_path
url_or_filename = url_or_path_join(self._base_path, url_or_filename)
return cached_path(url_or_filename, download_config=download_config)
def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]):
"""Iterate over files within an archive.
Args:
path_or_buf (`str` or `io.BufferedReader`):
Archive path or archive binary file object.
Yields:
`tuple[str, io.BufferedReader]`:
2-tuple (path_within_archive, file_object).
File object is opened in binary mode.
Example:
```py
>>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> files = dl_manager.iter_archive(archive)
```
"""
if hasattr(path_or_buf, "read"):
return ArchiveIterable.from_buf(path_or_buf)
else:
return ArchiveIterable.from_path(path_or_buf)
def iter_files(self, paths: Union[str, List[str]]):
"""Iterate over file paths.
Args:
paths (`str` or `list` of `str`):
Root paths.
Yields:
`str`: File path.
Example:
```py
>>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
>>> files = dl_manager.iter_files(files)
```
"""
return FilesIterable.from_paths(paths)
def extract(self, path_or_paths, num_proc="deprecated"):
"""Extract given path(s).
Args:
path_or_paths (path or `list` or `dict`):
Path of file to extract. Each path is a `str`.
num_proc (`int`):
Use multi-processing if `num_proc` > 1 and the length of
`path_or_paths` is larger than `num_proc`.
<Deprecated version="2.6.2">
Pass `DownloadConfig(num_proc=<num_proc>)` to the initializer instead.
</Deprecated>
Returns:
extracted_path(s): `str`, The extracted paths matching the given input
path_or_paths.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> extracted_files = dl_manager.extract(downloaded_files)
```
"""
if num_proc != "deprecated":
warnings.warn(
"'num_proc' was deprecated in version 2.6.2 and will be removed in 3.0.0. Pass `DownloadConfig(num_proc=<num_proc>)` to the initializer instead.",
FutureWarning,
)
download_config = self.download_config.copy()
download_config.extract_compressed_file = True
# Extract downloads the file first if it is not already downloaded
if download_config.download_desc is None:
download_config.download_desc = "Downloading data"
extracted_paths = map_nested(
partial(cached_path, download_config=download_config),
path_or_paths,
num_proc=download_config.num_proc,
disable_tqdm=not is_progress_bar_enabled(),
desc="Extracting data files",
)
path_or_paths = NestedDataStructure(path_or_paths)
extracted_paths = NestedDataStructure(extracted_paths)
self.extracted_paths.update(dict(zip(path_or_paths.flatten(), extracted_paths.flatten())))
return extracted_paths.data
def download_and_extract(self, url_or_urls):
"""Download and extract given `url_or_urls`.
Is roughly equivalent to:
```
extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls (`str` or `list` or `dict`):
URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`.
Returns:
extracted_path(s): `str`, extracted paths of given URL(s).
"""
return self.extract(self.download(url_or_urls))
def get_recorded_sizes_checksums(self):
return self._recorded_sizes_checksums.copy()
def delete_extracted_files(self):
paths_to_delete = set(self.extracted_paths.values()) - set(self.downloaded_paths.values())
for key, path in list(self.extracted_paths.items()):
if path in paths_to_delete and os.path.isfile(path):
os.remove(path)
del self.extracted_paths[key]
def manage_extracted_files(self):
if self.download_config.delete_extracted:
self.delete_extracted_files()
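# Illustrative sketch (editor's note): typical use of `DownloadManager` outside of a
# dataset builder. The dataset name and URL are hypothetical, and the call needs network
# access, so it lives in a helper that is never invoked at import time.
def _example_download_manager():
    dl_manager = DownloadManager(dataset_name="my_dataset", record_checksums=False)
    extracted_dir = dl_manager.download_and_extract("https://host.com/data.zip")
    # File sizes (and checksums, when `record_checksums=True`) are recorded per URL.
    return extracted_dir, dl_manager.get_recorded_sizes_checksums()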
| datasets-main | src/datasets/download/download_manager.py |
import glob
import io
import os
import posixpath
import re
import tarfile
import time
import xml.dom.minidom
import zipfile
from asyncio import TimeoutError
from io import BytesIO
from itertools import chain
from pathlib import Path, PurePosixPath
from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union
from xml.etree import ElementTree as ET
import fsspec
from aiohttp.client_exceptions import ClientError
from .. import config
from ..filesystems import COMPRESSION_FILESYSTEMS
from ..utils.file_utils import (
get_authentication_headers_for_url,
get_datasets_user_agent,
http_head,
is_local_path,
is_relative_path,
url_or_path_join,
)
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .download_config import DownloadConfig
logger = get_logger(__name__)
BASE_KNOWN_EXTENSIONS = [
"txt",
"csv",
"json",
"jsonl",
"tsv",
"conll",
"conllu",
"orig",
"parquet",
"pkl",
"pickle",
"rel",
"xml",
]
COMPRESSION_EXTENSION_TO_PROTOCOL = {
# single file compression
**{fs_class.extension.lstrip("."): fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS},
# archive compression
"zip": "zip",
}
SINGLE_FILE_COMPRESSION_PROTOCOLS = {fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS}
SINGLE_SLASH_AFTER_PROTOCOL_PATTERN = re.compile(r"(?<!:):/")
MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = {
bytes.fromhex("504B0304"): "zip",
bytes.fromhex("504B0506"): "zip", # empty archive
bytes.fromhex("504B0708"): "zip", # spanned archive
bytes.fromhex("425A68"): "bz2",
bytes.fromhex("1F8B"): "gzip",
bytes.fromhex("FD377A585A00"): "xz",
bytes.fromhex("04224D18"): "lz4",
bytes.fromhex("28B52FFD"): "zstd",
}
MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = {
b"Rar!": "rar",
}
MAGIC_NUMBER_MAX_LENGTH = max(
len(magic_number)
for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL)
)
class NonStreamableDatasetError(Exception):
pass
def xjoin(a, *p):
"""
This function extends os.path.join to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xjoin function allows you to apply the join on the first path of the chain.
Example::
>>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt")
zip://folder1/file.txt::https://host.com/archive.zip
"""
a, *b = str(a).split("::")
if is_local_path(a):
return os.path.join(a, *p)
else:
a = posixpath.join(a, *p)
return "::".join([a] + b)
def xdirname(a):
"""
This function extends os.path.dirname to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xdirname function allows you to apply the dirname on the first path of the chain.
Example::
>>> xdirname("zip://folder1/file.txt::https://host.com/archive.zip")
zip://folder1::https://host.com/archive.zip
"""
a, *b = str(a).split("::")
if is_local_path(a):
a = os.path.dirname(Path(a).as_posix())
else:
a = posixpath.dirname(a)
# if we end up at the root of the protocol, we get for example a = 'http:'
# so we have to fix it by adding the '//' that was removed:
if a.endswith(":"):
a += "//"
return "::".join([a] + b)
def xexists(urlpath: str, download_config: Optional[DownloadConfig] = None):
"""Extend `os.path.exists` function to support both local and remote files.
Args:
urlpath (`str`): URL path.
        download_config (`DownloadConfig`, *optional*):
            Download configuration; mainly used to provide `token` or `storage_options` to support different platforms and auth types.
Returns:
`bool`
"""
main_hop, *rest_hops = _as_str(urlpath).split("::")
if is_local_path(main_hop):
return os.path.exists(main_hop)
else:
urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options)
return fs.exists(main_hop)
def xbasename(a):
"""
This function extends os.path.basename to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xbasename function allows you to apply the basename on the first path of the chain.
Example::
>>> xbasename("zip://folder1/file.txt::https://host.com/archive.zip")
file.txt
"""
a, *b = str(a).split("::")
if is_local_path(a):
return os.path.basename(Path(a).as_posix())
else:
return posixpath.basename(a)
def xsplit(a):
"""
This function extends os.path.split to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xsplit function allows you to apply the xsplit on the first path of the chain.
Example::
>>> xsplit("zip://folder1/file.txt::https://host.com/archive.zip")
('zip://folder1::https://host.com/archive.zip', 'file.txt')
"""
a, *b = str(a).split("::")
if is_local_path(a):
return os.path.split(Path(a).as_posix())
else:
a, tail = posixpath.split(a)
return "::".join([a + "//" if a.endswith(":") else a] + b), tail
def xsplitext(a):
"""
This function extends os.path.splitext to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xsplitext function allows you to apply the splitext on the first path of the chain.
Example::
>>> xsplitext("zip://folder1/file.txt::https://host.com/archive.zip")
('zip://folder1/file::https://host.com/archive.zip', '.txt')
"""
a, *b = str(a).split("::")
if is_local_path(a):
return os.path.splitext(Path(a).as_posix())
else:
a, ext = posixpath.splitext(a)
return "::".join([a] + b), ext
def xisfile(path, download_config: Optional[DownloadConfig] = None) -> bool:
"""Extend `os.path.isfile` function to support remote files.
Args:
path (`str`): URL path.
        download_config (`DownloadConfig`, *optional*):
            Download configuration; mainly used to provide `token` or `storage_options` to support different platforms and auth types.
Returns:
`bool`
"""
main_hop, *rest_hops = str(path).split("::")
if is_local_path(main_hop):
return os.path.isfile(path)
else:
path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
return fs.isfile(main_hop)
def xgetsize(path, download_config: Optional[DownloadConfig] = None) -> int:
"""Extend `os.path.getsize` function to support remote files.
Args:
path (`str`): URL path.
        download_config (`DownloadConfig`, *optional*):
            Download configuration; mainly used to provide `token` or `storage_options` to support different platforms and auth types.
Returns:
        `int`: Size of the file in bytes.
"""
main_hop, *rest_hops = str(path).split("::")
if is_local_path(main_hop):
return os.path.getsize(path)
else:
path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
size = fs.size(main_hop)
if size is None:
# use xopen instead of fs.open to make data fetching more robust
with xopen(path, download_config=download_config) as f:
size = len(f.read())
return size
def xisdir(path, download_config: Optional[DownloadConfig] = None) -> bool:
"""Extend `os.path.isdir` function to support remote files.
Args:
path (`str`): URL path.
        download_config (`DownloadConfig`, *optional*):
            Download configuration; mainly used to provide `token` or `storage_options` to support different platforms and auth types.
Returns:
`bool`
"""
main_hop, *rest_hops = str(path).split("::")
if is_local_path(main_hop):
return os.path.isdir(path)
else:
path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
inner_path = main_hop.split("://")[1]
if not inner_path.strip("/"):
return True
return fs.isdir(inner_path)
def xrelpath(path, start=None):
"""Extend `os.path.relpath` function to support remote files.
Args:
path (`str`): URL path.
start (`str`): Start URL directory path.
Returns:
`str`
"""
main_hop, *rest_hops = str(path).split("::")
if is_local_path(main_hop):
return os.path.relpath(main_hop, start=start) if start else os.path.relpath(main_hop)
else:
return posixpath.relpath(main_hop, start=str(start).split("::")[0]) if start else os.path.relpath(main_hop)
def _add_retries_to_file_obj_read_method(file_obj):
read = file_obj.read
max_retries = config.STREAMING_READ_MAX_RETRIES
def read_with_retries(*args, **kwargs):
disconnect_err = None
for retry in range(1, max_retries + 1):
try:
out = read(*args, **kwargs)
break
except (ClientError, TimeoutError) as err:
disconnect_err = err
logger.warning(
f"Got disconnected from remote data host. Retrying in {config.STREAMING_READ_RETRY_INTERVAL}sec [{retry}/{max_retries}]"
)
time.sleep(config.STREAMING_READ_RETRY_INTERVAL)
else:
raise ConnectionError("Server Disconnected") from disconnect_err
return out
file_obj.read = read_with_retries
def _get_path_extension(path: str) -> str:
# Get extension: https://foo.bar/train.json.gz -> gz
extension = path.split(".")[-1]
# Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz
# Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt
for symb in "?-_":
extension = extension.split(symb)[0]
return extension
def _get_extraction_protocol_with_magic_number(f) -> Optional[str]:
"""read the magic number from a file-like object and return the compression protocol"""
# Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440)
try:
f.seek(0)
except (AttributeError, io.UnsupportedOperation):
return None
magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH)
f.seek(0)
for i in range(MAGIC_NUMBER_MAX_LENGTH):
compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
if compression is not None:
return compression
compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
if compression is not None:
raise NotImplementedError(f"Compression protocol '{compression}' not implemented.")
def _get_extraction_protocol(urlpath: str, download_config: Optional[DownloadConfig] = None) -> Optional[str]:
# get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz
urlpath = str(urlpath)
path = urlpath.split("::")[0]
extension = _get_path_extension(path)
if (
extension in BASE_KNOWN_EXTENSIONS
or extension in ["tgz", "tar"]
or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz"))
):
return None
elif extension in COMPRESSION_EXTENSION_TO_PROTOCOL:
return COMPRESSION_EXTENSION_TO_PROTOCOL[extension]
urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
try:
with fsspec.open(urlpath, **(storage_options or {})) as f:
return _get_extraction_protocol_with_magic_number(f)
except FileNotFoundError:
if urlpath.startswith(config.HF_ENDPOINT):
raise FileNotFoundError(
urlpath + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`."
) from None
else:
raise
def _prepare_path_and_storage_options(
urlpath: str, download_config: Optional[DownloadConfig] = None
) -> Tuple[str, Dict[str, Dict[str, Any]]]:
prepared_urlpath = []
prepared_storage_options = {}
for hop in urlpath.split("::"):
hop, storage_options = _prepare_single_hop_path_and_storage_options(hop, download_config=download_config)
prepared_urlpath.append(hop)
prepared_storage_options.update(storage_options)
return "::".join(prepared_urlpath), storage_options
def _prepare_single_hop_path_and_storage_options(
urlpath: str, download_config: Optional[DownloadConfig] = None
) -> Tuple[str, Dict[str, Dict[str, Any]]]:
"""
Prepare the URL and the kwargs that must be passed to the HttpFileSystem or to requests.get/head
In particular it resolves google drive URLs
It also adds the authentication headers for the Hugging Face Hub, for both https:// and hf:// paths.
Storage options are formatted in the form {protocol: storage_options_for_protocol}
"""
token = None if download_config is None else download_config.token
protocol = urlpath.split("://")[0] if "://" in urlpath else "file"
if download_config is not None and protocol in download_config.storage_options:
storage_options = download_config.storage_options[protocol]
elif download_config is not None and protocol not in download_config.storage_options:
storage_options = {
option_name: option_value
for option_name, option_value in download_config.storage_options.items()
if option_name not in fsspec.available_protocols()
}
else:
storage_options = {}
if storage_options:
storage_options = {protocol: storage_options}
if protocol in ["http", "https"]:
storage_options[protocol] = {
"headers": {
**get_authentication_headers_for_url(urlpath, token=token),
"user-agent": get_datasets_user_agent(),
},
"client_kwargs": {"trust_env": True}, # Enable reading proxy env variables.
**(storage_options.get(protocol, {})),
}
if "drive.google.com" in urlpath:
response = http_head(urlpath)
cookies = None
for k, v in response.cookies.items():
if k.startswith("download_warning"):
urlpath += "&confirm=" + v
cookies = response.cookies
storage_options[protocol] = {"cookies": cookies, **storage_options.get(protocol, {})}
# Fix Google Drive URL to avoid Virus scan warning
if "drive.google.com" in urlpath and "confirm=" not in urlpath:
urlpath += "&confirm=t"
if urlpath.startswith("https://raw.githubusercontent.com/"):
# Workaround for served data with gzip content-encoding: https://github.com/fsspec/filesystem_spec/issues/389
storage_options[protocol]["headers"]["Accept-Encoding"] = "identity"
elif protocol == "hf":
storage_options[protocol] = {
"token": token,
"endpoint": config.HF_ENDPOINT,
**storage_options.get(protocol, {}),
}
return urlpath, storage_options
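# Illustrative sketch (editor's note): the shape of the storage options produced above for
# a plain https:// hop with no DownloadConfig. The URL is a hypothetical placeholder; no
# network request is made, only headers are assembled.
def _example_prepared_storage_options():
    urlpath, storage_options = _prepare_single_hop_path_and_storage_options("https://host.com/data.json")
    # storage_options is keyed by protocol, roughly:
    # {"https": {"headers": {"user-agent": ...}, "client_kwargs": {"trust_env": True}}}
    return urlpath, storage_options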
def xopen(file: str, mode="r", *args, download_config: Optional[DownloadConfig] = None, **kwargs):
"""Extend `open` function to support remote files using `fsspec`.
It also has a retry mechanism in case connection fails.
The `args` and `kwargs` are passed to `fsspec.open`, except `token` which is used for queries to private repos on huggingface.co
Args:
file (`str`): Path name of the file to be opened.
mode (`str`, *optional*, default "r"): Mode in which the file is opened.
*args: Arguments to be passed to `fsspec.open`.
        download_config (`DownloadConfig`, *optional*):
            Download configuration; mainly used to provide `token` or `storage_options` to support different platforms and auth types.
**kwargs: Keyword arguments to be passed to `fsspec.open`.
Returns:
file object
"""
# This works as well for `xopen(str(Path(...)))`
file_str = _as_str(file)
main_hop, *rest_hops = file_str.split("::")
if is_local_path(main_hop):
return open(main_hop, mode, *args, **kwargs)
# add headers and cookies for authentication on the HF Hub and for Google Drive
file, storage_options = _prepare_path_and_storage_options(file_str, download_config=download_config)
kwargs = {**kwargs, **(storage_options or {})}
try:
file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open()
except ValueError as e:
if str(e) == "Cannot seek streaming HTTP file":
raise NonStreamableDatasetError(
"Streaming is not possible for this dataset because data host server doesn't support HTTP range "
"requests. You can still load this dataset in non-streaming mode by passing `streaming=False` (default)"
) from e
else:
raise
except FileNotFoundError:
if file.startswith(config.HF_ENDPOINT):
raise FileNotFoundError(
file + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`."
) from None
else:
raise
_add_retries_to_file_obj_read_method(file_obj)
return file_obj
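# Illustrative sketch (editor's note): opening a file inside a remote zip archive with
# `xopen` and a "::"-chained URL. The host and file names are hypothetical, and the call
# needs network access, so it lives in a helper that is never invoked at import time.
def _example_xopen_chained_url():
    url = "zip://folder1/file.txt::https://host.com/archive.zip"
    with xopen(url, "rb") as f:
        return f.read()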
def xlistdir(path: str, download_config: Optional[DownloadConfig] = None) -> List[str]:
"""Extend `os.listdir` function to support remote files.
Args:
path (`str`): URL path.
        download_config (`DownloadConfig`, *optional*):
            Download configuration; mainly used to provide `token` or `storage_options` to support different platforms and auth types.
Returns:
`list` of `str`
"""
main_hop, *rest_hops = _as_str(path).split("::")
if is_local_path(main_hop):
return os.listdir(path)
else:
# globbing inside a zip in a private repo requires authentication
path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
inner_path = main_hop.split("://")[1]
if inner_path.strip("/") and not fs.isdir(inner_path):
raise FileNotFoundError(f"Directory doesn't exist: {path}")
objects = fs.listdir(inner_path)
return [os.path.basename(obj["name"]) for obj in objects]
def xglob(urlpath, *, recursive=False, download_config: Optional[DownloadConfig] = None):
"""Extend `glob.glob` function to support remote files.
Args:
urlpath (`str`): URL path with shell-style wildcard patterns.
recursive (`bool`, default `False`): Whether to match the "**" pattern recursively to zero or more
directories or subdirectories.
        download_config (`DownloadConfig`, *optional*):
            Download configuration; mainly used to provide `token` or `storage_options` to support different platforms and auth types.
Returns:
`list` of `str`
"""
main_hop, *rest_hops = _as_str(urlpath).split("::")
if is_local_path(main_hop):
return glob.glob(main_hop, recursive=recursive)
else:
# globbing inside a zip in a private repo requires authentication
urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options)
# - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching
# so to be able to glob patterns like "[0-9]", we have to call `fs.glob`.
# - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories.
# - If there is "**" in the pattern, `fs.glob` must be called anyway.
inner_path = main_hop.split("://")[1]
globbed_paths = fs.glob(inner_path)
protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1]
return ["::".join([f"{protocol}://{globbed_path}"] + rest_hops) for globbed_path in globbed_paths]
def xwalk(urlpath, download_config: Optional[DownloadConfig] = None, **kwargs):
"""Extend `os.walk` function to support remote files.
Args:
urlpath (`str`): URL root path.
        download_config (`DownloadConfig`, *optional*):
            Download configuration; mainly used to provide `token` or `storage_options` to support different platforms and auth types.
**kwargs: Additional keyword arguments forwarded to the underlying filesystem.
Yields:
`tuple`: 3-tuple (dirpath, dirnames, filenames).
"""
main_hop, *rest_hops = _as_str(urlpath).split("::")
if is_local_path(main_hop):
yield from os.walk(main_hop, **kwargs)
else:
# walking inside a zip in a private repo requires authentication
urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options)
inner_path = main_hop.split("://")[1]
if inner_path.strip("/") and not fs.isdir(inner_path):
return []
protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1]
for dirpath, dirnames, filenames in fs.walk(inner_path, **kwargs):
yield "::".join([f"{protocol}://{dirpath}"] + rest_hops), dirnames, filenames
class xPath(type(Path())):
"""Extension of `pathlib.Path` to support both local paths and remote URLs."""
def __str__(self):
path_str = super().__str__()
main_hop, *rest_hops = path_str.split("::")
if is_local_path(main_hop):
return main_hop
path_as_posix = path_str.replace("\\", "/")
path_as_posix = SINGLE_SLASH_AFTER_PROTOCOL_PATTERN.sub("://", path_as_posix)
path_as_posix += "//" if path_as_posix.endswith(":") else "" # Add slashes to root of the protocol
return path_as_posix
def exists(self, download_config: Optional[DownloadConfig] = None):
"""Extend `pathlib.Path.exists` method to support both local and remote files.
Args:
            download_config (`DownloadConfig`, *optional*):
                Download configuration; mainly used to provide `token` or `storage_options` to support different platforms and auth types.
Returns:
`bool`
"""
return xexists(str(self), download_config=download_config)
def glob(self, pattern, download_config: Optional[DownloadConfig] = None):
"""Glob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Args:
pattern (`str`): Pattern that resulting paths must match.
            download_config (`DownloadConfig`, *optional*):
                Download configuration; mainly used to provide `token` or `storage_options` to support different platforms and auth types.
Yields:
[`xPath`]
"""
posix_path = self.as_posix()
main_hop, *rest_hops = posix_path.split("::")
if is_local_path(main_hop):
yield from Path(main_hop).glob(pattern)
else:
# globbing inside a zip in a private repo requires authentication
if rest_hops:
urlpath = rest_hops[0]
urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
storage_options = {urlpath.split("://")[0]: storage_options}
posix_path = "::".join([main_hop, urlpath, *rest_hops[1:]])
else:
storage_options = None
fs, *_ = fsspec.get_fs_token_paths(xjoin(posix_path, pattern), storage_options=storage_options)
# - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching
# so to be able to glob patterns like "[0-9]", we have to call `fs.glob`.
# - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories.
# - If there is "**" in the pattern, `fs.glob` must be called anyway.
globbed_paths = fs.glob(xjoin(main_hop, pattern))
for globbed_path in globbed_paths:
yield type(self)("::".join([f"{fs.protocol}://{globbed_path}"] + rest_hops))
def rglob(self, pattern, **kwargs):
"""Rglob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Args:
pattern (`str`): Pattern that resulting paths must match.
Yields:
[`xPath`]
"""
return self.glob("**/" + pattern, **kwargs)
@property
def parent(self) -> "xPath":
"""Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
[`xPath`]
"""
return type(self)(xdirname(self.as_posix()))
@property
def name(self) -> str:
"""Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
`str`
"""
return PurePosixPath(self.as_posix().split("::")[0]).name
@property
def stem(self) -> str:
"""Stem function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
`str`
"""
return PurePosixPath(self.as_posix().split("::")[0]).stem
@property
def suffix(self) -> str:
"""Suffix function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
`str`
"""
return PurePosixPath(self.as_posix().split("::")[0]).suffix
def open(self, *args, **kwargs):
"""Extend :func:`xopen` to support argument of type :obj:`~pathlib.Path`.
Args:
            *args: Arguments passed to :func:`fsspec.open`.
**kwargs: Keyword arguments passed to :func:`fsspec.open`.
Returns:
`io.FileIO`: File-like object.
"""
return xopen(str(self), *args, **kwargs)
def joinpath(self, *p: Tuple[str, ...]) -> "xPath":
"""Extend :func:`xjoin` to support argument of type :obj:`~pathlib.Path`.
Args:
*p (`tuple` of `str`): Other path components.
Returns:
[`xPath`]
"""
return type(self)(xjoin(self.as_posix(), *p))
def __truediv__(self, p: str) -> "xPath":
return self.joinpath(p)
def with_suffix(self, suffix):
main_hop, *rest_hops = str(self).split("::")
if is_local_path(main_hop):
return type(self)(str(super().with_suffix(suffix)))
return type(self)("::".join([type(self)(PurePosixPath(main_hop).with_suffix(suffix)).as_posix()] + rest_hops))
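# Illustrative sketch (editor's note): `xPath` mirrors the `pathlib.Path` API on
# "::"-chained URLs. The archive URL is hypothetical; these accessors are pure path
# manipulation, no network access.
def _example_xpath():
    path = xPath("zip://folder1/file.txt::https://host.com/archive.zip")
    # -> ('file.txt', '.txt', 'zip://folder1::https://host.com/archive.zip')
    return path.name, path.suffix, str(path.parent)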
def _as_str(path: Union[str, Path, xPath]):
return str(path) if isinstance(path, xPath) else str(xPath(str(path)))
def xgzip_open(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs):
import gzip
if hasattr(filepath_or_buffer, "read"):
return gzip.open(filepath_or_buffer, *args, **kwargs)
else:
filepath_or_buffer = str(filepath_or_buffer)
return gzip.open(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs)
def xnumpy_load(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs):
import numpy as np
if hasattr(filepath_or_buffer, "read"):
return np.load(filepath_or_buffer, *args, **kwargs)
else:
filepath_or_buffer = str(filepath_or_buffer)
return np.load(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs)
def xpandas_read_csv(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs):
import pandas as pd
if hasattr(filepath_or_buffer, "read"):
return pd.read_csv(filepath_or_buffer, **kwargs)
else:
filepath_or_buffer = str(filepath_or_buffer)
if kwargs.get("compression", "infer") == "infer":
kwargs["compression"] = _get_extraction_protocol(filepath_or_buffer, download_config=download_config)
return pd.read_csv(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs)
def xpandas_read_excel(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs):
import pandas as pd
if hasattr(filepath_or_buffer, "read"):
try:
return pd.read_excel(filepath_or_buffer, **kwargs)
except ValueError: # Cannot seek streaming HTTP file
return pd.read_excel(BytesIO(filepath_or_buffer.read()), **kwargs)
else:
filepath_or_buffer = str(filepath_or_buffer)
try:
return pd.read_excel(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs)
except ValueError: # Cannot seek streaming HTTP file
return pd.read_excel(
BytesIO(xopen(filepath_or_buffer, "rb", download_config=download_config).read()), **kwargs
)
def xsio_loadmat(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs):
import scipy.io as sio
if hasattr(filepath_or_buffer, "read"):
return sio.loadmat(filepath_or_buffer, **kwargs)
else:
return sio.loadmat(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs)
def xet_parse(source, parser=None, download_config: Optional[DownloadConfig] = None):
"""Extend `xml.etree.ElementTree.parse` function to support remote files.
Args:
source: File path or file object.
parser (`XMLParser`, *optional*, default `XMLParser`): Parser instance.
download_config (`DownloadConfig`, *optional*): Download configuration, mainly used to pass a token or storage_options to support different platforms and auth types.
Returns:
`xml.etree.ElementTree.Element`: Root element of the given source document.
"""
if hasattr(source, "read"):
return ET.parse(source, parser=parser)
else:
with xopen(source, "rb", download_config=download_config) as f:
return ET.parse(f, parser=parser)
def xxml_dom_minidom_parse(filename_or_file, download_config: Optional[DownloadConfig] = None, **kwargs):
"""Extend `xml.dom.minidom.parse` function to support remote files.
Args:
filename_or_file (`str` or file): File path or file object.
download_config (`DownloadConfig`, *optional*): Download configuration, mainly used to pass a token or storage_options to support different platforms and auth types.
**kwargs (optional): Additional keyword arguments passed to `xml.dom.minidom.parse`.
Returns:
:obj:`xml.dom.minidom.Document`: Parsed document.
"""
if hasattr(filename_or_file, "read"):
return xml.dom.minidom.parse(filename_or_file, **kwargs)
else:
with xopen(filename_or_file, "rb", download_config=download_config) as f:
return xml.dom.minidom.parse(f, **kwargs)
class _IterableFromGenerator(Iterable):
"""Utility class to create an iterable from a generator function, in order to reset the generator when needed."""
def __init__(self, generator: Callable, *args, **kwargs):
self.generator = generator
self.args = args
self.kwargs = kwargs
def __iter__(self):
yield from self.generator(*self.args, **self.kwargs)
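def _iterable_from_generator_sketch():
    # Illustrative sketch, not part of the original module: _IterableFromGenerator can be
    # iterated several times because the generator function is re-invoked on every __iter__.
    def count_up_to(n):
        yield from range(n)
    iterable = _IterableFromGenerator(count_up_to, 3)
    assert list(iterable) == [0, 1, 2]
    assert list(iterable) == [0, 1, 2]  # a plain generator would already be exhausted here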
class ArchiveIterable(_IterableFromGenerator):
"""An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`"""
@staticmethod
def _iter_tar(f):
stream = tarfile.open(fileobj=f, mode="r|*")
for tarinfo in stream:
file_path = tarinfo.name
if not tarinfo.isreg():
continue
if file_path is None:
continue
if os.path.basename(file_path).startswith((".", "__")):
# skipping hidden files
continue
file_obj = stream.extractfile(tarinfo)
yield file_path, file_obj
stream.members = []
del stream
@staticmethod
def _iter_zip(f):
zipf = zipfile.ZipFile(f)
for member in zipf.infolist():
file_path = member.filename
if member.is_dir():
continue
if file_path is None:
continue
if os.path.basename(file_path).startswith((".", "__")):
# skipping hidden files
continue
file_obj = zipf.open(member)
yield file_path, file_obj
@classmethod
def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]:
compression = _get_extraction_protocol_with_magic_number(f)
if compression == "zip":
yield from cls._iter_zip(f)
else:
yield from cls._iter_tar(f)
@classmethod
def _iter_from_urlpath(
cls, urlpath: str, download_config: Optional[DownloadConfig] = None
) -> Generator[Tuple, None, None]:
compression = _get_extraction_protocol(urlpath, download_config=download_config)
with xopen(urlpath, "rb", download_config=download_config) as f:
if compression == "zip":
yield from cls._iter_zip(f)
else:
yield from cls._iter_tar(f)
@classmethod
def from_buf(cls, fileobj) -> "ArchiveIterable":
return cls(cls._iter_from_fileobj, fileobj)
@classmethod
def from_urlpath(cls, urlpath_or_buf, download_config: Optional[DownloadConfig] = None) -> "ArchiveIterable":
return cls(cls._iter_from_urlpath, urlpath_or_buf, download_config)
class FilesIterable(_IterableFromGenerator):
"""An iterable of paths from a list of directories or files"""
@classmethod
def _iter_from_urlpaths(
cls, urlpaths: Union[str, List[str]], download_config: Optional[DownloadConfig] = None
) -> Generator[str, None, None]:
if not isinstance(urlpaths, list):
urlpaths = [urlpaths]
for urlpath in urlpaths:
if xisfile(urlpath, download_config=download_config):
yield urlpath
elif xisdir(urlpath, download_config=download_config):
for dirpath, dirnames, filenames in xwalk(urlpath, download_config=download_config):
# in-place modification to prune the search
dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))])
if xbasename(dirpath).startswith((".", "__")):
# skipping hidden directories
continue
for filename in sorted(filenames):
if filename.startswith((".", "__")):
# skipping hidden files
continue
yield xjoin(dirpath, filename)
else:
raise FileNotFoundError(urlpath)
@classmethod
def from_urlpaths(cls, urlpaths, download_config: Optional[DownloadConfig] = None) -> "FilesIterable":
return cls(cls._iter_from_urlpaths, urlpaths, download_config)
class StreamingDownloadManager:
"""
Download manager that uses the "::" separator to navigate through (possibly remote) compressed archives.
Contrary to the regular `DownloadManager`, the `download` and `extract` methods don't actually download or extract
data, but they rather return the path or url that could be opened using the `xopen` function which extends the
built-in `open` function to stream data from remote files.
"""
is_streaming = True
def __init__(
self,
dataset_name: Optional[str] = None,
data_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
base_path: Optional[str] = None,
):
self._dataset_name = dataset_name
self._data_dir = data_dir
self._base_path = base_path or os.path.abspath(".")
self.download_config = download_config or DownloadConfig()
@property
def manual_dir(self):
return self._data_dir
def download(self, url_or_urls):
"""Normalize URL(s) of files to stream data from.
This is the lazy version of `DownloadManager.download` for streaming.
Args:
url_or_urls (`str` or `list` or `dict`):
URL(s) of files to stream data from. Each url is a `str`.
Returns:
url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
```
"""
url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True)
return url_or_urls
def _download(self, urlpath: str) -> str:
urlpath = str(urlpath)
if is_relative_path(urlpath):
# append the relative path to the base_path
urlpath = url_or_path_join(self._base_path, urlpath)
return urlpath
def extract(self, url_or_urls):
"""Add extraction protocol for given url(s) for streaming.
This is the lazy version of `DownloadManager.extract` for streaming.
Args:
url_or_urls (`str` or `list` or `dict`):
URL(s) of files to stream data from. Each url is a `str`.
Returns:
url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> extracted_files = dl_manager.extract(downloaded_files)
```
"""
urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True)
return urlpaths
def _extract(self, urlpath: str) -> str:
urlpath = str(urlpath)
protocol = _get_extraction_protocol(urlpath, download_config=self.download_config)
# get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz
path = urlpath.split("::")[0]
extension = _get_path_extension(path)
if extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")):
raise NotImplementedError(
f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. "
f"Please use `dl_manager.iter_archive` instead.\n\n"
f"Example usage:\n\n"
f"\turl = dl_manager.download(url)\n"
f"\ttar_archive_iterator = dl_manager.iter_archive(url)\n\n"
f"\tfor filename, file in tar_archive_iterator:\n"
f"\t\t..."
)
if protocol is None:
# no extraction
return urlpath
elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS:
# there is one single file which is the uncompressed file
inner_file = os.path.basename(urlpath.split("::")[0])
inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file
return f"{protocol}://{inner_file}::{urlpath}"
else:
return f"{protocol}://::{urlpath}"
def download_and_extract(self, url_or_urls):
"""Prepare given `url_or_urls` for streaming (add extraction protocol).
This is the lazy version of `DownloadManager.download_and_extract` for streaming.
Is equivalent to:
```
urls = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls (`str` or `list` or `dict`):
URL(s) of files to stream data from. Each url is a `str`.
Returns:
url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
"""
return self.extract(self.download(url_or_urls))
def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]:
"""Iterate over files within an archive.
Args:
urlpath_or_buf (`str` or `io.BufferedReader`):
Archive path or archive binary file object.
Yields:
`tuple[str, io.BufferedReader]`:
2-tuple (path_within_archive, file_object).
File object is opened in binary mode.
Example:
```py
>>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> files = dl_manager.iter_archive(archive)
```
"""
if hasattr(urlpath_or_buf, "read"):
return ArchiveIterable.from_buf(urlpath_or_buf)
else:
return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config)
def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]:
"""Iterate over files.
Args:
urlpaths (`str` or `list` of `str`):
Root paths.
Yields:
str: File URL path.
Example:
```py
>>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
>>> files = dl_manager.iter_files(files)
```
"""
return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config)
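def _streaming_extract_sketch():
    # Illustrative sketch, not part of the original module: `extract` is lazy and only chains an
    # extraction protocol in front of the URL instead of decompressing anything. The URL is
    # hypothetical and is never fetched, assuming the ".gz" extension is enough to infer the protocol.
    dl_manager = StreamingDownloadManager()
    url = "https://example.com/data/train.json.gz"
    assert dl_manager.extract(url) == "gzip://train.json::https://example.com/data/train.json.gz"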
| datasets-main | src/datasets/download/streaming_download_manager.py |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
formatter_cls: type,
format_type: Optional[str],
aliases: Optional[List[str]] = None,
):
"""
Register a Formatter object using a name and optional aliases.
This function must be used on a Formatter class.
"""
aliases = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
)
_FORMAT_TYPES[format_type] = formatter_cls
for alias in set(aliases + [format_type]):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
)
_FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
"""
Register an unavailable Formatter object using a name and optional aliases.
This function must be used on an Exception object that is raised when trying to get the unavailable formatter.
"""
aliases = aliases if aliases is not None else []
for alias in set(aliases + [format_type]):
_FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
_torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
_tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
_jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
"""If the given format type is a known alias, then return its main type name. Otherwise return the type with no change."""
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
"""
Factory function to get a Formatter given its type name and keyword arguments.
A formatter is an object that extracts and formats data from pyarrow tables.
It defines the formatting for rows, columns and batches.
If the formatter for a given type name doesn't exist or is not available, an error is raised.
"""
format_type = get_format_type_from_alias(format_type)
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**format_kwargs)
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
)
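def _get_formatter_sketch():
    # Illustrative sketch, not part of the original module: aliases resolve to their canonical
    # format type, and `get_formatter` instantiates the registered formatter class.
    assert get_format_type_from_alias("np") == "numpy"
    assert isinstance(get_formatter("np"), NumpyFormatter)
    assert isinstance(get_formatter(None), PythonFormatter)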
| datasets-main | src/datasets/formatting/__init__.py |
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Mapping, MutableMapping
from functools import partial
# Lint as: python3
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
import numpy as np
import pandas as pd
import pyarrow as pa
from packaging import version
from .. import config
from ..features import Features
from ..features.features import _ArrayXDExtensionType, _is_zero_copy_only, decode_nested_example, pandas_types_mapper
from ..table import Table
from ..utils.py_utils import no_op_if_value_is_null
T = TypeVar("T")
RowFormat = TypeVar("RowFormat")
ColumnFormat = TypeVar("ColumnFormat")
BatchFormat = TypeVar("BatchFormat")
def _is_range_contiguous(key: range) -> bool:
return key.step == 1 and key.stop >= key.start
def _raise_bad_key_type(key: Any):
raise TypeError(
f"Wrong key type: '{key}' of type '{type(key)}'. Expected one of int, slice, range, str or Iterable."
)
def _query_table_with_indices_mapping(
table: Table, key: Union[int, slice, range, str, Iterable], indices: Table
) -> pa.Table:
"""
Query a pyarrow Table to extract the subtable that corresponds to the given key.
The :obj:`indices` parameter corresponds to the indices mapping in case we want to take into
account a shuffling or an indices selection, for example.
The indices table must contain one column named "indices" of type uint64.
"""
if isinstance(key, int):
key = indices.fast_slice(key % indices.num_rows, 1).column(0)[0].as_py()
return _query_table(table, key)
if isinstance(key, slice):
key = range(*key.indices(indices.num_rows))
if isinstance(key, range):
if _is_range_contiguous(key) and key.start >= 0:
return _query_table(
table, [i.as_py() for i in indices.fast_slice(key.start, key.stop - key.start).column(0)]
)
else:
pass # treat as an iterable
if isinstance(key, str):
table = table.select([key])
return _query_table(table, indices.column(0).to_pylist())
if isinstance(key, Iterable):
return _query_table(table, [indices.fast_slice(i, 1).column(0)[0].as_py() for i in key])
_raise_bad_key_type(key)
def _query_table(table: Table, key: Union[int, slice, range, str, Iterable]) -> pa.Table:
"""
Query a pyarrow Table to extract the subtable that corresponds to the given key.
"""
if isinstance(key, int):
return table.fast_slice(key % table.num_rows, 1)
if isinstance(key, slice):
key = range(*key.indices(table.num_rows))
if isinstance(key, range):
if _is_range_contiguous(key) and key.start >= 0:
return table.fast_slice(key.start, key.stop - key.start)
else:
pass # treat as an iterable
if isinstance(key, str):
return table.table.drop([column for column in table.column_names if column != key])
if isinstance(key, Iterable):
key = np.fromiter(key, np.int64)
if len(key) == 0:
return table.table.slice(0, 0)
# don't use pyarrow.Table.take even for pyarrow >=1.0 (see https://issues.apache.org/jira/browse/ARROW-9773)
return table.fast_gather(key % table.num_rows)
_raise_bad_key_type(key)
def _is_array_with_nulls(pa_array: pa.Array) -> bool:
return pa_array.null_count > 0
class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]):
"""
Arrow extractors are used to extract data from pyarrow tables.
They make it possible to extract rows, columns and batches.
These three extraction types have to be implemented.
"""
def extract_row(self, pa_table: pa.Table) -> RowFormat:
raise NotImplementedError
def extract_column(self, pa_table: pa.Table) -> ColumnFormat:
raise NotImplementedError
def extract_batch(self, pa_table: pa.Table) -> BatchFormat:
raise NotImplementedError
def _unnest(py_dict: Dict[str, List[T]]) -> Dict[str, T]:
"""Return the first element of a batch (dict) as a row (dict)"""
return {key: array[0] for key, array in py_dict.items()}
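def _unnest_sketch():
    # Illustrative sketch, not part of the original module: _unnest turns a batch of size 1 into a row.
    assert _unnest({"a": [1], "b": ["x"]}) == {"a": 1, "b": "x"}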
class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]):
def extract_row(self, pa_table: pa.Table) -> pa.Table:
return pa_table
def extract_column(self, pa_table: pa.Table) -> pa.Array:
return pa_table.column(0)
def extract_batch(self, pa_table: pa.Table) -> pa.Table:
return pa_table
class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]):
def extract_row(self, pa_table: pa.Table) -> dict:
return _unnest(pa_table.to_pydict())
def extract_column(self, pa_table: pa.Table) -> list:
return pa_table.column(0).to_pylist()
def extract_batch(self, pa_table: pa.Table) -> dict:
return pa_table.to_pydict()
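def _python_arrow_extractor_sketch():
    # Illustrative sketch, not part of the original module: the three extraction types on a small table.
    pa_table = pa.table({"col": [1, 2, 3]})
    extractor = PythonArrowExtractor()
    assert extractor.extract_row(pa_table) == {"col": 1}
    assert extractor.extract_column(pa_table) == [1, 2, 3]
    assert extractor.extract_batch(pa_table) == {"col": [1, 2, 3]}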
class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]):
def __init__(self, **np_array_kwargs):
self.np_array_kwargs = np_array_kwargs
def extract_row(self, pa_table: pa.Table) -> dict:
return _unnest(self.extract_batch(pa_table))
def extract_column(self, pa_table: pa.Table) -> np.ndarray:
return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]])
def extract_batch(self, pa_table: pa.Table) -> dict:
return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names}
def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray:
if isinstance(pa_array, pa.ChunkedArray):
if isinstance(pa_array.type, _ArrayXDExtensionType):
# don't call to_pylist() to preserve dtype of the fixed-size array
zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
array: List = [
row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
]
else:
zero_copy_only = _is_zero_copy_only(pa_array.type) and all(
not _is_array_with_nulls(chunk) for chunk in pa_array.chunks
)
array: List = [
row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
]
else:
if isinstance(pa_array.type, _ArrayXDExtensionType):
# don't call to_pylist() to preserve dtype of the fixed-size array
zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only)
else:
zero_copy_only = _is_zero_copy_only(pa_array.type) and not _is_array_with_nulls(pa_array)
array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist()
if len(array) > 0:
if any(
(isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape))
or (isinstance(x, float) and np.isnan(x))
for x in array
):
return np.array(array, copy=False, dtype=object)
return np.array(array, copy=False)
class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]):
def extract_row(self, pa_table: pa.Table) -> pd.DataFrame:
return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper)
def extract_column(self, pa_table: pa.Table) -> pd.Series:
return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[pa_table.column_names[0]]
def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame:
return pa_table.to_pandas(types_mapper=pandas_types_mapper)
class PythonFeaturesDecoder:
def __init__(self, features: Optional[Features]):
self.features = features
def decode_row(self, row: dict) -> dict:
return self.features.decode_example(row) if self.features else row
def decode_column(self, column: list, column_name: str) -> list:
return self.features.decode_column(column, column_name) if self.features else column
def decode_batch(self, batch: dict) -> dict:
return self.features.decode_batch(batch) if self.features else batch
class PandasFeaturesDecoder:
def __init__(self, features: Optional[Features]):
self.features = features
def decode_row(self, row: pd.DataFrame) -> pd.DataFrame:
decode = (
{
column_name: no_op_if_value_is_null(partial(decode_nested_example, feature))
for column_name, feature in self.features.items()
if self.features._column_requires_decoding[column_name]
}
if self.features
else {}
)
if decode:
row[list(decode.keys())] = row.transform(decode)
return row
def decode_column(self, column: pd.Series, column_name: str) -> pd.Series:
decode = (
no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name]))
if self.features and column_name in self.features and self.features._column_requires_decoding[column_name]
else None
)
if decode:
column = column.transform(decode)
return column
def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame:
return self.decode_row(batch)
class LazyDict(MutableMapping):
"""A dictionary backed by Arrow data. The values are formatted on-the-fly when accessing the dictionary."""
def __init__(self, pa_table: pa.Table, formatter: "Formatter"):
self.pa_table = pa_table
self.formatter = formatter
self.data = {key: None for key in pa_table.column_names}
self.keys_to_format = set(self.data.keys())
def __len__(self):
return len(self.data)
def __getitem__(self, key):
value = self.data[key]
if key in self.keys_to_format:
value = self.format(key)
self.data[key] = value
self.keys_to_format.remove(key)
return value
def __setitem__(self, key, value):
if key in self.keys_to_format:
self.keys_to_format.remove(key)
self.data[key] = value
def __delitem__(self, key) -> None:
if key in self.keys_to_format:
self.keys_to_format.remove(key)
del self.data[key]
def __iter__(self):
return iter(self.data)
def __contains__(self, key):
return key in self.data
def __repr__(self):
self._format_all()
return repr(self.data)
if config.PY_VERSION >= version.parse("3.9"):
# merging with the union ("|") operator is supported in Python 3.9+
def __or__(self, other):
if isinstance(other, LazyDict):
inst = self.copy()
other = other.copy()
other._format_all()
inst.keys_to_format -= other.data.keys()
inst.data = inst.data | other.data
return inst
if isinstance(other, dict):
inst = self.copy()
inst.keys_to_format -= other.keys()
inst.data = inst.data | other
return inst
return NotImplemented
def __ror__(self, other):
if isinstance(other, LazyDict):
inst = self.copy()
other = other.copy()
other._format_all()
inst.keys_to_format -= other.data.keys()
inst.data = other.data | inst.data
return inst
if isinstance(other, dict):
inst = self.copy()
inst.keys_to_format -= other.keys()
inst.data = other | inst.data
return inst
return NotImplemented
def __ior__(self, other):
if isinstance(other, LazyDict):
other = other.copy()
other._format_all()
self.keys_to_format -= other.data.keys()
self.data |= other.data
else:
self.keys_to_format -= other.keys()
self.data |= other
return self
def __copy__(self):
# Identical to `UserDict.__copy__`
inst = self.__class__.__new__(self.__class__)
inst.__dict__.update(self.__dict__)
# Create a copy and avoid triggering descriptors
inst.__dict__["data"] = self.__dict__["data"].copy()
inst.__dict__["keys_to_format"] = self.__dict__["keys_to_format"].copy()
return inst
def copy(self):
import copy
return copy.copy(self)
@classmethod
def fromkeys(cls, iterable, value=None):
raise NotImplementedError
def format(self, key):
raise NotImplementedError
def _format_all(self):
for key in self.keys_to_format:
self.data[key] = self.format(key)
self.keys_to_format.clear()
class LazyRow(LazyDict):
def format(self, key):
return self.formatter.format_column(self.pa_table.select([key]))[0]
class LazyBatch(LazyDict):
def format(self, key):
return self.formatter.format_column(self.pa_table.select([key]))
class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]):
"""
A formatter is an object that extracts and formats data from pyarrow tables.
It defines the formatting for rows, columns and batches.
"""
simple_arrow_extractor = SimpleArrowExtractor
python_arrow_extractor = PythonArrowExtractor
numpy_arrow_extractor = NumpyArrowExtractor
pandas_arrow_extractor = PandasArrowExtractor
def __init__(self, features: Optional[Features] = None):
self.features = features
self.python_features_decoder = PythonFeaturesDecoder(self.features)
self.pandas_features_decoder = PandasFeaturesDecoder(self.features)
def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]:
if query_type == "row":
return self.format_row(pa_table)
elif query_type == "column":
return self.format_column(pa_table)
elif query_type == "batch":
return self.format_batch(pa_table)
def format_row(self, pa_table: pa.Table) -> RowFormat:
raise NotImplementedError
def format_column(self, pa_table: pa.Table) -> ColumnFormat:
raise NotImplementedError
def format_batch(self, pa_table: pa.Table) -> BatchFormat:
raise NotImplementedError
class TensorFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]):
def recursive_tensorize(self, data_struct: dict):
raise NotImplementedError
class ArrowFormatter(Formatter[pa.Table, pa.Array, pa.Table]):
def format_row(self, pa_table: pa.Table) -> pa.Table:
return self.simple_arrow_extractor().extract_row(pa_table)
def format_column(self, pa_table: pa.Table) -> pa.Array:
return self.simple_arrow_extractor().extract_column(pa_table)
def format_batch(self, pa_table: pa.Table) -> pa.Table:
return self.simple_arrow_extractor().extract_batch(pa_table)
class PythonFormatter(Formatter[Mapping, list, Mapping]):
def __init__(self, features=None, lazy=False):
super().__init__(features)
self.lazy = lazy
def format_row(self, pa_table: pa.Table) -> Mapping:
if self.lazy:
return LazyRow(pa_table, self)
row = self.python_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return row
def format_column(self, pa_table: pa.Table) -> list:
column = self.python_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
if self.lazy:
return LazyBatch(pa_table, self)
batch = self.python_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
return batch
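def _lazy_row_sketch():
    # Illustrative sketch, not part of the original module: with lazy=True the PythonFormatter
    # returns a LazyRow, and each column is only formatted on first access.
    pa_table = pa.table({"a": [1], "b": [2]})
    row = PythonFormatter(lazy=True).format_row(pa_table)
    assert isinstance(row, LazyRow)
    assert row.keys_to_format == {"a", "b"}  # nothing has been formatted yet
    assert row["a"] == 1
    assert row.keys_to_format == {"b"}  # "a" is now formatted and cached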
class PandasFormatter(Formatter[pd.DataFrame, pd.Series, pd.DataFrame]):
def format_row(self, pa_table: pa.Table) -> pd.DataFrame:
row = self.pandas_arrow_extractor().extract_row(pa_table)
row = self.pandas_features_decoder.decode_row(row)
return row
def format_column(self, pa_table: pa.Table) -> pd.Series:
column = self.pandas_arrow_extractor().extract_column(pa_table)
column = self.pandas_features_decoder.decode_column(column, pa_table.column_names[0])
return column
def format_batch(self, pa_table: pa.Table) -> pd.DataFrame:
row = self.pandas_arrow_extractor().extract_batch(pa_table)
row = self.pandas_features_decoder.decode_batch(row)
return row
class CustomFormatter(Formatter[dict, ColumnFormat, dict]):
"""
A user-defined custom formatter function defined by a ``transform``.
The transform must take as input a batch of data extracted from an arrow table using the python extractor,
and return a batch.
If the output batch is not a dict, then output_all_columns won't work.
If the output batch has several fields, then querying a single column won't work since we don't know which field
to return.
"""
def __init__(self, transform: Callable[[dict], dict], features=None, **kwargs):
super().__init__(features=features)
self.transform = transform
def format_row(self, pa_table: pa.Table) -> dict:
formatted_batch = self.format_batch(pa_table)
try:
return _unnest(formatted_batch)
except Exception as exc:
raise TypeError(
f"Custom formatting function must return a dict of sequences to be able to pick a row, but got {formatted_batch}"
) from exc
def format_column(self, pa_table: pa.Table) -> ColumnFormat:
formatted_batch = self.format_batch(pa_table)
if hasattr(formatted_batch, "keys"):
if len(formatted_batch.keys()) > 1:
raise TypeError(
"Tried to query a column but the custom formatting function returns too many columns. "
f"Only one column was expected but got columns {list(formatted_batch.keys())}."
)
else:
raise TypeError(
f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}"
)
try:
return formatted_batch[pa_table.column_names[0]]
except Exception as exc:
raise TypeError(
f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}"
) from exc
def format_batch(self, pa_table: pa.Table) -> dict:
batch = self.python_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
return self.transform(batch)
def _check_valid_column_key(key: str, columns: List[str]) -> None:
if key not in columns:
raise KeyError(f"Column {key} not in the dataset. Current columns in the dataset: {columns}")
def _check_valid_index_key(key: Union[int, slice, range, Iterable], size: int) -> None:
if isinstance(key, int):
if (key < 0 and key + size < 0) or (key >= size):
raise IndexError(f"Invalid key: {key} is out of bounds for size {size}")
return
elif isinstance(key, slice):
pass
elif isinstance(key, range):
if len(key) > 0:
_check_valid_index_key(max(key), size=size)
_check_valid_index_key(min(key), size=size)
elif isinstance(key, Iterable):
if len(key) > 0:
_check_valid_index_key(int(max(key)), size=size)
_check_valid_index_key(int(min(key)), size=size)
else:
_raise_bad_key_type(key)
def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str:
if isinstance(key, int):
return "row"
elif isinstance(key, str):
return "column"
elif isinstance(key, (slice, range, Iterable)):
return "batch"
_raise_bad_key_type(key)
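def _key_to_query_type_sketch():
    # Illustrative sketch, not part of the original module: the key type decides whether a row,
    # a column or a batch is queried.
    assert key_to_query_type(0) == "row"
    assert key_to_query_type("text") == "column"
    assert key_to_query_type(slice(0, 2)) == "batch"
    assert key_to_query_type([0, 2]) == "batch"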
def query_table(
table: Table,
key: Union[int, slice, range, str, Iterable],
indices: Optional[Table] = None,
) -> pa.Table:
"""
Query a Table to extract the subtable that corresponds to the given key.
Args:
table (``datasets.table.Table``): The input Table to query from
key (``Union[int, slice, range, str, Iterable]``): The key can be of different types:
- an integer i: the subtable containing only the i-th row
- a slice [i:j:k]: the subtable containing the rows that correspond to this slice
- a range(i, j, k): the subtable containing the rows that correspond to this range
- a string c: the subtable containing all the rows but only the column c
- an iterable l: the subtable that is the concatenation of all the i-th rows for all i in the iterable
indices (Optional ``datasets.table.Table``): If not None, it is used to re-map the given key to the table rows.
The indices table must contain one column named "indices" of type uint64.
This is used in case of shuffling or rows selection.
Returns:
``pyarrow.Table``: the result of the query on the input table
"""
# Check if key is valid
if not isinstance(key, (int, slice, range, str, Iterable)):
_raise_bad_key_type(key)
if isinstance(key, str):
_check_valid_column_key(key, table.column_names)
else:
size = indices.num_rows if indices is not None else table.num_rows
_check_valid_index_key(key, size)
# Query the main table
if indices is None:
pa_subtable = _query_table(table, key)
else:
pa_subtable = _query_table_with_indices_mapping(table, key, indices=indices)
return pa_subtable
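def _query_table_sketch():
    # Illustrative sketch, not part of the original module, assuming `InMemoryTable.from_pydict`
    # from datasets.table as the in-memory Table implementation.
    from ..table import InMemoryTable
    table = InMemoryTable.from_pydict({"col": [10, 20, 30]})
    assert query_table(table, 1).to_pydict() == {"col": [20]}
    assert query_table(table, "col").to_pydict() == {"col": [10, 20, 30]}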
def format_table(
table: Table,
key: Union[int, slice, range, str, Iterable],
formatter: Formatter,
format_columns: Optional[list] = None,
output_all_columns=False,
):
"""
Format a Table depending on the key that was used and a Formatter object.
Args:
table (``datasets.table.Table``): The input Table to format
key (``Union[int, slice, range, str, Iterable]``): Depending on the key that was used, the formatter formats
the table as either a row, a column or a batch.
formatter (``datasets.formatting.formatting.Formatter``): Any subclass of a Formatter such as
PythonFormatter, NumpyFormatter, etc.
format_columns (:obj:`List[str]`, optional): if not None, it defines the columns that will be formatted using the
given formatter. Other columns are discarded (unless ``output_all_columns`` is True)
output_all_columns (:obj:`bool`, defaults to False): If True, the formatted output is completed using the columns
that are not in the ``format_columns`` list. For these columns, the PythonFormatter is used.
Returns:
A row, column or batch formatted object defined by the Formatter:
- the PythonFormatter returns a dictionary for a row or a batch, and a list for a column.
- the NumpyFormatter returns a dictionary for a row or a batch, and a np.array for a column.
- the PandasFormatter returns a pd.DataFrame for a row or a batch, and a pd.Series for a column.
- the TorchFormatter returns a dictionary for a row or a batch, and a torch.Tensor for a column.
- the TFFormatter returns a dictionary for a row or a batch, and a tf.Tensor for a column.
"""
if isinstance(table, Table):
pa_table = table.table
else:
pa_table = table
query_type = key_to_query_type(key)
python_formatter = PythonFormatter(features=None)
if format_columns is None:
return formatter(pa_table, query_type=query_type)
elif query_type == "column":
if key in format_columns:
return formatter(pa_table, query_type)
else:
return python_formatter(pa_table, query_type=query_type)
else:
pa_table_to_format = pa_table.drop(col for col in pa_table.column_names if col not in format_columns)
formatted_output = formatter(pa_table_to_format, query_type=query_type)
if output_all_columns:
if isinstance(formatted_output, MutableMapping):
pa_table_with_remaining_columns = pa_table.drop(
col for col in pa_table.column_names if col in format_columns
)
remaining_columns_dict = python_formatter(pa_table_with_remaining_columns, query_type=query_type)
formatted_output.update(remaining_columns_dict)
else:
raise TypeError(
f"Custom formatting function must return a dict to work with output_all_columns=True, but got {formatted_output}"
)
return formatted_output
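def _format_table_sketch():
    # Illustrative sketch, not part of the original module: only the columns listed in `format_columns`
    # go through the given formatter; with output_all_columns=True the remaining columns are completed
    # using the PythonFormatter. NumpyFormatter is imported lazily here to avoid a circular import.
    from .np_formatter import NumpyFormatter
    pa_table = pa.table({"a": [1, 2], "b": ["x", "y"]})
    out = format_table(pa_table, slice(0, 2), NumpyFormatter(), format_columns=["a"], output_all_columns=True)
    assert isinstance(out["a"], np.ndarray)
    assert out["b"] == ["x", "y"]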
| datasets-main | src/datasets/formatting/formatting.py |
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
def __init__(self, features=None, **torch_tensor_kwargs):
super().__init__(features=features)
self.torch_tensor_kwargs = torch_tensor_kwargs
import torch # noqa: F401 - import torch at initialization
def _consolidate(self, column):
import torch
if isinstance(column, list) and column:
if all(
isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column
):
return torch.stack(column)
return column
def _tensorize(self, value):
import torch
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": torch.int64}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": torch.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
def _recursive_tensorize(self, data_struct):
import torch
# support for torch, tf, jax etc.
if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
elif isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
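def _torch_formatter_sketch():
    # Illustrative sketch, not part of the original module; requires torch to be installed.
    # Rows with identical shapes end up as a single stacked torch.Tensor.
    import torch
    pa_table = pa.table({"x": [[1, 2], [3, 4]]})
    batch = TorchFormatter().format_batch(pa_table)
    assert isinstance(batch["x"], torch.Tensor)
    assert batch["x"].shape == (2, 2)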
| datasets-main | src/datasets/formatting/torch_formatter.py |
# Copyright 2021 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
def __init__(self, features=None, device=None, **jnp_array_kwargs):
super().__init__(features=features)
import jax
from jaxlib.xla_client import Device
if isinstance(device, Device):
raise ValueError(
f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`."
)
self.device = device if isinstance(device, str) else str(jax.devices()[0])
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
DEVICE_MAPPING = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys()):
logger.warning(
f"Device with string identifier {self.device} not listed among the available "
f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
f"device: {str(jax.devices()[0])}."
)
self.device = str(jax.devices()[0])
self.jnp_array_kwargs = jnp_array_kwargs
@staticmethod
def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(device): device for device in jax.devices()}
def _consolidate(self, column):
import jax
import jax.numpy as jnp
if isinstance(column, list) and column:
if all(
isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return jnp.stack(column, axis=0)
return column
def _tensorize(self, value):
import jax
import jax.numpy as jnp
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
default_dtype = {"dtype": jnp.int64}
else:
default_dtype = {"dtype": jnp.int32}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
DEVICE_MAPPING = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device]):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
def _recursive_tensorize(self, data_struct):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(data_struct, torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
elif isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "jax.Array":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
| datasets-main | src/datasets/formatting/jax_formatter.py |
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import tensorflow as tf
class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]):
def __init__(self, features=None, **tf_tensor_kwargs):
super().__init__(features=features)
self.tf_tensor_kwargs = tf_tensor_kwargs
import tensorflow as tf # noqa: F401 - import tf at initialization
def _consolidate(self, column):
import tensorflow as tf
if isinstance(column, list) and column:
if all(
isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return tf.stack(column)
elif all(
isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype
for x in column
):
# only use ragged stacking for 1-D tensors, otherwise some dimensions become ragged even though they were consolidated
return tf.ragged.stack(column)
return column
def _tensorize(self, value):
import tensorflow as tf
if value is None:
return value
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": tf.int64}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": tf.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs})
def _recursive_tensorize(self, data_struct):
import tensorflow as tf
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(data_struct, torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
if data_struct.dtype == object: # tf tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
elif isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
| datasets-main | src/datasets/formatting/tf_formatter.py |
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from collections.abc import Mapping
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
class NumpyFormatter(TensorFormatter[Mapping, np.ndarray, Mapping]):
def __init__(self, features=None, **np_array_kwargs):
super().__init__(features=features)
self.np_array_kwargs = np_array_kwargs
def _consolidate(self, column):
if isinstance(column, list):
if column and all(
isinstance(x, np.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return np.stack(column)
else:
# don't use np.array(column, dtype=object)
# since it fails in certain cases
# see https://stackoverflow.com/q/51005699
out = np.empty(len(column), dtype=object)
out[:] = column
return out
return column
def _tensorize(self, value):
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value
elif isinstance(value, np.number):
return value
default_dtype = {}
if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": np.int64}
elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": np.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
return np.asarray(value, **self.np_array_kwargs)
return np.asarray(value, **{**default_dtype, **self.np_array_kwargs})
def _recursive_tensorize(self, data_struct):
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(data_struct, torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(data_struct, "__array__") and not isinstance(data_struct, (np.ndarray, np.character, np.number)):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
if data_struct.dtype == object:
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
if isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> np.ndarray:
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
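def _numpy_formatter_sketch():
    # Illustrative sketch, not part of the original module: columns whose rows have different shapes
    # are consolidated into an object array instead of being stacked.
    pa_table = pa.table({"x": [[1, 2], [3, 4, 5]]})
    col = NumpyFormatter().format_column(pa_table)
    assert col.dtype == object
    assert list(col[0]) == [1, 2]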
| datasets-main | src/datasets/formatting/np_formatter.py |
import fnmatch
import json
import os
import shutil
import tempfile
import xml.etree.ElementTree as ET
from argparse import ArgumentParser
from pathlib import Path
from typing import Optional
from datasets import config
from datasets.commands import BaseDatasetsCLICommand
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.download.mock_download_manager import MockDownloadManager
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.deprecation_utils import deprecated
from datasets.utils.logging import get_logger, set_verbosity_warning
from datasets.utils.py_utils import map_nested
logger = get_logger(__name__)
DEFAULT_ENCODING = "utf-8"
def dummy_data_command_factory(args):
return DummyDataCommand(
args.path_to_dataset,
args.auto_generate,
args.n_lines,
args.json_field,
args.xml_tag,
args.match_text_files,
args.keep_uncompressed,
args.cache_dir,
args.encoding,
)
class DummyDataGeneratorDownloadManager(DownloadManager):
def __init__(self, mock_download_manager, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mock_download_manager = mock_download_manager
self.downloaded_dummy_paths = []
self.expected_dummy_paths = []
def download(self, url_or_urls):
output = super().download(url_or_urls)
dummy_output = self.mock_download_manager.download(url_or_urls)
map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True)
map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True)
return output
def download_and_extract(self, url_or_urls):
output = super().extract(super().download(url_or_urls))
dummy_output = self.mock_download_manager.download(url_or_urls)
map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True)
map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True)
return output
def auto_generate_dummy_data_folder(
self,
n_lines: int = 5,
json_field: Optional[str] = None,
xml_tag: Optional[str] = None,
match_text_files: Optional[str] = None,
encoding: Optional[str] = None,
) -> bool:
os.makedirs(
os.path.join(
self.mock_download_manager.datasets_scripts_dir,
self.mock_download_manager.dataset_name,
self.mock_download_manager.dummy_data_folder,
"dummy_data",
),
exist_ok=True,
)
total = 0
self.mock_download_manager.load_existing_dummy_data = False
for src_path, relative_dst_path in zip(self.downloaded_dummy_paths, self.expected_dummy_paths):
dst_path = os.path.join(
self.mock_download_manager.datasets_scripts_dir,
self.mock_download_manager.dataset_name,
self.mock_download_manager.dummy_data_folder,
relative_dst_path,
)
total += self._create_dummy_data(
src_path,
dst_path,
n_lines=n_lines,
json_field=json_field,
xml_tag=xml_tag,
match_text_files=match_text_files,
encoding=encoding,
)
if total == 0:
logger.error(
"Dummy data generation failed: no dummy files were created. "
"Make sure the data files format is supported by the auto-generation."
)
return total > 0
def _create_dummy_data(
self,
src_path: str,
dst_path: str,
n_lines: int,
json_field: Optional[str] = None,
xml_tag: Optional[str] = None,
match_text_files: Optional[str] = None,
encoding: Optional[str] = None,
) -> int:
encoding = encoding or DEFAULT_ENCODING
if os.path.isfile(src_path):
logger.debug(f"Trying to generate dummy data file {dst_path}")
dst_path_extensions = Path(dst_path).suffixes
line_by_line_extensions = [".txt", ".csv", ".jsonl", ".tsv"]
is_line_by_line_text_file = any(extension in dst_path_extensions for extension in line_by_line_extensions)
if match_text_files is not None:
file_name = os.path.basename(dst_path)
for pattern in match_text_files.split(","):
is_line_by_line_text_file |= fnmatch.fnmatch(file_name, pattern)
# Line by line text file (txt, csv etc.)
if is_line_by_line_text_file:
Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
with open(src_path, encoding=encoding) as src_file:
with open(dst_path, "w", encoding=encoding) as dst_file:
first_lines = []
for i, line in enumerate(src_file):
if i >= n_lines:
break
first_lines.append(line)
dst_file.write("".join(first_lines).strip())
return 1
# json file
elif ".json" in dst_path_extensions:
with open(src_path, encoding=encoding) as src_file:
json_data = json.load(src_file)
if json_field is not None:
json_data = json_data[json_field]
if isinstance(json_data, dict):
if not all(isinstance(v, list) for v in json_data.values()):
raise ValueError(
f"Couldn't parse columns {list(json_data.keys())}. "
"Maybe specify which json field must be used "
"to read the data with --json_field <my_field>."
)
first_json_data = {k: v[:n_lines] for k, v in json_data.items()}
else:
first_json_data = json_data[:n_lines]
if json_field is not None:
first_json_data = {json_field: first_json_data}
Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
with open(dst_path, "w", encoding=encoding) as dst_file:
json.dump(first_json_data, dst_file)
return 1
# xml file
elif any(extension in dst_path_extensions for extension in [".xml", ".txm"]):
if xml_tag is None:
logger.warning("Found xml file but 'xml_tag' is set to None. Please provide --xml_tag")
else:
self._create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=n_lines, encoding=encoding)
return 1
logger.warning(
f"Couldn't generate dummy file '{dst_path}'. " "Ignore that if this file is not useful for dummy data."
)
return 0
# directory, iterate through all files
elif os.path.isdir(src_path):
total = 0
for path, _, files in os.walk(src_path):
for name in files:
if not name.startswith("."): # ignore files like .DS_Store etc.
src_file_path = os.path.join(path, name)
dst_file_path = os.path.join(dst_path, Path(src_file_path).relative_to(src_path))
total += self._create_dummy_data(
src_file_path,
dst_file_path,
n_lines=n_lines,
json_field=json_field,
xml_tag=xml_tag,
match_text_files=match_text_files,
encoding=encoding,
)
return total
@staticmethod
def _create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=5, encoding=DEFAULT_ENCODING):
Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
with open(src_path, encoding=encoding) as src_file:
n_line = 0
parents = []
for event, elem in ET.iterparse(src_file, events=("start", "end")):
if event == "start":
parents.append(elem)
else:
_ = parents.pop()
if elem.tag == xml_tag:
if n_line < n_lines:
n_line += 1
else:
if parents:
parents[-1].remove(elem)
ET.ElementTree(element=elem).write(dst_path, encoding=encoding)
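    # Illustrative note (not part of the original code): with xml_tag="page" and n_lines=5,
    # the helper above keeps only the first 5 <page> elements in the parsed tree, removes the
    # remaining matches from their parents, and writes the trimmed XML to dst_path.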
def compress_autogenerated_dummy_data(self, path_to_dataset):
root_dir = os.path.join(path_to_dataset, self.mock_download_manager.dummy_data_folder)
base_name = os.path.join(root_dir, "dummy_data")
base_dir = "dummy_data"
logger.info(f"Compressing dummy data folder to '{base_name}.zip'")
shutil.make_archive(base_name, "zip", root_dir, base_dir)
shutil.rmtree(base_name)
@deprecated(
"The `datasets` repository does not host the dataset scripts anymore. Therefore, dummy data is no longer needed to test their loading with CI."
)
class DummyDataCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
test_parser = parser.add_parser("dummy_data", help="Generate dummy data.")
test_parser.add_argument("--auto_generate", action="store_true", help="Automatically generate dummy data")
test_parser.add_argument(
"--n_lines", type=int, default=5, help="Number of lines or samples to keep when auto-generating dummy data"
)
test_parser.add_argument(
"--json_field",
type=str,
default=None,
help="Optional, json field to read the data from when auto-generating dummy data. In the json data files, this field must point to a list of samples as json objects (ex: the 'data' field for squad-like files)",
)
test_parser.add_argument(
"--xml_tag",
type=str,
default=None,
help="Optional, xml tag name of the samples inside the xml files when auto-generating dummy data.",
)
test_parser.add_argument(
"--match_text_files",
type=str,
default=None,
help="Optional, a comma separated list of file patterns that looks for line-by-line text files other than *.txt or *.csv. Example: --match_text_files *.label",
)
test_parser.add_argument(
"--keep_uncompressed",
action="store_true",
help="Whether to leave the dummy data folders uncompressed when auto-generating dummy data. Useful for debugging for to do manual adjustements before compressing.",
)
test_parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="Cache directory to download and cache files when auto-generating dummy data",
)
test_parser.add_argument(
"--encoding",
type=str,
default=None,
help=f"Encoding to use when auto-generating dummy data. Defaults to {DEFAULT_ENCODING}",
)
test_parser.add_argument("path_to_dataset", type=str, help="Path to the dataset (example: ./datasets/squad)")
test_parser.set_defaults(func=dummy_data_command_factory)
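    # Example invocation (illustrative): datasets-cli dummy_data --auto_generate --n_lines 5 ./datasets/squad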
def __init__(
self,
path_to_dataset: str,
auto_generate: bool,
n_lines: int,
json_field: Optional[str],
xml_tag: Optional[str],
match_text_files: Optional[str],
keep_uncompressed: bool,
cache_dir: Optional[str],
encoding: Optional[str],
):
self._path_to_dataset = path_to_dataset
if os.path.isdir(path_to_dataset):
self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-1]
else:
self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-2]
cache_dir = os.path.expanduser(cache_dir or config.HF_DATASETS_CACHE)
self._auto_generate = auto_generate
self._n_lines = n_lines
self._json_field = json_field
self._xml_tag = xml_tag
self._match_text_files = match_text_files
self._keep_uncompressed = keep_uncompressed
self._cache_dir = cache_dir
self._encoding = encoding
def run(self):
set_verbosity_warning()
dataset_module = dataset_module_factory(self._path_to_dataset)
builder_cls = import_main_class(dataset_module.module_path)
# use `None` as config if no configs
builder_configs = builder_cls.BUILDER_CONFIGS or [None]
auto_generate_results = []
with tempfile.TemporaryDirectory() as tmp_dir:
for builder_config in builder_configs:
config_name = builder_config.name if builder_config else None
dataset_builder = builder_cls(config_name=config_name, hash=dataset_module.hash, cache_dir=tmp_dir)
version = builder_config.version if builder_config else dataset_builder.config.version
mock_dl_manager = MockDownloadManager(
dataset_name=self._dataset_name,
config=builder_config,
version=version,
use_local_dummy_data=True,
load_existing_dummy_data=False,
)
if self._auto_generate:
auto_generate_results.append(
self._autogenerate_dummy_data(
dataset_builder=dataset_builder,
mock_dl_manager=mock_dl_manager,
keep_uncompressed=self._keep_uncompressed,
)
)
else:
self._print_dummy_data_instructions(
dataset_builder=dataset_builder, mock_dl_manager=mock_dl_manager
)
if self._auto_generate and not self._keep_uncompressed:
if all(auto_generate_results):
print(f"Automatic dummy data generation succeeded for all configs of '{self._path_to_dataset}'")
else:
print(f"Automatic dummy data generation failed for some configs of '{self._path_to_dataset}'")
def _autogenerate_dummy_data(self, dataset_builder, mock_dl_manager, keep_uncompressed) -> Optional[bool]:
dl_cache_dir = (
os.path.join(self._cache_dir, config.DOWNLOADED_DATASETS_DIR)
if self._cache_dir
else config.DOWNLOADED_DATASETS_PATH
)
download_config = DownloadConfig(cache_dir=dl_cache_dir)
dl_manager = DummyDataGeneratorDownloadManager(
dataset_name=self._dataset_name, mock_download_manager=mock_dl_manager, download_config=download_config
)
dataset_builder._split_generators(dl_manager)
mock_dl_manager.load_existing_dummy_data = False # don't use real dummy data
dl_manager.auto_generate_dummy_data_folder(
n_lines=self._n_lines,
json_field=self._json_field,
xml_tag=self._xml_tag,
match_text_files=self._match_text_files,
encoding=self._encoding,
)
if not keep_uncompressed:
            path_to_dataset = os.path.join(mock_dl_manager.datasets_scripts_dir, mock_dl_manager.dataset_name)
            dl_manager.compress_autogenerated_dummy_data(path_to_dataset)
# now test that the dummy_data.zip file actually works
mock_dl_manager.load_existing_dummy_data = True # use real dummy data
n_examples_per_split = {}
os.makedirs(dataset_builder._cache_dir, exist_ok=True)
try:
split_generators = dataset_builder._split_generators(mock_dl_manager)
for split_generator in split_generators:
dataset_builder._prepare_split(split_generator, check_duplicate_keys=False)
n_examples_per_split[split_generator.name] = split_generator.split_info.num_examples
except OSError as e:
logger.error(
f"Failed to load dummy data for config '{dataset_builder.config.name}''.\nOriginal error:\n"
+ str(e)
)
return False
else:
if all(n_examples > 0 for n_examples in n_examples_per_split.values()):
logger.warning(
f"Dummy data generation done and dummy data test succeeded for config '{dataset_builder.config.name}''."
)
return True
else:
empty_splits = [
split_name for split_name in n_examples_per_split if n_examples_per_split[split_name] == 0
]
logger.warning(
f"Dummy data generation done but dummy data test failed since splits {empty_splits} have 0 examples for config '{dataset_builder.config.name}''."
)
return False
else:
generated_dummy_data_dir = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
logger.info(
f"Dummy data generated in directory '{generated_dummy_data_dir}' but kept uncompressed. "
"Please compress this directory into a zip file to use it for dummy data tests."
)
def _print_dummy_data_instructions(self, dataset_builder, mock_dl_manager):
dummy_data_folder = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
logger.info(f"Creating dummy folder structure for {dummy_data_folder}... ")
os.makedirs(dummy_data_folder, exist_ok=True)
try:
generator_splits = dataset_builder._split_generators(mock_dl_manager)
except FileNotFoundError as e:
print(
f"Dataset {self._dataset_name} with config {mock_dl_manager.config} seems to already open files in the method `_split_generators(...)`. You might consider to instead only open files in the method `_generate_examples(...)` instead. If this is not possible the dummy data has to be created with less guidance. Make sure you create the file {e.filename}."
)
files_to_create = set()
split_names = []
dummy_file_name = mock_dl_manager.dummy_file_name
for split in generator_splits:
logger.info(f"Collecting dummy data file paths to create for {split.name}")
split_names.append(split.name)
gen_kwargs = split.gen_kwargs
generator = dataset_builder._generate_examples(**gen_kwargs)
try:
dummy_data_guidance_print = "\n" + 30 * "=" + "DUMMY DATA INSTRUCTIONS" + 30 * "=" + "\n"
config_string = (
f"config {mock_dl_manager.config.name} of " if mock_dl_manager.config is not None else ""
)
dummy_data_guidance_print += (
"- In order to create the dummy data for "
+ config_string
+ f"{self._dataset_name}, please go into the folder '{dummy_data_folder}' with `cd {dummy_data_folder}` . \n\n"
)
# trigger generate function
for key, record in generator:
pass
dummy_data_guidance_print += f"- It appears that the function `_generate_examples(...)` expects one or more files in the folder {dummy_file_name} using the function `glob.glob(...)`. In this case, please refer to the `_generate_examples(...)` method to see under which filename the dummy data files should be created. \n\n"
except FileNotFoundError as e:
files_to_create.add(e.filename)
split_names = ", ".join(split_names)
if len(files_to_create) > 0:
# no glob.glob(...) in `_generate_examples(...)`
if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
dummy_data_guidance_print += f"- Please create a single dummy data file called '{next(iter(files_to_create))}' from the folder '{dummy_data_folder}'. Make sure that the dummy data file provides at least one example for the split(s) '{split_names}' \n\n"
files_string = dummy_file_name
else:
files_string = ", ".join(files_to_create)
dummy_data_guidance_print += f"- Please create the following dummy data files '{files_string}' from the folder '{dummy_data_folder}'\n\n"
dummy_data_guidance_print += f"- For each of the splits '{split_names}', make sure that one or more of the dummy data files provide at least one example \n\n"
dummy_data_guidance_print += f"- If the method `_generate_examples(...)` includes multiple `open()` statements, you might have to create other files in addition to '{files_string}'. In this case please refer to the `_generate_examples(...)` method \n\n"
if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
dummy_data_guidance_print += f"- After the dummy data file is created, it should be zipped to '{dummy_file_name}.zip' with the command `zip {dummy_file_name}.zip {dummy_file_name}` \n\n"
dummy_data_guidance_print += (
f"- You can now delete the file '{dummy_file_name}' with the command `rm {dummy_file_name}` \n\n"
)
dummy_data_guidance_print += f"- To get the file '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"
else:
dummy_data_guidance_print += f"- After all dummy data files are created, they should be zipped recursively to '{dummy_file_name}.zip' with the command `zip -r {dummy_file_name}.zip {dummy_file_name}/` \n\n"
dummy_data_guidance_print += (
f"- You can now delete the folder '{dummy_file_name}' with the command `rm -r {dummy_file_name}` \n\n"
)
dummy_data_guidance_print += f"- To get the folder '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"
dummy_data_guidance_print += (
f"- Make sure you have created the file '{dummy_file_name}.zip' in '{dummy_data_folder}' \n"
)
dummy_data_guidance_print += 83 * "=" + "\n"
print(dummy_data_guidance_print)
| datasets-main | src/datasets/commands/dummy_data.py |
import os
from argparse import ArgumentParser
from pathlib import Path
from shutil import copyfile
from typing import List
from datasets import config
from datasets.builder import DatasetBuilder
from datasets.commands import BaseDatasetsCLICommand
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadMode
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.info_utils import VerificationMode
def run_beam_command_factory(args, **kwargs):
return RunBeamCommand(
args.dataset,
args.name,
args.cache_dir,
args.beam_pipeline_options,
args.data_dir,
args.all_configs,
args.save_info or args.save_infos,
args.ignore_verifications,
args.force_redownload,
**kwargs,
)
class RunBeamCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
run_beam_parser = parser.add_parser("run_beam", help="Run a Beam dataset processing pipeline")
run_beam_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
run_beam_parser.add_argument("--name", type=str, default=None, help="Dataset config name")
run_beam_parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="Cache directory where the datasets are stored",
)
run_beam_parser.add_argument(
"--beam_pipeline_options",
type=str,
default="",
help="Beam pipeline options, separated by commas. Example:: `--beam_pipeline_options=job_name=my-job,project=my-project`",
)
run_beam_parser.add_argument(
"--data_dir",
type=str,
default=None,
help="Can be used to specify a manual directory to get the files from",
)
run_beam_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
run_beam_parser.add_argument("--save_info", action="store_true", help="Save the dataset infos file")
run_beam_parser.add_argument(
"--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks"
)
run_beam_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
# aliases
run_beam_parser.add_argument("--save_infos", action="store_true", help="alias for save_info")
run_beam_parser.set_defaults(func=run_beam_command_factory)
def __init__(
self,
dataset: str,
name: str,
cache_dir: str,
beam_pipeline_options: str,
data_dir: str,
all_configs: bool,
save_infos: bool,
ignore_verifications: bool,
force_redownload: bool,
**config_kwargs,
):
self._dataset = dataset
self._name = name
self._cache_dir = cache_dir
self._beam_pipeline_options = beam_pipeline_options
self._data_dir = data_dir
self._all_configs = all_configs
self._save_infos = save_infos
self._ignore_verifications = ignore_verifications
self._force_redownload = force_redownload
self._config_kwargs = config_kwargs
def run(self):
import apache_beam as beam
if self._name is not None and self._all_configs:
print("Both parameters `name` and `all_configs` can't be used at once.")
exit(1)
path, config_name = self._dataset, self._name
dataset_module = dataset_module_factory(path)
builder_cls = import_main_class(dataset_module.module_path)
builders: List[DatasetBuilder] = []
if self._beam_pipeline_options:
beam_options = beam.options.pipeline_options.PipelineOptions(
flags=[f"--{opt.strip()}" for opt in self._beam_pipeline_options.split(",") if opt]
)
else:
beam_options = None
if self._all_configs and len(builder_cls.BUILDER_CONFIGS) > 0:
for builder_config in builder_cls.BUILDER_CONFIGS:
builders.append(
builder_cls(
config_name=builder_config.name,
data_dir=self._data_dir,
hash=dataset_module.hash,
beam_options=beam_options,
cache_dir=self._cache_dir,
base_path=dataset_module.builder_kwargs.get("base_path"),
)
)
else:
builders.append(
builder_cls(
config_name=config_name,
data_dir=self._data_dir,
beam_options=beam_options,
cache_dir=self._cache_dir,
base_path=dataset_module.builder_kwargs.get("base_path"),
**self._config_kwargs,
)
)
for builder in builders:
builder.download_and_prepare(
download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS
if not self._force_redownload
else DownloadMode.FORCE_REDOWNLOAD,
download_config=DownloadConfig(cache_dir=config.DOWNLOADED_DATASETS_PATH),
verification_mode=VerificationMode.NO_CHECKS
if self._ignore_verifications
else VerificationMode.ALL_CHECKS,
try_from_hf_gcs=False,
)
if self._save_infos:
builder._save_infos()
print("Apache beam run successful.")
# If save_infos=True, the dataset infos file is created next to the loaded module file.
# Let's move it to the original directory of the dataset script, to allow the user to
# upload them on S3 at the same time afterwards.
if self._save_infos:
dataset_infos_path = os.path.join(builder_cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME)
name = Path(path).name + ".py"
combined_path = os.path.join(path, name)
if os.path.isfile(path):
dataset_dir = os.path.dirname(path)
elif os.path.isfile(combined_path):
dataset_dir = path
else: # in case of a remote dataset
print(f"Dataset Infos file saved at {dataset_infos_path}")
exit(1)
            # Move the dataset infos file back to the user
user_dataset_infos_path = os.path.join(dataset_dir, config.DATASETDICT_INFOS_FILENAME)
copyfile(dataset_infos_path, user_dataset_infos_path)
print(f"Dataset Infos file saved at {user_dataset_infos_path}")
| datasets-main | src/datasets/commands/run_beam.py |
import platform
from argparse import ArgumentParser
import huggingface_hub
import pandas
import pyarrow
from datasets import __version__ as version
from datasets.commands import BaseDatasetsCLICommand
def info_command_factory(_):
return EnvironmentCommand()
class EnvironmentCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser("env", help="Print relevant system environment info.")
download_parser.set_defaults(func=info_command_factory)
def run(self):
info = {
"`datasets` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"PyArrow version": pyarrow.__version__,
"Pandas version": pandas.__version__,
}
print("\nCopy-and-paste the text below in your GitHub issue.\n")
print(self.format_dict(info))
return info
@staticmethod
def format_dict(d):
return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| datasets-main | src/datasets/commands/env.py |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"tfds\.core", r"datasets"),
(r"tf\.io\.gfile\.GFile", r"open"),
(r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
(r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
(r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
(r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
(r"tfds\.features\.FeaturesDict\(", r"dict("),
(r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(r"tfds\.", r"datasets."),
(r"dl_manager\.manual_dir", r"self.config.data_dir"),
(r"self\.builder_config", r"self.config"),
]
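# Illustrative example (not part of the original file) of how TO_CONVERT rewrites a line:
#   features={"text": tfds.features.Text(), "label": tf.int64}
# becomes, after applying the patterns above in order:
#   features={"text": datasets.Value('string'), "label": datasets.Value('int64')}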
def convert_command_factory(args: Namespace):
"""
Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.
Returns: ConvertCommand
"""
return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""
Register this command to argparse so it's available for the datasets-cli
Args:
parser: Root parser to register command-specific arguments
"""
train_parser = parser.add_parser(
"convert",
help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
)
train_parser.add_argument(
"--tfds_path",
type=str,
required=True,
help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
)
train_parser.add_argument(
"--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
)
train_parser.set_defaults(func=convert_command_factory)
def __init__(self, tfds_path: str, datasets_directory: str, *args):
self._logger = get_logger("datasets-cli/converting")
self._tfds_path = tfds_path
self._datasets_directory = datasets_directory
def run(self):
if os.path.isdir(self._tfds_path):
abs_tfds_path = os.path.abspath(self._tfds_path)
elif os.path.isfile(self._tfds_path):
abs_tfds_path = os.path.dirname(self._tfds_path)
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
abs_datasets_path = os.path.abspath(self._datasets_directory)
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
utils_files = []
with_manual_update = []
imports_to_builder_map = {}
if os.path.isdir(self._tfds_path):
file_names = os.listdir(abs_tfds_path)
else:
file_names = [os.path.basename(self._tfds_path)]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}")
input_file = os.path.join(abs_tfds_path, f_name)
output_file = os.path.join(abs_datasets_path, f_name)
if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file")
continue
with open(input_file, encoding="utf-8") as f:
lines = f.readlines()
out_lines = []
is_builder = False
needs_manual_update = False
tfds_imports = []
for line in lines:
out_line = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
out_line = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
out_line = ""
continue
elif "from absl import logging" in out_line:
out_line = "from datasets import logging\n"
elif "getLogger" in out_line:
out_line = out_line.replace("getLogger", "get_logger")
elif any(expression in out_line for expression in TO_HIGHLIGHT):
needs_manual_update = True
to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
out_lines.append(out_line)
out_lines.append(HIGHLIGHT_MESSAGE_POST)
continue
else:
for pattern, replacement in TO_CONVERT:
out_line = re.sub(pattern, replacement, out_line)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
out_line = "from . import " + match.group(1)
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}")
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
is_builder = True
out_lines.append(out_line)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
dir_name = f_name.replace(".py", "")
output_dir = os.path.join(abs_datasets_path, dir_name)
output_file = os.path.join(output_dir, f_name)
os.makedirs(output_dir, exist_ok=True)
self._logger.info(f"Adding directory {output_dir}")
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
else:
# Utilities will be moved at the end
utils_files.append(output_file)
if needs_manual_update:
with_manual_update.append(output_file)
with open(output_file, "w", encoding="utf-8") as f:
f.writelines(out_lines)
self._logger.info(f"Converted in {output_file}")
for utils_file in utils_files:
try:
f_name = os.path.basename(utils_file)
dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
self._logger.info(f"Moving {dest_folder} to {utils_file}")
shutil.copy(utils_file, dest_folder)
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
)
| datasets-main | src/datasets/commands/convert.py |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseDatasetsCLICommand(ABC):
@staticmethod
@abstractmethod
def register_subcommand(parser: ArgumentParser):
raise NotImplementedError()
@abstractmethod
def run(self):
raise NotImplementedError()
| datasets-main | src/datasets/commands/__init__.py |
import os
from argparse import ArgumentParser
from pathlib import Path
from shutil import copyfile, rmtree
from typing import Generator
import datasets.config
from datasets.builder import DatasetBuilder
from datasets.commands import BaseDatasetsCLICommand
from datasets.download.download_manager import DownloadMode
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.filelock import logger as fl_logger
from datasets.utils.info_utils import VerificationMode
from datasets.utils.logging import ERROR, get_logger
logger = get_logger(__name__)
def _test_command_factory(args):
return TestCommand(
args.dataset,
args.name,
args.cache_dir,
args.data_dir,
args.all_configs,
args.save_info or args.save_infos,
args.ignore_verifications,
args.force_redownload,
args.clear_cache,
)
class TestCommand(BaseDatasetsCLICommand):
__test__ = False # to tell pytest it's not a test class
@staticmethod
def register_subcommand(parser: ArgumentParser):
test_parser = parser.add_parser("test", help="Test dataset implementation.")
test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name")
test_parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="Cache directory where the datasets are stored.",
)
test_parser.add_argument(
"--data_dir",
type=str,
default=None,
help="Can be used to specify a manual directory to get the files from.",
)
test_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
test_parser.add_argument(
"--save_info", action="store_true", help="Save the dataset infos in the dataset card (README.md)"
)
test_parser.add_argument(
"--ignore_verifications",
action="store_true",
help="Run the test without checksums and splits checks.",
)
test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
test_parser.add_argument(
"--clear_cache",
action="store_true",
help="Remove downloaded files and cached datasets after each config test",
)
# aliases
test_parser.add_argument("--save_infos", action="store_true", help="alias to save_info")
test_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
test_parser.set_defaults(func=_test_command_factory)
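    # Example invocation (illustrative): datasets-cli test ./my_dataset --all_configs --save_info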
def __init__(
self,
dataset: str,
name: str,
cache_dir: str,
data_dir: str,
all_configs: bool,
save_infos: bool,
ignore_verifications: bool,
force_redownload: bool,
clear_cache: bool,
):
self._dataset = dataset
self._name = name
self._cache_dir = cache_dir
self._data_dir = data_dir
self._all_configs = all_configs
self._save_infos = save_infos
self._ignore_verifications = ignore_verifications
self._force_redownload = force_redownload
self._clear_cache = clear_cache
if clear_cache and not cache_dir:
print(
"When --clear_cache is used, specifying a cache directory is mandatory.\n"
"The 'download' folder of the cache directory and the dataset builder cache will be deleted after each configuration test.\n"
"Please provide a --cache_dir that will be used to test the dataset script."
)
exit(1)
if save_infos:
self._ignore_verifications = True
def run(self):
fl_logger().setLevel(ERROR)
if self._name is not None and self._all_configs:
print("Both parameters `config` and `all_configs` can't be used at once.")
exit(1)
path, config_name = self._dataset, self._name
module = dataset_module_factory(path)
builder_cls = import_main_class(module.module_path)
n_builders = len(builder_cls.BUILDER_CONFIGS) if self._all_configs and builder_cls.BUILDER_CONFIGS else 1
def get_builders() -> Generator[DatasetBuilder, None, None]:
if self._all_configs and builder_cls.BUILDER_CONFIGS:
for i, config in enumerate(builder_cls.BUILDER_CONFIGS):
if "config_name" in module.builder_kwargs:
yield builder_cls(
cache_dir=self._cache_dir,
data_dir=self._data_dir,
**module.builder_kwargs,
)
else:
yield builder_cls(
config_name=config.name,
cache_dir=self._cache_dir,
data_dir=self._data_dir,
**module.builder_kwargs,
)
else:
if "config_name" in module.builder_kwargs:
yield builder_cls(cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs)
else:
yield builder_cls(
config_name=config_name,
cache_dir=self._cache_dir,
data_dir=self._data_dir,
**module.builder_kwargs,
)
for j, builder in enumerate(get_builders()):
print(f"Testing builder '{builder.config.name}' ({j + 1}/{n_builders})")
builder._record_infos = os.path.exists(
os.path.join(builder.get_imported_module_dir(), datasets.config.DATASETDICT_INFOS_FILENAME)
) # record checksums only if we need to update a (deprecated) dataset_infos.json
builder.download_and_prepare(
download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS
if not self._force_redownload
else DownloadMode.FORCE_REDOWNLOAD,
verification_mode=VerificationMode.NO_CHECKS
if self._ignore_verifications
else VerificationMode.ALL_CHECKS,
try_from_hf_gcs=False,
)
builder.as_dataset()
if self._save_infos:
builder._save_infos()
# If save_infos=True, the dataset card (README.md) is created next to the loaded module file.
# The dataset_infos are saved in the YAML part of the README.md
# Let's move it to the original directory of the dataset script, to allow the user to
# upload them on S3 at the same time afterwards.
if self._save_infos:
dataset_readme_path = os.path.join(builder_cls.get_imported_module_dir(), "README.md")
name = Path(path).name + ".py"
combined_path = os.path.join(path, name)
if os.path.isfile(path):
dataset_dir = os.path.dirname(path)
elif os.path.isfile(combined_path):
dataset_dir = path
elif os.path.isdir(path): # for local directories containing only data files
dataset_dir = path
else: # in case of a remote dataset
dataset_dir = None
print(f"Dataset card saved at {dataset_readme_path}")
# Move dataset_info back to the user
if dataset_dir is not None:
user_dataset_readme_path = os.path.join(dataset_dir, "README.md")
copyfile(dataset_readme_path, user_dataset_readme_path)
print(f"Dataset card saved at {user_dataset_readme_path}")
# If clear_cache=True, the download folder and the dataset builder cache directory are deleted
if self._clear_cache:
if os.path.isdir(builder._cache_dir):
logger.warning(f"Clearing cache at {builder._cache_dir}")
rmtree(builder._cache_dir)
download_dir = os.path.join(self._cache_dir, datasets.config.DOWNLOADED_DATASETS_DIR)
if os.path.isdir(download_dir):
logger.warning(f"Clearing cache at {download_dir}")
rmtree(download_dir)
print("Test successful.")
| datasets-main | src/datasets/commands/test.py |
#!/usr/bin/env python
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
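# For example, leftover CLI args like ["--num_proc", "2", "--data_dir", "./data"] become
# {"num_proc": "2", "data_dir": "./data"} (values stay strings) and are forwarded to the command.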
def main():
parser = ArgumentParser(
"HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
)
commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(commands_parser)
EnvironmentCommand.register_subcommand(commands_parser)
TestCommand.register_subcommand(commands_parser)
RunBeamCommand.register_subcommand(commands_parser)
DummyDataCommand.register_subcommand(commands_parser)
# Parse args
args, unknown_args = parser.parse_known_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
kwargs = parse_unknown_args(unknown_args)
# Run
service = args.func(args, **kwargs)
service.run()
if __name__ == "__main__":
main()
| datasets-main | src/datasets/commands/datasets_cli.py |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
"""Read contents of compressed file as a filesystem with one file inside."""
root_marker = ""
protocol: str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
compression: str = None # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__(
self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
):
"""
The compressed file system can be instantiated from any compressed file.
It reads the contents of compressed file as a filesystem with one file inside, as if it was an archive.
        The single file inside the filesystem is named after the compressed file,
without the compression extension at the end of the filename.
Args:
fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()``
mode (:obj:``str``): Currently, only 'rb' accepted
            target_protocol (:obj:``str``, optional): To override the FS protocol inferred from a URL.
target_options (:obj:``dict``, optional): Kwargs passed when instantiating the target FS.
"""
super().__init__(self, **kwargs)
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
self.file = fsspec.open(
fo,
mode="rb",
protocol=target_protocol,
compression=self.compression,
client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs", {}), # To avoid issues if it was already passed.
},
**(target_options or {}),
)
self.compressed_name = os.path.basename(self.file.path.split("::")[0])
self.uncompressed_name = (
self.compressed_name[: self.compressed_name.rindex(".")]
if "." in self.compressed_name
else self.compressed_name
)
self.dir_cache = None
@classmethod
def _strip_protocol(cls, path):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(path).lstrip("/")
def _get_dirs(self):
if self.dir_cache is None:
f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
self.dir_cache = {f["name"]: f}
def cat(self, path: str):
return self.file.open().read()
def _open(
self,
path: str,
mode: str = "rb",
block_size=None,
autocommit=True,
cache_options=None,
**kwargs,
):
path = self._strip_protocol(path)
if mode != "rb":
raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
"""Read contents of BZ2 file as a filesystem with one file inside."""
protocol = "bz2"
compression = "bz2"
extension = ".bz2"
class GzipFileSystem(BaseCompressedFileFileSystem):
"""Read contents of GZIP file as a filesystem with one file inside."""
protocol = "gzip"
compression = "gzip"
extension = ".gz"
class Lz4FileSystem(BaseCompressedFileFileSystem):
"""Read contents of LZ4 file as a filesystem with one file inside."""
protocol = "lz4"
compression = "lz4"
extension = ".lz4"
class XzFileSystem(BaseCompressedFileFileSystem):
"""Read contents of .xz (LZMA) file as a filesystem with one file inside."""
protocol = "xz"
compression = "xz"
extension = ".xz"
class ZstdFileSystem(BaseCompressedFileFileSystem):
"""
Read contents of zstd file as a filesystem with one file inside.
Note that reading in binary mode with fsspec isn't supported yet:
https://github.com/indygreg/python-zstandard/issues/136
"""
protocol = "zstd"
compression = "zstd"
extension = ".zst"
def __init__(
self,
fo: str,
mode: str = "rb",
target_protocol: Optional[str] = None,
target_options: Optional[dict] = None,
block_size: int = DEFAULT_BLOCK_SIZE,
**kwargs,
):
super().__init__(
fo=fo,
mode=mode,
target_protocol=target_protocol,
target_options=target_options,
block_size=block_size,
**kwargs,
)
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
_enter = self.file.__enter__
class WrappedFile:
def __init__(self, file_):
self._file = file_
def __enter__(self):
self._file.__enter__()
return self
def __exit__(self, *args, **kwargs):
self._file.__exit__(*args, **kwargs)
def __iter__(self):
return iter(self._file)
def __next__(self):
return next(self._file)
def __getattr__(self, attr):
return getattr(self._file, attr)
def fixed_enter(*args, **kwargs):
return WrappedFile(_enter(*args, **kwargs))
self.file.__enter__ = fixed_enter
| datasets-main | src/datasets/filesystems/compression.py |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
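# Once registered, these protocols can be used in chained fsspec URLs, for example (illustrative):
#   with fsspec.open("gzip://file.txt::https://example.com/file.txt.gz", "rb") as f:
#       data = f.read()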
def extract_path_from_uri(dataset_path: str) -> str:
"""
Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
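# For example, extract_path_from_uri("s3://my-bucket/dataset/train") returns "my-bucket/dataset/train",
# while a plain local path such as "dataset/train" is returned unchanged.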
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Validates if filesystem has remote protocol.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
"""
Renames the file `src` in `fs` to `dst`.
"""
is_local = not is_remote_filesystem(fs)
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
"""
Clear reference to the loop and thread.
This is necessary otherwise HTTPFileSystem hangs in the ML training loop.
Only required for fsspec >= 0.9.0
See https://github.com/fsspec/gcsfs/issues/379
"""
if hasattr(fsspec.asyn, "reset_lock"):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
| datasets-main | src/datasets/filesystems/__init__.py |
import s3fs
from ..utils.deprecation_utils import deprecated
@deprecated("Use s3fs.S3FileSystem instead.")
class S3FileSystem(s3fs.S3FileSystem):
"""
`datasets.filesystems.S3FileSystem` is a subclass of [`s3fs.S3FileSystem`](https://s3fs.readthedocs.io/en/latest/api.html).
Users can use this class to access S3 as if it were a file system. It exposes a filesystem-like API (ls, cp, open, etc.) on top of S3 storage. Provide credentials either explicitly (`key=`, `secret=`) or with boto's credential methods. See botocore documentation for more information. If no credentials are available, use `anon=True`.
Args:
anon (`bool`, default to `False`):
Whether to use anonymous connection (public buckets only). If `False`, uses the key/secret given,
or boto's credential resolver (client_kwargs, environment, variables, config files, EC2 IAM server, in that order).
key (`str`):
If not anonymous, use this access key ID, if specified.
secret (`str`):
If not anonymous, use this secret access key, if specified.
token (`str`):
If not anonymous, use this security token, if specified.
use_ssl (`bool`, defaults to `True`):
Whether to use SSL in connections to S3; may be faster without, but insecure. If `use_ssl` is
also set in `client_kwargs`, the value set in `client_kwargs` will take priority.
s3_additional_kwargs (`dict`):
Parameters that are used when calling S3 API methods. Typically used for things
like ServerSideEncryption.
client_kwargs (`dict`):
Parameters for the botocore client.
requester_pays (`bool`, defaults to `False`):
Whether `RequesterPays` buckets are supported.
default_block_size (`int`):
If given, the default block size value used for `open()`, if no specific value is given at all time.
The built-in default is 5MB.
default_fill_cache (`bool`, defaults to `True`):
Whether to use cache filling with open by default. Refer to `S3File.open`.
default_cache_type (`str`, defaults to `bytes`):
If given, the default `cache_type` value used for `open()`. Set to `none` if no
caching is desired. See fsspec's documentation for other available `cache_type` values.
version_aware (`bool`, defaults to `False`):
Whether to support bucket versioning. If enable this will require the user to have
the necessary IAM permissions for dealing with versioned objects.
cache_regions (`bool`, defaults to `False`):
Whether to cache bucket regions. Whenever a new bucket is used, it will
first find out which region it belongs to and then use the client for that region.
asynchronous (`bool`, defaults to `False`):
Whether this instance is to be used from inside coroutines.
config_kwargs (`dict`):
Parameters passed to `botocore.client.Config`.
**kwargs:
Other parameters for core session.
session (`aiobotocore.session.AioSession`):
Session to be used for all connections. This session will be used inplace of creating
a new session inside S3FileSystem. For example: `aiobotocore.session.AioSession(profile='test_user')`.
skip_instance_cache (`bool`):
Control reuse of instances. Passed on to `fsspec`.
use_listings_cache (`bool`):
Control reuse of directory listings. Passed on to `fsspec`.
listings_expiry_time (`int` or `float`):
Control reuse of directory listings. Passed on to `fsspec`.
max_paths (`int`): Control reuse of directory listings. Passed on to `fsspec`.
Examples:
Listing files from public S3 bucket.
```py
>>> import datasets
>>> s3 = datasets.filesystems.S3FileSystem(anon=True) # doctest: +SKIP
>>> s3.ls('public-datasets/imdb/train') # doctest: +SKIP
    ['dataset_info.json', 'dataset.arrow', 'state.json']
```
Listing files from private S3 bucket using `aws_access_key_id` and `aws_secret_access_key`.
```py
>>> import datasets
>>> s3 = datasets.filesystems.S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
>>> s3.ls('my-private-datasets/imdb/train') # doctest: +SKIP
    ['dataset_info.json', 'dataset.arrow', 'state.json']
```
    Using `S3FileSystem` with `botocore.session.Session` and a custom `aws_profile`.
```py
>>> import botocore
    >>> from datasets.filesystems import S3FileSystem
>>> s3_session = botocore.session.Session(profile_name='my_profile_name')
>>> s3 = S3FileSystem(session=s3_session) # doctest: +SKIP
```
    Loading dataset from S3 using `S3FileSystem` and [`load_from_disk`].
```py
>>> from datasets import load_from_disk
    >>> from datasets.filesystems import S3FileSystem
>>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
>>> dataset = load_from_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP
>>> print(len(dataset))
25000
```
    Saving dataset to S3 using `S3FileSystem` and [`Dataset.save_to_disk`].
```py
>>> from datasets import load_dataset
    >>> from datasets.filesystems import S3FileSystem
>>> dataset = load_dataset("imdb")
>>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
>>> dataset.save_to_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP
```
"""
| datasets-main | src/datasets/filesystems/s3filesystem.py |
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
from typing import Callable
from fastapi import FastAPI
from app.db.tasks import close_db_connection, connect_to_db
def create_start_app_handler(app: FastAPI) -> Callable:
async def start_app() -> None:
await connect_to_db(app)
return start_app
def create_stop_app_handler(app: FastAPI) -> Callable:
async def stop_app() -> None:
await close_db_connection(app)
return stop_app
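# Typical wiring in the application factory (illustrative, not part of this module):
#   app.add_event_handler("startup", create_start_app_handler(app))
#   app.add_event_handler("shutdown", create_stop_app_handler(app))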
| collaborative-training-auth-main | backend/app/core/tasks.py |
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
from databases import DatabaseURL
from starlette.config import Config
from starlette.datastructures import Secret
config = Config(".env")
PROJECT_NAME = "whitelist"
VERSION = "1.0.0"
API_PREFIX = "/api"
SECRET_KEY = config("SECRET_KEY", cast=Secret)
POSTGRES_USER = config("POSTGRES_USER", cast=str)
POSTGRES_PASSWORD = config("POSTGRES_PASSWORD", cast=Secret)
POSTGRES_SERVER = config("POSTGRES_SERVER", cast=str, default="db")
POSTGRES_PORT = config("POSTGRES_PORT", cast=str, default="5432")
POSTGRES_DB = config("POSTGRES_DB", cast=str)
DATABASE_URL = config(
"DATABASE_URL",
cast=DatabaseURL,
default=f"postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_SERVER}:{POSTGRES_PORT}/{POSTGRES_DB}",
)
EXPIRATION_MINUTES = 60 * 6
| collaborative-training-auth-main | backend/app/core/config.py |
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
import datetime
from typing import Optional
from pydantic import IPvAnyAddress, validator
from app.models.core import CoreModel
class HivemindAccess(CoreModel):
username: str
peer_public_key: bytes
expiration_time: datetime.datetime
signature: bytes
class ExperimentJoinInput(CoreModel):
"""
All common characteristics of our Experiment resource
"""
peer_public_key: Optional[bytes] # bytes
class ExperimentJoinOutput(CoreModel):
"""
All common characteristics of our Experiment resource
"""
coordinator_ip: Optional[IPvAnyAddress]
coordinator_port: Optional[int]
hivemind_access: HivemindAccess
auth_server_public_key: bytes
@validator("coordinator_port")
def validate_port(cls, port):
if port is None:
return port
        if int(port) >= 2 ** 16:  # valid TCP/UDP ports are 0-65535
raise ValueError("port overflow")
return port
| collaborative-training-auth-main | backend/app/models/experiment_join.py |
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
| collaborative-training-auth-main | backend/app/models/__init__.py |
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
from datetime import datetime
from typing import Optional
from pydantic import BaseModel, validator
class CoreModel(BaseModel):
"""
Any common logic to be shared by all models goes here.
"""
pass
class DateTimeModelMixin(BaseModel):
created_at: Optional[datetime]
updated_at: Optional[datetime]
@validator("created_at", "updated_at", pre=True)
def default_datetime(cls, value: datetime) -> datetime:
        return value or datetime.now()
class IDModelMixin(BaseModel):
id: int
| collaborative-training-auth-main | backend/app/models/core.py |
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
from typing import Optional
from pydantic import IPvAnyAddress, validator
from app.models.core import CoreModel, DateTimeModelMixin, IDModelMixin
class ExperimentBase(CoreModel):
"""
All common characteristics of our Experiment resource
"""
organization_name: Optional[str]
model_name: Optional[str]
coordinator_ip: Optional[IPvAnyAddress]
coordinator_port: Optional[int]
@validator("coordinator_port")
def validate_port(cls, port):
if port is None:
return port
        if int(port) >= 2 ** 16:  # valid TCP/UDP ports are 0-65535
raise ValueError("port overflow")
return port
class ExperimentCreatePublic(ExperimentBase):
organization_name: str
model_name: str
class ExperimentCreate(ExperimentBase):
organization_name: str
model_name: str
auth_server_public_key: Optional[bytes]
auth_server_private_key: Optional[bytes]
class ExperimentUpdate(ExperimentBase):
pass
class ExperimentInDB(IDModelMixin, DateTimeModelMixin, ExperimentBase):
organization_name: str
model_name: str
creator: str
auth_server_public_key: Optional[bytes]
auth_server_private_key: Optional[bytes]
class ExperimentPublic(IDModelMixin, DateTimeModelMixin, ExperimentBase):
organization_name: str
model_name: str
creator: str
coordinator_ip: Optional[IPvAnyAddress]
coordinator_port: Optional[int]
class DeletedExperimentPublic(IDModelMixin, CoreModel):
pass
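# Illustrative usage of the validator above: ExperimentUpdate(coordinator_port=8080) is valid,
# while ExperimentUpdate(coordinator_port=70000) raises a pydantic ValidationError because the
# port does not fit in 16 bits.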
| collaborative-training-auth-main | backend/app/models/experiment.py |
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
import logging
import os
from databases import Database
from fastapi import FastAPI
from app.core.config import DATABASE_URL
logger = logging.getLogger(__name__)
async def connect_to_db(app: FastAPI) -> None:
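    # Point at the dedicated "<db>_test" database when the TESTING env var is set.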
DB_URL = f"{DATABASE_URL}_test" if os.environ.get("TESTING") else DATABASE_URL
database = Database(DB_URL, min_size=2, max_size=10)
try:
await database.connect()
app.state._db = database
except Exception as e:
logger.warn("--- DB CONNECTION ERROR ---")
logger.warn(e)
logger.warn("--- DB CONNECTION ERROR ---")
async def close_db_connection(app: FastAPI) -> None:
try:
await app.state._db.disconnect()
except Exception as e:
logger.warn("--- DB DISCONNECT ERROR ---")
logger.warn(e)
logger.warn("--- DB DISCONNECT ERROR ---")
| collaborative-training-auth-main | backend/app/db/tasks.py |
from typing import Tuple
from sqlalchemy import TIMESTAMP, Column, Integer, LargeBinary, MetaData, Table, Text, UniqueConstraint, func
from sqlalchemy_utils import IPAddressType
metadata = MetaData()
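# Shared created_at/updated_at audit columns, defaulting to now() on the server side.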
def timestamps(indexed: bool = False) -> Tuple[Column, Column]:
return (
Column(
"created_at",
TIMESTAMP(timezone=True),
server_default=func.now(),
nullable=False,
index=indexed,
),
Column(
"updated_at",
TIMESTAMP(timezone=True),
server_default=func.now(),
nullable=False,
index=indexed,
),
)
experiments_table = Table(
"experiments",
metadata,
Column("id", Integer, primary_key=True),
Column("organization_name", Text, nullable=False, index=True),
Column("model_name", Text, nullable=False, index=True),
Column("creator", Text, nullable=False, index=True),
Column("coordinator_ip", IPAddressType),
Column("coordinator_port", Integer),
Column("auth_server_public_key", LargeBinary),
Column("auth_server_private_key", LargeBinary),
*timestamps(),
UniqueConstraint("organization_name", "model_name", name="uix_1"),
)
| collaborative-training-auth-main | backend/app/db/models.py |
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
| collaborative-training-auth-main | backend/app/db/__init__.py |
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
import logging
import os
import pathlib
import sys
from logging.config import fileConfig
import alembic
from psycopg2 import DatabaseError
from sqlalchemy import create_engine, engine_from_config, pool
# we're appending the app directory to our path here so that we can import config easily
sys.path.append(str(pathlib.Path(__file__).resolve().parents[3]))
from app.core.config import DATABASE_URL, POSTGRES_DB # noqa
from app.db.models import metadata # noqa
# add your model's MetaData object here
# for 'autogenerate' support
target_metadata = metadata
# Alembic Config object, which provides access to values within the .ini file
config = alembic.context.config
# Interpret the config file for logging
fileConfig(config.config_file_name)
logger = logging.getLogger("alembic.env")
def run_migrations_online() -> None:
"""
Run migrations in 'online' mode
"""
DB_URL = f"{DATABASE_URL}_test" if os.environ.get("TESTING") else str(DATABASE_URL)
# handle testing config for migrations
if os.environ.get("TESTING"):
# connect to primary db
default_engine = create_engine(str(DATABASE_URL), isolation_level="AUTOCOMMIT")
# drop testing db if it exists and create a fresh one
with default_engine.connect() as default_conn:
default_conn.execute(f"DROP DATABASE IF EXISTS {POSTGRES_DB}_test")
default_conn.execute(f"CREATE DATABASE {POSTGRES_DB}_test")
connectable = config.attributes.get("connection", None)
config.set_main_option("sqlalchemy.url", DB_URL)
if connectable is None:
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
alembic.context.configure(connection=connection, target_metadata=target_metadata)
with alembic.context.begin_transaction():
alembic.context.run_migrations()
def run_migrations_offline() -> None:
"""
Run migrations in 'offline' mode.
"""
if os.environ.get("TESTING"):
raise DatabaseError("Running testing migrations offline currently not permitted.")
alembic.context.configure(url=str(DATABASE_URL), target_metadata=target_metadata)
with alembic.context.begin_transaction():
alembic.context.run_migrations()
if alembic.context.is_offline_mode():
logger.info("Running migrations offline")
run_migrations_offline()
else:
logger.info("Running migrations online")
run_migrations_online()
| collaborative-training-auth-main | backend/app/db/migrations/env.py |
"""init table
Revision ID: a7841c3b04d0
Revises: 1ff5763812c1
Create Date: 2021-11-15 18:16:30.192984
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic
revision = "a7841c3b04d0"
down_revision = "1ff5763812c1"
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("ix_users_username", table_name="users")
op.drop_table("users")
op.drop_index("ix_experiments_name", table_name="experiments")
op.drop_index("ix_experiments_owner", table_name="experiments")
op.drop_table("experiments")
op.drop_index("ix_whitelist_experiment_id", table_name="whitelist")
op.drop_table("whitelist")
op.drop_table("collaborators")
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"collaborators",
sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column("peer_public_key", postgresql.BYTEA(), autoincrement=False, nullable=False),
sa.Column("user_id", sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column("whitelist_item_id", sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column(
"created_at",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
autoincrement=False,
nullable=False,
),
sa.Column(
"updated_at",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
autoincrement=False,
nullable=False,
),
sa.PrimaryKeyConstraint("id", name="collaborators_pkey"),
)
op.create_table(
"whitelist",
sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column("experiment_id", sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column("user_id", sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column(
"created_at",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
autoincrement=False,
nullable=False,
),
sa.Column(
"updated_at",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
autoincrement=False,
nullable=False,
),
sa.PrimaryKeyConstraint("id", name="whitelist_pkey"),
)
op.create_index("ix_whitelist_experiment_id", "whitelist", ["experiment_id"], unique=False)
op.create_table(
"experiments",
sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column("name", sa.TEXT(), autoincrement=False, nullable=False),
sa.Column("owner", sa.TEXT(), autoincrement=False, nullable=False),
sa.Column(
"created_at",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
autoincrement=False,
nullable=False,
),
sa.Column(
"updated_at",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
autoincrement=False,
nullable=False,
),
sa.Column("coordinator_ip", sa.VARCHAR(length=50), autoincrement=False, nullable=True),
sa.Column("coordinator_port", sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column("auth_server_public_key", postgresql.BYTEA(), autoincrement=False, nullable=True),
sa.Column("auth_server_private_key", postgresql.BYTEA(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint("id", name="experiments_pkey"),
)
op.create_index("ix_experiments_owner", "experiments", ["owner"], unique=False)
op.create_index("ix_experiments_name", "experiments", ["name"], unique=False)
op.create_table(
"users",
sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column("username", sa.TEXT(), autoincrement=False, nullable=True),
sa.Column(
"created_at",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
autoincrement=False,
nullable=False,
),
sa.Column(
"updated_at",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
autoincrement=False,
nullable=False,
),
sa.PrimaryKeyConstraint("id", name="users_pkey"),
)
op.create_index("ix_users_username", "users", ["username"], unique=True)
# ### end Alembic commands ###
| collaborative-training-auth-main | backend/app/db/migrations/versions/a7841c3b04d0_drop_everything.py |
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
"""create_main_tables
Revision ID: 2dc2179b353f
Revises:
Create Date: 2021-04-28 07:54:49.181680
""" # noqa
from typing import Tuple
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic
revision = "2dc2179b353f"
down_revision = None
branch_labels = None
depends_on = None
def create_updated_at_trigger() -> None:
op.execute(
"""
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS
$$
BEGIN
NEW.updated_at = now();
RETURN NEW;
END;
$$ language 'plpgsql';
"""
)
def timestamps(indexed: bool = False) -> Tuple[sa.Column, sa.Column]:
return (
sa.Column(
"created_at",
sa.TIMESTAMP(timezone=True),
server_default=sa.func.now(),
nullable=False,
index=indexed,
),
sa.Column(
"updated_at",
sa.TIMESTAMP(timezone=True),
server_default=sa.func.now(),
nullable=False,
index=indexed,
),
)
def create_experiments_table() -> None:
op.create_table(
"experiments",
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("name", sa.Text, nullable=False, index=True),
sa.Column("owner", sa.Text, nullable=False, index=True),
*timestamps(),
)
op.execute(
"""
CREATE TRIGGER update_experiments_modtime
BEFORE UPDATE
ON experiments
FOR EACH ROW
EXECUTE PROCEDURE update_updated_at_column();
"""
)
def create_whitelist_table() -> None:
op.create_table(
"whitelist",
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("experiment_id", sa.Integer, nullable=False, index=True),
sa.Column("user_id", sa.Integer, nullable=False, index=False),
*timestamps(),
)
op.execute(
"""
CREATE TRIGGER update_whitelist_modtime
BEFORE UPDATE
ON whitelist
FOR EACH ROW
EXECUTE PROCEDURE update_updated_at_column();
"""
)
def create_users_table() -> None:
op.create_table(
"users",
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("username", sa.Text, unique=True, nullable=True, index=True),
*timestamps(),
)
op.execute(
"""
CREATE TRIGGER update_user_modtime
BEFORE UPDATE
ON users
FOR EACH ROW
EXECUTE PROCEDURE update_updated_at_column();
"""
)
def upgrade() -> None:
create_updated_at_trigger()
create_experiments_table()
create_whitelist_table()
create_users_table()
def downgrade() -> None:
op.drop_table("users")
op.drop_table("experiments")
op.drop_table("whitelist")
op.execute("DROP FUNCTION update_updated_at_column")
| collaborative-training-auth-main | backend/app/db/migrations/versions/2dc2179b353f_create_main_tables.py |
"""create experiments table
Revision ID: 97659da4900e
Revises: a7841c3b04d0
Create Date: 2021-11-15 18:18:47.732081
"""
import sqlalchemy as sa
import sqlalchemy_utils
from alembic import op
# revision identifiers, used by Alembic
revision = "97659da4900e"
down_revision = "a7841c3b04d0"
branch_labels = None
depends_on = None
def create_updated_at_trigger() -> None:
op.execute(
"""
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS
$$
BEGIN
NEW.updated_at = now();
RETURN NEW;
END;
$$ language 'plpgsql';
"""
)
def upgrade() -> None:
create_updated_at_trigger()
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"experiments",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("organization_name", sa.Text(), nullable=False),
sa.Column("model_name", sa.Text(), nullable=False),
sa.Column("creator", sa.Text(), nullable=False),
sa.Column("coordinator_ip", sqlalchemy_utils.types.ip_address.IPAddressType(length=50), nullable=True),
sa.Column("coordinator_port", sa.Integer(), nullable=True),
sa.Column("auth_server_public_key", sa.LargeBinary(), nullable=True),
sa.Column("auth_server_private_key", sa.LargeBinary(), nullable=True),
sa.Column("created_at", sa.TIMESTAMP(timezone=True), server_default=sa.text("now()"), nullable=False),
sa.Column("updated_at", sa.TIMESTAMP(timezone=True), server_default=sa.text("now()"), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_experiments_model_name"), "experiments", ["model_name"], unique=False)
op.create_index(op.f("ix_experiments_organization_name"), "experiments", ["organization_name"], unique=False)
op.execute(
"""
CREATE TRIGGER update_experiments_modtime
BEFORE UPDATE
ON experiments
FOR EACH ROW
EXECUTE PROCEDURE update_updated_at_column();
"""
)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_experiments_organization_name"), table_name="experiments")
op.drop_index(op.f("ix_experiments_model_name"), table_name="experiments")
op.drop_index(op.f("ix_experiments_creator"), table_name="experiments")
op.drop_table("experiments")
op.execute("DROP FUNCTION update_updated_at_column")
# ### end Alembic commands ###
| collaborative-training-auth-main | backend/app/db/migrations/versions/97659da4900e_create_experiments_table.py |
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
"""add coordinator ip and port
Revision ID: abc051ececd5
Revises: 2dc2179b353f
Create Date: 2021-04-30 14:23:44.114294
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy_utils import IPAddressType
# revision identifiers, used by Alembic
revision = "abc051ececd5"
down_revision = "2dc2179b353f"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.add_column("experiments", sa.Column("coordinator_ip", IPAddressType))
op.add_column("experiments", sa.Column("coordinator_port", sa.Integer))
def downgrade() -> None:
op.drop_column("experiments", "coordinator_ip")
op.drop_column("experiments", "coordinator_port")
| collaborative-training-auth-main | backend/app/db/migrations/versions/abc051ececd5_add_coordinator_ip_and_port.py |
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
"""add_keys
Revision ID: ba788d7c81bf
Revises: abc051ececd5
Create Date: 2021-05-03 15:29:01.414138
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic
revision = "ba788d7c81bf"
down_revision = "abc051ececd5"
branch_labels = None
depends_on = None
def upgrade() -> None:
op.add_column("whitelist", sa.Column("peer_public_key", sa.Text))
op.add_column("experiments", sa.Column("auth_server_public_key", sa.LargeBinary))
op.add_column("experiments", sa.Column("auth_server_private_key", sa.LargeBinary))
def downgrade() -> None:
op.drop_column("whitelist", "peer_public_key")
op.drop_column("experiments", "auth_server_public_key")
op.drop_column("experiments", "auth_server_private_key")
| collaborative-training-auth-main | backend/app/db/migrations/versions/ba788d7c81bf_add_keys.py |
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
"""create_collaborator
Revision ID: 1ff5763812c1
Revises: ba788d7c81bf
Create Date: 2021-05-04 19:04:16.294453
"""
from typing import Tuple
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic
revision = "1ff5763812c1"
down_revision = "ba788d7c81bf"
branch_labels = None
depends_on = None
def timestamps(indexed: bool = False) -> Tuple[sa.Column, sa.Column]:
return (
sa.Column(
"created_at",
sa.TIMESTAMP(timezone=True),
server_default=sa.func.now(),
nullable=False,
index=indexed,
),
sa.Column(
"updated_at",
sa.TIMESTAMP(timezone=True),
server_default=sa.func.now(),
nullable=False,
index=indexed,
),
)
def create_collaborators_table() -> None:
op.create_table(
"collaborators",
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("peer_public_key", sa.LargeBinary, nullable=False, index=False),
sa.Column("user_id", sa.Integer, nullable=False, index=False),
sa.Column("whitelist_item_id", sa.Integer, nullable=False, index=False),
*timestamps(),
)
op.execute(
"""
CREATE TRIGGER update_collaborators_modtime
BEFORE UPDATE
ON collaborators
FOR EACH ROW
EXECUTE PROCEDURE update_updated_at_column();
"""
)
def upgrade() -> None:
op.drop_column("whitelist", "peer_public_key")
create_collaborators_table()
def downgrade() -> None:
op.add_column("whitelist", sa.Column("peer_public_key", sa.Text))
op.drop_table("collaborators")
| collaborative-training-auth-main | backend/app/db/migrations/versions/1ff5763812c1_create_collaborator.py |
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
| collaborative-training-auth-main | backend/app/db/repositories/__init__.py |
#
# Copyright (c) 2021 the Hugging Face team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.#
from ipaddress import IPv4Address, IPv6Address
from typing import List
from fastapi import HTTPException
from starlette.status import HTTP_400_BAD_REQUEST
from app.db.repositories.base import BaseRepository
from app.models.experiment import ExperimentCreate, ExperimentInDB, ExperimentUpdate
from app.services.authentication import MoonlandingUser
COLUMNS = "id, organization_name, model_name, creator, coordinator_ip, coordinator_port, auth_server_public_key, auth_server_private_key, created_at, updated_at"
CREATE_EXPERIMENT_QUERY = """
INSERT INTO experiments (organization_name, model_name, creator, coordinator_ip, coordinator_port, auth_server_public_key, auth_server_private_key)
VALUES (:organization_name, :model_name, :creator, :coordinator_ip, :coordinator_port, :auth_server_public_key, :auth_server_private_key)
RETURNING id, organization_name, model_name, creator, coordinator_ip, coordinator_port, auth_server_public_key, auth_server_private_key, created_at, updated_at;
"""
GET_EXPERIMENT_BY_ID_QUERY = """
SELECT id, organization_name, model_name, creator, coordinator_ip, coordinator_port, auth_server_public_key, auth_server_private_key, created_at, updated_at
FROM experiments
WHERE id = :id;
"""
GET_EXPERIMENT_BY_ORGANIZATON_AND_MODEL_NAME_QUERY = """
SELECT id, organization_name, model_name, creator, coordinator_ip, coordinator_port, auth_server_public_key, auth_server_private_key, created_at, updated_at
FROM experiments
WHERE model_name = :model_name
AND organization_name = :organization_name;
"""
LIST_ALL_USER_EXPERIMENTS_QUERY = """
SELECT id, organization_name, model_name, creator, coordinator_ip, coordinator_port, auth_server_public_key, auth_server_private_key, created_at, updated_at
FROM experiments
WHERE creator = :creator;
"""
UPDATE_EXPERIMENT_BY_ID_QUERY = """
UPDATE experiments
SET organization_name = :organization_name,
model_name = :model_name,
coordinator_ip = :coordinator_ip,
coordinator_port = :coordinator_port,
creator = :creator
WHERE id = :id
RETURNING id, organization_name, model_name, creator, coordinator_ip, coordinator_port, auth_server_public_key, auth_server_private_key, created_at, updated_at;
"""
DELETE_EXPERIMENT_BY_ID_QUERY = """
DELETE FROM experiments
WHERE id = :id
RETURNING id;
"""
class ExperimentsRepository(BaseRepository):
""" "
All database actions associated with the Experiment resource
"""
async def create_experiment(
self, *, new_experiment: ExperimentCreate, requesting_user: MoonlandingUser
) -> ExperimentInDB:
new_experiment_table = {**new_experiment.dict(), "creator": requesting_user.username}
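        # Serialize ipaddress objects to plain strings before binding them as query parameters.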
if "coordinator_ip" in new_experiment_table.keys() and (
isinstance(new_experiment_table["coordinator_ip"], IPv4Address)
or isinstance(new_experiment_table["coordinator_ip"], IPv6Address)
):
new_experiment_table["coordinator_ip"] = str(new_experiment_table["coordinator_ip"])
experiment = await self.db.fetch_one(query=CREATE_EXPERIMENT_QUERY, values=new_experiment_table)
return ExperimentInDB(**experiment)
async def get_experiment_by_organization_and_model_name(
self, *, organization_name: str, model_name: str
) -> ExperimentInDB:
experiment = await self.db.fetch_one(
query=GET_EXPERIMENT_BY_ORGANIZATON_AND_MODEL_NAME_QUERY,
values={"organization_name": organization_name, "model_name": model_name},
)
if not experiment:
return None
return ExperimentInDB(**experiment)
async def get_experiment_by_id(self, *, id: int) -> ExperimentInDB:
experiment = await self.db.fetch_one(query=GET_EXPERIMENT_BY_ID_QUERY, values={"id": id})
if not experiment:
return None
return ExperimentInDB(**experiment)
async def list_all_user_experiments(self, requesting_user: MoonlandingUser) -> List[ExperimentInDB]:
experiment_records = await self.db.fetch_all(
query=LIST_ALL_USER_EXPERIMENTS_QUERY, values={"creator": requesting_user.username}
)
return [ExperimentInDB(**exp) for exp in experiment_records]
async def update_experiment_by_id(self, *, id_exp: int, experiment_update: ExperimentUpdate) -> ExperimentInDB:
experiment = await self.get_experiment_by_id(id=id_exp)
if not experiment:
return None
experiment_update_params = experiment.copy(update=experiment_update.dict(exclude_unset=True))
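        # The key pair and timestamps are excluded: they are not part of the UPDATE statement.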
values = {
**experiment_update_params.dict(
exclude={
"auth_server_public_key",
"auth_server_private_key",
"created_at",
"updated_at",
}
)
}
if "coordinator_ip" in values.keys() and (
isinstance(values["coordinator_ip"], IPv4Address) or isinstance(values["coordinator_ip"], IPv6Address)
):
values["coordinator_ip"] = str(values["coordinator_ip"])
try:
updated_experiment = await self.db.fetch_one(query=UPDATE_EXPERIMENT_BY_ID_QUERY, values=values)
except Exception as e:
print(e)
raise HTTPException(status_code=HTTP_400_BAD_REQUEST, detail="Invalid update params.")
return ExperimentInDB(**updated_experiment)
async def delete_experiment_by_id(self, *, id: int) -> int:
experiment = await self.get_experiment_by_id(id=id)
if not experiment:
return None
deleted_id = await self.db.execute(query=DELETE_EXPERIMENT_BY_ID_QUERY, values={"id": id})
return deleted_id
| collaborative-training-auth-main | backend/app/db/repositories/experiments.py |